[llvm-branch-commits] [flang] [Flang][OpenMP] NFC: Remove unused argument for omp.target lowering (PR #97564)
Sergio Afonso via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Jul 4 06:17:15 PDT 2024
In-Reply-To: <llvm.org/llvm/llvm-project/pull/97564 at github.com>
https://github.com/skatrak updated https://github.com/llvm/llvm-project/pull/97564
From 46f7929879a59ec72dc75679b4201e2d314efba9 Mon Sep 17 00:00:00 2001
From: Davide Italiano <davidino at fb.com>
Date: Tue, 2 Jul 2024 08:53:48 -0700
Subject: [PATCH 001/246] Revert "Remove llvm/MC/MCAsmLayout.h"
This reverts commit 122db8b2cb7fa43ce1d6dc17148080579fcfb55a.
---
clang/docs/tools/clang-formatted-files.txt | 1 +
llvm/include/llvm/MC/MCAsmLayout.h | 22 ++++++++++++++++++++++
2 files changed, 23 insertions(+)
create mode 100644 llvm/include/llvm/MC/MCAsmLayout.h
diff --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt
index a8ee8f1fcb87c..4866bd4aee634 100644
--- a/clang/docs/tools/clang-formatted-files.txt
+++ b/clang/docs/tools/clang-formatted-files.txt
@@ -5357,6 +5357,7 @@ llvm/include/llvm/MC/MCAsmInfoELF.h
llvm/include/llvm/MC/MCAsmInfoGOFF.h
llvm/include/llvm/MC/MCAsmInfoWasm.h
llvm/include/llvm/MC/MCAsmInfoXCOFF.h
+llvm/include/llvm/MC/MCAsmLayout.h
llvm/include/llvm/MC/MCCodeView.h
llvm/include/llvm/MC/MCContext.h
llvm/include/llvm/MC/MCFixedLenDisassembler.h
diff --git a/llvm/include/llvm/MC/MCAsmLayout.h b/llvm/include/llvm/MC/MCAsmLayout.h
new file mode 100644
index 0000000000000..33fae0a0f9766
--- /dev/null
+++ b/llvm/include/llvm/MC/MCAsmLayout.h
@@ -0,0 +1,22 @@
+//===- MCAsmLayout.h - Assembly Layout Object -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMLAYOUT_H
+#define LLVM_MC_MCASMLAYOUT_H
+
+namespace llvm {
+class MCAssembler;
+
+class MCAsmLayout {
+public:
+ MCAsmLayout(MCAssembler &) {}
+};
+
+} // end namespace llvm
+
+#endif
From ac0b48a0dbf83b0c7e73fc5635af5b2912c1c54d Mon Sep 17 00:00:00 2001
From: Davide Italiano <davidino at fb.com>
Date: Tue, 2 Jul 2024 08:54:05 -0700
Subject: [PATCH 002/246] Revert "MCAssembler::layout: remove the unused
MCAsmLayout parameter"
This reverts commit 63ec52f867ada8d841dd872acf3d0cb62e2a99e8.
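For context, this revert restores the calling convention in which callers
construct the (stub) layout object themselves and pass it in; a minimal
sketch, assuming `Assembler` is an `MCAssembler &` already in scope, mirroring
the call sites in the diff below:

    MCAsmLayout Layout(Assembler); // stub layout object tied to the assembler
    Assembler.layout(Layout);      // lay out all sections before emission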
---
bolt/lib/Core/BinaryContext.cpp | 4 +++-
llvm/include/llvm/MC/MCAssembler.h | 3 ++-
llvm/lib/MC/MCAssembler.cpp | 7 +++++--
llvm/lib/MC/MCExpr.cpp | 1 +
llvm/tools/dsymutil/MachOUtils.cpp | 4 +++-
5 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index f28a0cd6eb9c6..3bd715d487e0f 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -20,6 +20,7 @@
#include "llvm/DebugInfo/DWARF/DWARFCompileUnit.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
@@ -2415,7 +2416,8 @@ BinaryContext::calculateEmittedSize(BinaryFunction &BF, bool FixBranches) {
MCAssembler &Assembler =
static_cast<MCObjectStreamer *>(Streamer.get())->getAssembler();
- Assembler.layout();
+ MCAsmLayout Layout(Assembler);
+ Assembler.layout(Layout);
// Obtain fragment sizes.
std::vector<uint64_t> FragmentSizes;
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index 9cd65d388d247..df5ad0e7bdf4b 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -46,6 +46,7 @@ class MCRelaxableFragment;
class MCSymbolRefExpr;
class raw_ostream;
class MCAsmBackend;
+class MCAsmLayout;
class MCContext;
class MCCodeEmitter;
class MCFragment;
@@ -340,7 +341,7 @@ class MCAssembler {
void Finish();
// Layout all section and prepare them for emission.
- void layout();
+ void layout(MCAsmLayout &Layout);
// FIXME: This does not belong here.
bool getSubsectionsViaSymbols() const { return SubsectionsViaSymbols; }
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 4cafec6024a7e..6866a58ecde59 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
@@ -936,7 +937,7 @@ MCAssembler::handleFixup(MCFragment &F, const MCFixup &Fixup,
return std::make_tuple(Target, FixedValue, IsResolved);
}
-void MCAssembler::layout() {
+void MCAssembler::layout(MCAsmLayout &Layout) {
assert(getBackendPtr() && "Expected assembler backend");
DEBUG_WITH_TYPE("mc-dump", {
errs() << "assembler backend - pre-layout\n--\n";
@@ -1073,7 +1074,9 @@ void MCAssembler::layout() {
}
void MCAssembler::Finish() {
- layout();
+ // Create the layout object.
+ MCAsmLayout Layout(*this);
+ layout(Layout);
// Write the object file.
stats::ObjectBytes += getWriter().writeObject(*this);
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index b42a668bce23c..0a175ade68d78 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -12,6 +12,7 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCObjectWriter.h"
diff --git a/llvm/tools/dsymutil/MachOUtils.cpp b/llvm/tools/dsymutil/MachOUtils.cpp
index d2bdcf8542b84..fba66309ca20b 100644
--- a/llvm/tools/dsymutil/MachOUtils.cpp
+++ b/llvm/tools/dsymutil/MachOUtils.cpp
@@ -12,6 +12,7 @@
#include "LinkUtils.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/NonRelocatableStringpool.h"
+#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectStreamer.h"
@@ -380,7 +381,8 @@ bool generateDsymCompanion(
auto &Writer = static_cast<MachObjectWriter &>(MCAsm.getWriter());
// Layout but don't emit.
- MCAsm.layout();
+ MCAsmLayout Layout(MCAsm);
+ MCAsm.layout(Layout);
BinaryHolder InputBinaryHolder(VFS, false);
From f80a4072ced41b52363c63df28fea9a649f7f89e Mon Sep 17 00:00:00 2001
From: Davide Italiano <davidino at fb.com>
Date: Tue, 2 Jul 2024 08:54:18 -0700
Subject: [PATCH 003/246] Revert "[MC] Use a stub ctor for MCAsmLayout"
This reverts commit bbb50369a149d9a7d1f91efaaabf75c260a220c7.
This breaks BOLT.
---
llvm/include/llvm/MC/MCAsmLayout.h | 11 ++++++++++-
llvm/include/llvm/MC/MCAssembler.h | 5 +++--
llvm/lib/MC/MCAssembler.cpp | 10 +++++-----
llvm/lib/MC/MCExpr.cpp | 4 ++--
llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp | 2 +-
llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp | 2 +-
6 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/llvm/include/llvm/MC/MCAsmLayout.h b/llvm/include/llvm/MC/MCAsmLayout.h
index 33fae0a0f9766..765cc1ebb7c79 100644
--- a/llvm/include/llvm/MC/MCAsmLayout.h
+++ b/llvm/include/llvm/MC/MCAsmLayout.h
@@ -9,12 +9,21 @@
#ifndef LLVM_MC_MCASMLAYOUT_H
#define LLVM_MC_MCASMLAYOUT_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
namespace llvm {
class MCAssembler;
+class MCSection;
class MCAsmLayout {
+ MCAssembler &Assembler;
+
public:
- MCAsmLayout(MCAssembler &) {}
+ MCAsmLayout(MCAssembler &Assembler);
+
+ /// Get the assembler object this is a layout for.
+ MCAssembler &getAssembler() const { return Assembler; }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index df5ad0e7bdf4b..1e476ae61dec6 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -116,7 +116,7 @@ class MCAssembler {
std::unique_ptr<MCCodeEmitter> Emitter;
std::unique_ptr<MCObjectWriter> Writer;
- bool HasLayout = false;
+ MCAsmLayout *Layout = nullptr;
bool RelaxAll = false;
bool SubsectionsViaSymbols = false;
bool IncrementalLinkerCompatible = false;
@@ -354,7 +354,8 @@ class MCAssembler {
IncrementalLinkerCompatible = Value;
}
- bool hasLayout() const { return HasLayout; }
+ MCAsmLayout *getLayout() const { return Layout; }
+ bool hasLayout() const { return Layout; }
bool getRelaxAll() const { return RelaxAll; }
void setRelaxAll(bool Value) { RelaxAll = Value; }
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 6866a58ecde59..0a6bb52a3b8f4 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -381,6 +381,8 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
llvm_unreachable("invalid fragment kind");
}
+MCAsmLayout::MCAsmLayout(MCAssembler &Asm) : Assembler(Asm) {}
+
// Compute the amount of padding required before the fragment \p F to
// obey bundling restrictions, where \p FOffset is the fragment's offset in
// its section and \p FSize is the fragment's size.
@@ -539,14 +541,13 @@ bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
}
uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
- assert(HasLayout);
uint64_t Val;
getSymbolOffsetImpl(*this, S, true, Val);
return Val;
}
const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
- assert(HasLayout);
+ assert(Layout);
if (!Symbol.isVariable())
return &Symbol;
@@ -583,7 +584,6 @@ const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
}
uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
- assert(HasLayout);
// The size is the last fragment's end offset.
const MCFragment &F = *Sec.curFragList()->Tail;
return getFragmentOffset(F) + computeFragmentSize(F);
@@ -968,7 +968,7 @@ void MCAssembler::layout(MCAsmLayout &Layout) {
}
// Layout until everything fits.
- this->HasLayout = true;
+ this->Layout = &Layout;
while (layoutOnce()) {
if (getContext().hadError())
return;
@@ -1081,7 +1081,7 @@ void MCAssembler::Finish() {
// Write the object file.
stats::ObjectBytes += getWriter().writeObject(*this);
- HasLayout = false;
+ this->Layout = nullptr;
}
bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 0a175ade68d78..82795399900c2 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -626,7 +626,7 @@ static void AttemptToFoldSymbolOffsetDifference(
// separated by a linker-relaxable instruction. If the section contains
// instructions and InSet is false (not expressions in directive like
// .size/.fill), disable the fast path.
- bool Layout = Asm->hasLayout();
+ const MCAsmLayout *Layout = Asm->getLayout();
if (Layout && (InSet || !SecA.hasInstructions() ||
!(Asm->getContext().getTargetTriple().isRISCV() ||
Asm->getContext().getTargetTriple().isLoongArch()))) {
@@ -817,6 +817,7 @@ bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
const SectionAddrMap *Addrs,
bool InSet) const {
++stats::MCExprEvaluate;
+ MCAsmLayout *Layout = Asm ? Asm->getLayout() : nullptr;
switch (getKind()) {
case Target:
return cast<MCTargetExpr>(this)->evaluateAsRelocatableImpl(Res, Asm, Fixup);
@@ -829,7 +830,6 @@ bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(this);
const MCSymbol &Sym = SRE->getSymbol();
const auto Kind = SRE->getKind();
- bool Layout = Asm && Asm->hasLayout();
// Evaluate recursively if this is a variable.
if (Sym.isVariable() && (Kind == MCSymbolRefExpr::VK_None || Layout) &&
diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp
index 5386df7f4afcd..87355561c1cb5 100644
--- a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp
+++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp
@@ -79,7 +79,7 @@ bool AVRMCExpr::evaluateAsRelocatableImpl(MCValue &Result,
if (Value.isAbsolute()) {
Result = MCValue::get(evaluateAsInt64(Value.getConstant()));
} else {
- if (!Asm || !Asm->hasLayout())
+ if (!Asm || !Asm->getLayout())
return false;
MCContext &Context = Asm->getContext();
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
index cc1d98105b0cb..05fc733825113 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
@@ -122,7 +122,7 @@ bool PPCMCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
Res = MCValue::get(Result);
} else {
- if (!Asm || !Asm->hasLayout())
+ if (!Asm || !Asm->getLayout())
return false;
MCContext &Context = Asm->getContext();
From 123beb7926651217024e5db58b93ab9e8f3c77c7 Mon Sep 17 00:00:00 2001
From: Vasileios Porpodas <vporpodas at google.com>
Date: Tue, 2 Jul 2024 09:02:07 -0700
Subject: [PATCH 004/246] [NFC][SandboxIR] Drop namespace:: prefix.
---
llvm/lib/SandboxIR/SandboxIR.cpp | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/SandboxIR/SandboxIR.cpp b/llvm/lib/SandboxIR/SandboxIR.cpp
index 70101ab10ff12..86757029c821c 100644
--- a/llvm/lib/SandboxIR/SandboxIR.cpp
+++ b/llvm/lib/SandboxIR/SandboxIR.cpp
@@ -10,10 +10,9 @@
#include "llvm/Support/Debug.h"
#include <sstream>
-using namespace llvm;
-using namespace sandboxir;
+using namespace llvm::sandboxir;
-sandboxir::Value::Value(ClassID SubclassID, llvm::Value *Val, Context &Ctx)
+Value::Value(ClassID SubclassID, llvm::Value *Val, Context &Ctx)
: SubclassID(SubclassID), Val(Val), Ctx(Ctx) {
#ifndef NDEBUG
UID = 0; // FIXME: Once SBContext is available.
@@ -21,17 +20,17 @@ sandboxir::Value::Value(ClassID SubclassID, llvm::Value *Val, Context &Ctx)
}
#ifndef NDEBUG
-std::string sandboxir::Value::getName() const {
+std::string Value::getName() const {
std::stringstream SS;
SS << "SB" << UID << ".";
return SS.str();
}
-void sandboxir::Value::dumpCommonHeader(raw_ostream &OS) const {
+void Value::dumpCommonHeader(raw_ostream &OS) const {
OS << getName() << " " << getSubclassIDStr(SubclassID) << " ";
}
-void sandboxir::Value::dumpCommonFooter(raw_ostream &OS) const {
+void Value::dumpCommonFooter(raw_ostream &OS) const {
OS.indent(2) << "Val: ";
if (Val)
OS << *Val;
@@ -40,26 +39,26 @@ void sandboxir::Value::dumpCommonFooter(raw_ostream &OS) const {
OS << "\n";
}
-void sandboxir::Value::dumpCommonPrefix(raw_ostream &OS) const {
+void Value::dumpCommonPrefix(raw_ostream &OS) const {
if (Val)
OS << *Val;
else
OS << "NULL ";
}
-void sandboxir::Value::dumpCommonSuffix(raw_ostream &OS) const {
+void Value::dumpCommonSuffix(raw_ostream &OS) const {
OS << " ; " << getName() << " (" << getSubclassIDStr(SubclassID) << ") "
<< this;
}
-void sandboxir::Value::printAsOperandCommon(raw_ostream &OS) const {
+void Value::printAsOperandCommon(raw_ostream &OS) const {
if (Val)
Val->printAsOperand(OS);
else
OS << "NULL ";
}
-void sandboxir::User::dumpCommonHeader(raw_ostream &OS) const {
+void User::dumpCommonHeader(raw_ostream &OS) const {
Value::dumpCommonHeader(OS);
// TODO: This is incomplete
}
>From 6c3897d90eda4c39789ac9f4efa51db46734a249 Mon Sep 17 00:00:00 2001
From: Giuseppe Rossini <giuseppe.rossini at amd.com>
Date: Tue, 2 Jul 2024 17:12:33 +0100
Subject: [PATCH 005/246] Fix block merging (#96871)
With this PR I am trying to address
https://github.com/llvm/llvm-project/issues/63230.
What changed:
- While merging identical blocks, don't add a block argument if it is
"identical" to another block argument, i.e., if the two block arguments
refer to the same `Value`. The operands of the operations in the block will
point to the argument we already inserted (see the sketch after this list).
- After merging the blocks, get rid of "unnecessary" arguments, i.e., if
all the predecessors pass the same value for an argument, there is no need
to pass it as an argument.
- This last simplification clashed with
`BufferDeallocationSimplification`: that pass contains an analysis based
on the block structure, so if we simplify the block structure (by merging
and/or dropping block arguments) the analysis becomes invalid. The
solution I found is to do a more prudent simplification when running that
pass.
**Note**: many tests are still not passing. But I wanted to submit the
code before changing all the tests (and probably adding a couple), so
that we can agree in principle on the algorithm/design.
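As a sketch of the first bullet (a simplified rendering of the
`RegionUtils.cpp` change below, not the verbatim patch): each predecessor's
forwarded values are deduplicated, and the leader block reuses the block
argument created for the first occurrence of a repeated `Value`:

    // newArguments[i] holds the values predecessor i forwards to the merged
    // block; reuse an existing slot when the same Value is forwarded twice.
    Value operandVal = operand.get();
    auto *found = llvm::find(newArguments[i], operandVal);
    if (found == newArguments[i].end()) {
      newArguments[i].push_back(operandVal);
      if (i == 0) // The leader also materializes the new block argument.
        operand.set(leaderBlock->addArgument(operandVal.getType(),
                                             operandVal.getLoc()));
    } else if (i == 0) {
      // Reuse the block argument created for the earlier occurrence.
      operand.set(leaderBlock->getArgument(
          std::distance(newArguments[i].begin(), found)));
    }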
---
.../BufferDeallocationSimplification.cpp | 9 +-
mlir/lib/Transforms/Utils/RegionUtils.cpp | 144 ++++++++++++++++--
.../dealloc-branchop-interface.mlir | 20 ++-
.../Linalg/detensorize_entry_block.mlir | 6 +-
mlir/test/Dialect/Linalg/detensorize_if.mlir | 67 ++++----
.../Dialect/Linalg/detensorize_while.mlir | 12 +-
.../Linalg/detensorize_while_impure_cf.mlir | 12 +-
.../Linalg/detensorize_while_pure_cf.mlir | 4 +-
.../Transforms/canonicalize-block-merge.mlir | 6 +-
mlir/test/Transforms/canonicalize-dce.mlir | 8 +-
.../Transforms/make-isolated-from-above.mlir | 18 +--
.../test-canonicalize-merge-large-blocks.mlir | 76 +++++++++
12 files changed, 289 insertions(+), 93 deletions(-)
create mode 100644 mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
index 954485cfede3d..5227b22653eef 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
@@ -463,10 +463,15 @@ struct BufferDeallocationSimplificationPass
SplitDeallocWhenNotAliasingAnyOther,
RetainedMemrefAliasingAlwaysDeallocatedMemref>(&getContext(),
analysis);
+ // We don't want the block structure to change and invalidate the
+ // `BufferOriginAnalysis`, so we apply the rewrites with a `Normal` level of
+ // region simplification.
+ GreedyRewriteConfig config;
+ config.enableRegionSimplification = GreedySimplifyRegionLevel::Normal;
populateDeallocOpCanonicalizationPatterns(patterns, &getContext());
- if (failed(
- applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
+ if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns),
+ config)))
signalPassFailure();
}
};
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index 4c0f15bafbaba..412e2456295ad 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -9,6 +9,7 @@
#include "mlir/Transforms/RegionUtils.h"
#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/IR/Block.h"
+#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
@@ -16,11 +17,15 @@
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
+#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include <deque>
+#include <iterator>
using namespace mlir;
@@ -699,9 +704,8 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
blockIterators.push_back(mergeBlock->begin());
// Update each of the predecessor terminators with the new arguments.
- SmallVector<SmallVector<Value, 8>, 2> newArguments(
- 1 + blocksToMerge.size(),
- SmallVector<Value, 8>(operandsToMerge.size()));
+ SmallVector<SmallVector<Value, 8>, 2> newArguments(1 + blocksToMerge.size(),
+ SmallVector<Value, 8>());
unsigned curOpIndex = 0;
for (const auto &it : llvm::enumerate(operandsToMerge)) {
unsigned nextOpOffset = it.value().first - curOpIndex;
@@ -712,13 +716,22 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
Block::iterator &blockIter = blockIterators[i];
std::advance(blockIter, nextOpOffset);
auto &operand = blockIter->getOpOperand(it.value().second);
- newArguments[i][it.index()] = operand.get();
-
- // Update the operand and insert an argument if this is the leader.
- if (i == 0) {
- Value operandVal = operand.get();
- operand.set(leaderBlock->addArgument(operandVal.getType(),
- operandVal.getLoc()));
+ Value operandVal = operand.get();
+ Value *it = std::find(newArguments[i].begin(), newArguments[i].end(),
+ operandVal);
+ if (it == newArguments[i].end()) {
+ newArguments[i].push_back(operandVal);
+ // Update the operand and insert an argument if this is the leader.
+ if (i == 0) {
+ operand.set(leaderBlock->addArgument(operandVal.getType(),
+ operandVal.getLoc()));
+ }
+ } else if (i == 0) {
+ // If this is the leader, update the operand but do not insert a new
+ // argument. Instead, the operand should point to one of the
+ // arguments we already passed (the one that contains `operandVal`).
+ operand.set(leaderBlock->getArgument(
+ std::distance(newArguments[i].begin(), it)));
}
}
}
@@ -818,6 +831,109 @@ static LogicalResult mergeIdenticalBlocks(RewriterBase &rewriter,
return success(anyChanged);
}
+static LogicalResult dropRedundantArguments(RewriterBase &rewriter,
+ Block &block) {
+ SmallVector<size_t> argsToErase;
+
+ // Go through the arguments of the block
+ for (size_t argIdx = 0; argIdx < block.getNumArguments(); argIdx++) {
+ bool sameArg = true;
+ Value commonValue;
+
+ // Go through the block predecessors and flag if they pass different
+ // values to the block for the same argument
+ for (auto predIt = block.pred_begin(), predE = block.pred_end();
+ predIt != predE; ++predIt) {
+ auto branch = dyn_cast<BranchOpInterface>((*predIt)->getTerminator());
+ if (!branch) {
+ sameArg = false;
+ break;
+ }
+ unsigned succIndex = predIt.getSuccessorIndex();
+ SuccessorOperands succOperands = branch.getSuccessorOperands(succIndex);
+ auto operands = succOperands.getForwardedOperands();
+ if (!commonValue) {
+ commonValue = operands[argIdx];
+ } else {
+ if (operands[argIdx] != commonValue) {
+ sameArg = false;
+ break;
+ }
+ }
+ }
+
+ // If they are passing the same value, drop the argument
+ if (commonValue && sameArg) {
+ argsToErase.push_back(argIdx);
+
+ // Remove the argument from the block
+ Value argVal = block.getArgument(argIdx);
+ rewriter.replaceAllUsesWith(argVal, commonValue);
+ }
+ }
+
+ // Remove the arguments
+ for (auto argIdx : llvm::reverse(argsToErase)) {
+ block.eraseArgument(argIdx);
+
+ // Remove the argument from the branch ops
+ for (auto predIt = block.pred_begin(), predE = block.pred_end();
+ predIt != predE; ++predIt) {
+ auto branch = cast<BranchOpInterface>((*predIt)->getTerminator());
+ unsigned succIndex = predIt.getSuccessorIndex();
+ SuccessorOperands succOperands = branch.getSuccessorOperands(succIndex);
+ succOperands.erase(argIdx);
+ }
+ }
+ return success(!argsToErase.empty());
+}
+
+/// This optimization drops redundant arguments to blocks. I.e., if a given
+/// argument to a block receives the same value from each of the block
+/// predecessors, we can remove the argument from the block and directly use
+/// the original value. Here is a simple example:
+///
+/// %cond = llvm.call @rand() : () -> i1
+/// %val0 = llvm.mlir.constant(1 : i64) : i64
+/// %val1 = llvm.mlir.constant(2 : i64) : i64
+/// %val2 = llvm.mlir.constant(3 : i64) : i64
+/// llvm.cond_br %cond, ^bb1(%val0 : i64, %val1 : i64), ^bb2(%val0 : i64, %val2
+/// : i64)
+///
+/// ^bb1(%arg0 : i64, %arg1 : i64):
+/// llvm.call @foo(%arg0, %arg1)
+///
+/// The previous IR can be rewritten as:
+/// %cond = llvm.call @rand() : () -> i1
+/// %val0 = llvm.mlir.constant(1 : i64) : i64
+/// %val1 = llvm.mlir.constant(2 : i64) : i64
+/// %val2 = llvm.mlir.constant(3 : i64) : i64
+/// llvm.cond_br %cond, ^bb1(%val1 : i64), ^bb2(%val2 : i64)
+///
+/// ^bb1(%arg0 : i64):
+/// llvm.call @foo(%val0, %arg0)
+///
+static LogicalResult dropRedundantArguments(RewriterBase &rewriter,
+ MutableArrayRef<Region> regions) {
+ llvm::SmallSetVector<Region *, 1> worklist;
+ for (auto &region : regions)
+ worklist.insert(&region);
+ bool anyChanged = false;
+ while (!worklist.empty()) {
+ Region *region = worklist.pop_back_val();
+
+ // Add any nested regions to the worklist.
+ for (Block &block : *region) {
+ anyChanged = succeeded(dropRedundantArguments(rewriter, block));
+
+ for (auto &op : block)
+ for (auto &nestedRegion : op.getRegions())
+ worklist.insert(&nestedRegion);
+ }
+ }
+ return success(anyChanged);
+}
+
//===----------------------------------------------------------------------===//
// Region Simplification
//===----------------------------------------------------------------------===//
@@ -832,8 +948,12 @@ LogicalResult mlir::simplifyRegions(RewriterBase &rewriter,
bool eliminatedBlocks = succeeded(eraseUnreachableBlocks(rewriter, regions));
bool eliminatedOpsOrArgs = succeeded(runRegionDCE(rewriter, regions));
bool mergedIdenticalBlocks = false;
- if (mergeBlocks)
+ bool droppedRedundantArguments = false;
+ if (mergeBlocks) {
mergedIdenticalBlocks = succeeded(mergeIdenticalBlocks(rewriter, regions));
+ droppedRedundantArguments =
+ succeeded(dropRedundantArguments(rewriter, regions));
+ }
return success(eliminatedBlocks || eliminatedOpsOrArgs ||
- mergedIdenticalBlocks);
+ mergedIdenticalBlocks || droppedRedundantArguments);
}
diff --git a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
index 5e8104f83cc4d..8e14990502143 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
@@ -178,7 +178,7 @@ func.func @condBranchDynamicTypeNested(
// CHECK-NEXT: ^bb1
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb5([[ARG1]], %false{{[0-9_]*}} :
+// CHECK: cf.br ^bb6([[ARG1]], %false{{[0-9_]*}} :
// CHECK: ^bb2([[IDX:%.*]]:{{.*}})
// CHECK: [[ALLOC1:%.*]] = memref.alloc([[IDX]])
// CHECK-NEXT: test.buffer_based
@@ -186,20 +186,24 @@ func.func @condBranchDynamicTypeNested(
// CHECK-NEXT: [[OWN:%.+]] = arith.select [[ARG0]], [[ARG0]], [[NOT_ARG0]]
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.cond_br{{.*}}, ^bb3, ^bb3
+// CHECK: cf.cond_br{{.*}}, ^bb3, ^bb4
// CHECK-NEXT: ^bb3:
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb4([[ALLOC1]], [[OWN]]
-// CHECK-NEXT: ^bb4([[ALLOC2:%.*]]:{{.*}}, [[COND1:%.+]]:{{.*}})
+// CHECK: cf.br ^bb5([[ALLOC1]], [[OWN]]
+// CHECK-NEXT: ^bb4:
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb5([[ALLOC2]], [[COND1]]
-// CHECK-NEXT: ^bb5([[ALLOC4:%.*]]:{{.*}}, [[COND2:%.+]]:{{.*}})
+// CHECK: cf.br ^bb5([[ALLOC1]], [[OWN]]
+// CHECK-NEXT: ^bb5([[ALLOC2:%.*]]:{{.*}}, [[COND1:%.+]]:{{.*}})
+// CHECK-NOT: bufferization.dealloc
+// CHECK-NOT: bufferization.clone
+// CHECK: cf.br ^bb6([[ALLOC2]], [[COND1]]
+// CHECK-NEXT: ^bb6([[ALLOC4:%.*]]:{{.*}}, [[COND2:%.+]]:{{.*}})
// CHECK-NEXT: [[BASE:%[a-zA-Z0-9_]+]]{{.*}} = memref.extract_strided_metadata [[ALLOC4]]
// CHECK-NEXT: [[OWN:%.+]]:2 = bufferization.dealloc ([[BASE]] :{{.*}}) if ([[COND2]]) retain ([[ALLOC4]], [[ARG2]] :
-// CHECK: cf.br ^bb6([[ALLOC4]], [[OWN]]#0
-// CHECK-NEXT: ^bb6([[ALLOC5:%.*]]:{{.*}}, [[COND3:%.+]]:{{.*}})
+// CHECK: cf.br ^bb7([[ALLOC4]], [[OWN]]#0
+// CHECK-NEXT: ^bb7([[ALLOC5:%.*]]:{{.*}}, [[COND3:%.+]]:{{.*}})
// CHECK: test.copy
// CHECK: [[BASE:%[a-zA-Z0-9_]+]]{{.*}} = memref.extract_strided_metadata [[ALLOC5]]
// CHECK-NEXT: bufferization.dealloc ([[BASE]] : {{.*}}) if ([[COND3]])
diff --git a/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir b/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
index d1a89226fdb58..50a2d6bf532aa 100644
--- a/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
@@ -15,7 +15,7 @@ func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
// CHECK-LABEL: @main
// CHECK-SAME: (%[[ARG0:.+]]: tensor<f32>) -> tensor<f32>
// CHECK: %[[EXTRACTED:.+]] = tensor.extract %[[ARG0]][] : tensor<f32>
-// CHECK: cf.br ^{{.*}}(%[[EXTRACTED]] : f32)
-// CHECK: ^{{.*}}(%[[ARG1:.+]]: f32):
-// CHECK: %[[ELEMENTS:.+]] = tensor.from_elements %[[ARG1]] : tensor<f32>
+// CHECK: cf.br ^{{.*}}
+// CHECK: ^{{.*}}:
+// CHECK: %[[ELEMENTS:.+]] = tensor.from_elements %[[EXTRACTED]] : tensor<f32>
// CHECK: return %[[ELEMENTS]] : tensor<f32>
diff --git a/mlir/test/Dialect/Linalg/detensorize_if.mlir b/mlir/test/Dialect/Linalg/detensorize_if.mlir
index 8d17763c04b6c..c728ad21d2209 100644
--- a/mlir/test/Dialect/Linalg/detensorize_if.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_if.mlir
@@ -42,18 +42,15 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: arith.constant 0
-// CHECK-DAG: arith.constant 10
-// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
-// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
-// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
-// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
-// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
-// CHECK-NEXT: return %{{.*}}
+// CHECK-DAG: %[[cst:.*]] = arith.constant dense<0>
+// CHECK-DAG: arith.constant true
+// CHECK: cf.br
+// CHECK-NEXT: ^[[bb1:.*]]:
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb3
+// CHECK-NEXT: ^[[bb2]]
+// CHECK-NEXT: cf.br ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb3]]
+// CHECK-NEXT: return %[[cst]]
// CHECK-NEXT: }
// -----
@@ -106,20 +103,17 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: arith.constant 0
-// CHECK-DAG: arith.constant 10
-// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
-// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
-// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
-// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
-// CHECK-NEXT: cf.br ^[[bb4:.*]](%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb4]](%{{.*}}: i32)
-// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
-// CHECK-NEXT: return %{{.*}}
+// CHECK-DAG: %[[cst:.*]] = arith.constant dense<0>
+// CHECK-DAG: arith.constant true
+// CHECK: cf.br ^[[bb1:.*]]
+// CHECK-NEXT: ^[[bb1:.*]]:
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb3
+// CHECK-NEXT: ^[[bb2]]:
+// CHECK-NEXT: cf.br ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb3]]:
+// CHECK-NEXT: cf.br ^[[bb4:.*]]
+// CHECK-NEXT: ^[[bb4]]:
+// CHECK-NEXT: return %[[cst]]
// CHECK-NEXT: }
// -----
@@ -171,16 +165,13 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: arith.constant 0
-// CHECK-DAG: arith.constant 10
-// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
-// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
-// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb2(%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
-// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
-// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
-// CHECK-NEXT: return %{{.*}}
+// CHECK-DAG: %[[cst:.*]] = arith.constant dense<10>
+// CHECK-DAG: arith.constant true
+// CHECK: cf.br ^[[bb1:.*]]
+// CHECK-NEXT: ^[[bb1]]:
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb2
+// CHECK-NEXT: ^[[bb2]]
+// CHECK-NEXT: cf.br ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb3]]
+// CHECK-NEXT: return %[[cst]]
// CHECK-NEXT: }
diff --git a/mlir/test/Dialect/Linalg/detensorize_while.mlir b/mlir/test/Dialect/Linalg/detensorize_while.mlir
index aa30900f76a33..580a97d3a851b 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while.mlir
@@ -46,11 +46,11 @@ func.func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attribu
// DET-ALL: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// DET-ALL: ^[[bb1]](%{{.*}}: i32)
// DET-ALL: arith.cmpi slt, {{.*}}
-// DET-ALL: cf.cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
-// DET-ALL: ^[[bb2]](%{{.*}}: i32)
+// DET-ALL: cf.cond_br {{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
+// DET-ALL: ^[[bb2]]
// DET-ALL: arith.addi {{.*}}
// DET-ALL: cf.br ^[[bb1]](%{{.*}} : i32)
-// DET-ALL: ^[[bb3]](%{{.*}}: i32)
+// DET-ALL: ^[[bb3]]:
// DET-ALL: tensor.from_elements {{.*}}
// DET-ALL: return %{{.*}} : tensor<i32>
@@ -62,10 +62,10 @@ func.func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attribu
// DET-CF: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// DET-CF: ^[[bb1]](%{{.*}}: i32)
// DET-CF: arith.cmpi slt, {{.*}}
-// DET-CF: cf.cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
-// DET-CF: ^[[bb2]](%{{.*}}: i32)
+// DET-CF: cf.cond_br {{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
+// DET-CF: ^[[bb2]]:
// DET-CF: arith.addi {{.*}}
// DET-CF: cf.br ^[[bb1]](%{{.*}} : i32)
-// DET-CF: ^[[bb3]](%{{.*}}: i32)
+// DET-CF: ^[[bb3]]:
// DET-CF: tensor.from_elements %{{.*}} : tensor<i32>
// DET-CF: return %{{.*}} : tensor<i32>
diff --git a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
index 955c7be5ef4c8..414d9b94cbf53 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
@@ -74,8 +74,8 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-ALL: } -> tensor<i32>
// DET-ALL: tensor.extract %{{.*}}[] : tensor<i32>
// DET-ALL: cmpi slt, %{{.*}}, %{{.*}} : i32
-// DET-ALL: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
-// DET-ALL: ^[[bb2]](%{{.*}}: i32)
+// DET-ALL: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
+// DET-ALL: ^[[bb2]]:
// DET-ALL: tensor.from_elements %{{.*}} : tensor<i32>
// DET-ALL: tensor.empty() : tensor<10xi32>
// DET-ALL: linalg.generic {{{.*}}} ins(%{{.*}} : tensor<i32>) outs(%{{.*}} : tensor<10xi32>) {
@@ -83,7 +83,7 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-ALL: linalg.yield %{{.*}} : i32
// DET-ALL: } -> tensor<10xi32>
// DET-ALL: cf.br ^[[bb1]](%{{.*}} : tensor<10xi32>)
-// DET-ALL: ^[[bb3]](%{{.*}}: i32)
+// DET-ALL: ^[[bb3]]
// DET-ALL: tensor.from_elements %{{.*}} : tensor<i32>
// DET-ALL: return %{{.*}} : tensor<i32>
// DET-ALL: }
@@ -95,10 +95,10 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-CF: %{{.*}} = linalg.generic {{{.*}}} ins(%{{.*}} : tensor<10xi32>) outs(%{{.*}} : tensor<i32>) {
// DET-CF: tensor.extract %{{.*}}[] : tensor<i32>
// DET-CF: cmpi slt, %{{.*}}, %{{.*}} : i32
-// DET-CF: cf.cond_br %{{.*}}, ^bb2(%{{.*}} : tensor<i32>), ^bb3(%{{.*}} : tensor<i32>)
-// DET-CF: ^bb2(%{{.*}}: tensor<i32>)
+// DET-CF: cf.cond_br %{{.*}}, ^bb2, ^bb3
+// DET-CF: ^bb2:
// DET-CF: %{{.*}} = linalg.generic {{{.*}}} ins(%{{.*}} : tensor<i32>) outs(%{{.*}} : tensor<10xi32>) {
// DET-CF: cf.br ^bb1(%{{.*}} : tensor<10xi32>)
-// DET-CF: ^bb3(%{{.*}}: tensor<i32>)
+// DET-CF: ^bb3:
// DET-CF: return %{{.*}} : tensor<i32>
// DET-CF: }
diff --git a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
index 6d8d5fe71fca5..913e78272db79 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
@@ -49,8 +49,8 @@ func.func @main() -> () attributes {} {
// CHECK-NEXT: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32)
// CHECK-NEXT: %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]]
-// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb2]]
// CHECK-NEXT: %{{.*}} = arith.addi %{{.*}}, %{{.*}}
// CHECK-NEXT: cf.br ^[[bb1]](%{{.*}} : i32)
// CHECK-NEXT: ^[[bb3]]:
diff --git a/mlir/test/Transforms/canonicalize-block-merge.mlir b/mlir/test/Transforms/canonicalize-block-merge.mlir
index 3b8b1fce0575a..92cfde817cf7f 100644
--- a/mlir/test/Transforms/canonicalize-block-merge.mlir
+++ b/mlir/test/Transforms/canonicalize-block-merge.mlir
@@ -87,7 +87,7 @@ func.func @mismatch_operands_matching_arguments(%cond : i1, %arg0 : i32, %arg1 :
// CHECK-LABEL: func @mismatch_argument_uses(
func.func @mismatch_argument_uses(%cond : i1, %arg0 : i32, %arg1 : i32) -> (i32, i32) {
- // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
+ // CHECK: return {{.*}}, {{.*}}
cf.cond_br %cond, ^bb1(%arg1 : i32), ^bb2(%arg0 : i32)
@@ -101,7 +101,7 @@ func.func @mismatch_argument_uses(%cond : i1, %arg0 : i32, %arg1 : i32) -> (i32,
// CHECK-LABEL: func @mismatch_argument_types(
func.func @mismatch_argument_types(%cond : i1, %arg0 : i32, %arg1 : i16) {
- // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
+ // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2
cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2(%arg1 : i16)
@@ -115,7 +115,7 @@ func.func @mismatch_argument_types(%cond : i1, %arg0 : i32, %arg1 : i16) {
// CHECK-LABEL: func @mismatch_argument_count(
func.func @mismatch_argument_count(%cond : i1, %arg0 : i32) {
- // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
+ // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2
cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2
diff --git a/mlir/test/Transforms/canonicalize-dce.mlir b/mlir/test/Transforms/canonicalize-dce.mlir
index ac034d567a26a..84631947970de 100644
--- a/mlir/test/Transforms/canonicalize-dce.mlir
+++ b/mlir/test/Transforms/canonicalize-dce.mlir
@@ -137,10 +137,10 @@ func.func @f(%arg0: f32) {
// Test case: Test the mechanics of deleting multiple block arguments.
// CHECK: func @f(%arg0: tensor<1xf32>, %arg1: tensor<2xf32>, %arg2: tensor<3xf32>, %arg3: tensor<4xf32>, %arg4: tensor<5xf32>)
-// CHECK-NEXT: "test.br"(%arg1, %arg3)[^bb1] : (tensor<2xf32>, tensor<4xf32>)
-// CHECK-NEXT: ^bb1([[VAL0:%.+]]: tensor<2xf32>, [[VAL1:%.+]]: tensor<4xf32>):
-// CHECK-NEXT: "foo.print"([[VAL0]])
-// CHECK-NEXT: "foo.print"([[VAL1]])
+// CHECK-NEXT: "test.br"()[^bb1]
+// CHECK-NEXT: ^bb1:
+// CHECK-NEXT: "foo.print"(%arg1)
+// CHECK-NEXT: "foo.print"(%arg3)
// CHECK-NEXT: return
diff --git a/mlir/test/Transforms/make-isolated-from-above.mlir b/mlir/test/Transforms/make-isolated-from-above.mlir
index 58f6cfbc5dd65..a9d4325944fd9 100644
--- a/mlir/test/Transforms/make-isolated-from-above.mlir
+++ b/mlir/test/Transforms/make-isolated-from-above.mlir
@@ -78,9 +78,9 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[EMPTY]], %[[C1]]
// CHECK: test.isolated_one_region_op %[[ARG2]], %[[C0]], %[[C1]], %[[D0]], %[[D1]]
// CHECK-NEXT: ^bb0(%[[B0:[a-zA-Z0-9]+]]: index, %[[B1:[a-zA-Z0-9]+]]: index, %[[B2:[a-zA-Z0-9]+]]: index, %[[B3:[a-zA-Z0-9]+]]: index, %[[B4:[a-zA-Z0-9]+]]: index)
-// CHECK-NEXT: cf.br ^bb1(%[[B0]] : index)
-// CHECK: ^bb1(%[[B5:.+]]: index)
-// CHECK: "foo.yield"(%[[B1]], %[[B2]], %[[B3]], %[[B4]], %[[B5]])
+// CHECK-NEXT: cf.br ^bb1
+// CHECK: ^bb1:
+// CHECK: "foo.yield"(%[[B1]], %[[B2]], %[[B3]], %[[B4]], %[[B0]])
// CLONE1-LABEL: func @make_isolated_from_above_multiple_blocks(
// CLONE1-SAME: %[[ARG0:[a-zA-Z0-9]+]]: index
@@ -95,9 +95,9 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CLONE1-NEXT: ^bb0(%[[B0:[a-zA-Z0-9]+]]: index, %[[B1:[a-zA-Z0-9]+]]: index, %[[B2:[a-zA-Z0-9]+]]: index)
// CLONE1-DAG: %[[C0_0:.+]] = arith.constant 0 : index
// CLONE1-DAG: %[[C1_0:.+]] = arith.constant 1 : index
-// CLONE1-NEXT: cf.br ^bb1(%[[B0]] : index)
-// CLONE1: ^bb1(%[[B3:.+]]: index)
-// CLONE1: "foo.yield"(%[[C0_0]], %[[C1_0]], %[[B1]], %[[B2]], %[[B3]])
+// CLONE1-NEXT: cf.br ^bb1
+// CLONE1: ^bb1:
+// CLONE1: "foo.yield"(%[[C0_0]], %[[C1_0]], %[[B1]], %[[B2]], %[[B0]])
// CLONE2-LABEL: func @make_isolated_from_above_multiple_blocks(
// CLONE2-SAME: %[[ARG0:[a-zA-Z0-9]+]]: index
@@ -110,6 +110,6 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CLONE2-DAG: %[[EMPTY:.+]] = tensor.empty(%[[B1]], %[[B2]])
// CLONE2-DAG: %[[D0:.+]] = tensor.dim %[[EMPTY]], %[[C0]]
// CLONE2-DAG: %[[D1:.+]] = tensor.dim %[[EMPTY]], %[[C1]]
-// CLONE2-NEXT: cf.br ^bb1(%[[B0]] : index)
-// CLONE2: ^bb1(%[[B3:.+]]: index)
-// CLONE2: "foo.yield"(%[[C0]], %[[C1]], %[[D0]], %[[D1]], %[[B3]])
+// CLONE2-NEXT: cf.br ^bb1
+// CLONE2: ^bb1:
+// CLONE2: "foo.yield"(%[[C0]], %[[C1]], %[[D0]], %[[D1]], %[[B0]])
diff --git a/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir b/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir
new file mode 100644
index 0000000000000..570ff6905a04d
--- /dev/null
+++ b/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir
@@ -0,0 +1,76 @@
+ // RUN: mlir-opt -pass-pipeline='builtin.module(llvm.func(canonicalize{region-simplify=aggressive}))' %s | FileCheck %s
+
+llvm.func @foo(%arg0: i64)
+
+llvm.func @rand() -> i1
+
+// CHECK-LABEL: func @large_merge_block(
+llvm.func @large_merge_block(%arg0: i64) {
+ // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+ // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+ // CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
+ // CHECK: %[[C3:.*]] = llvm.mlir.constant(3 : i64) : i64
+ // CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : i64) : i64
+
+ // CHECK: llvm.cond_br %5, ^bb1(%[[C1]], %[[C3]], %[[C4]], %[[C2]] : i64, i64, i64, i64), ^bb1(%[[C4]], %[[C2]], %[[C1]], %[[C3]] : i64, i64, i64, i64)
+ // CHECK: ^bb{{.*}}(%[[arg0:.*]]: i64, %[[arg1:.*]]: i64, %[[arg2:.*]]: i64, %[[arg3:.*]]: i64):
+ // CHECK: llvm.cond_br %{{.*}}, ^bb2(%[[arg0]] : i64), ^bb2(%[[arg3]] : i64)
+ // CHECK: ^bb{{.*}}(%11: i64):
+ // CHECK: llvm.br ^bb{{.*}}
+ // CHECK: ^bb{{.*}}:
+ // CHECK: llvm.call
+ // CHECK: llvm.cond_br {{.*}}, ^bb{{.*}}(%[[arg1]] : i64), ^bb{{.*}}(%[[arg2]] : i64)
+ // CHECK: ^bb{{.*}}:
+ // CHECK: llvm.call
+ // CHECK: llvm.br ^bb{{.*}}
+
+ %0 = llvm.mlir.constant(0 : i64) : i64
+ %1 = llvm.mlir.constant(1 : i64) : i64
+ %2 = llvm.mlir.constant(2 : i64) : i64
+ %3 = llvm.mlir.constant(3 : i64) : i64
+ %4 = llvm.mlir.constant(4 : i64) : i64
+ %10 = llvm.icmp "eq" %arg0, %0 : i64
+ llvm.cond_br %10, ^bb1, ^bb14
+^bb1: // pred: ^bb0
+ %11 = llvm.call @rand() : () -> i1
+ llvm.cond_br %11, ^bb2, ^bb3
+^bb2: // pred: ^bb1
+ llvm.call @foo(%1) : (i64) -> ()
+ llvm.br ^bb4
+^bb3: // pred: ^bb1
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.br ^bb4
+^bb4: // 2 preds: ^bb2, ^bb3
+ %14 = llvm.call @rand() : () -> i1
+ llvm.cond_br %14, ^bb5, ^bb6
+^bb5: // pred: ^bb4
+ llvm.call @foo(%3) : (i64) -> ()
+ llvm.br ^bb13
+^bb6: // pred: ^bb4
+ llvm.call @foo(%4) : (i64) -> ()
+ llvm.br ^bb13
+^bb13: // 2 preds: ^bb11, ^bb12
+ llvm.br ^bb27
+^bb14: // pred: ^bb0
+ %23 = llvm.call @rand() : () -> i1
+ llvm.cond_br %23, ^bb15, ^bb16
+^bb15: // pred: ^bb14
+ llvm.call @foo(%4) : (i64) -> ()
+ llvm.br ^bb17
+^bb16: // pred: ^bb14
+ llvm.call @foo(%3) : (i64) -> ()
+ llvm.br ^bb17
+^bb17: // 2 preds: ^bb15, ^bb16
+ %26 = llvm.call @rand() : () -> i1
+ llvm.cond_br %26, ^bb18, ^bb19
+^bb18: // pred: ^bb17
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.br ^bb26
+^bb19: // pred: ^bb17
+ llvm.call @foo(%1) : (i64) -> ()
+ llvm.br ^bb26
+^bb26: // 2 preds: ^bb24, ^bb25
+ llvm.br ^bb27
+^bb27: // 2 preds: ^bb13, ^bb26
+ llvm.return
+}
From 6820b0871807abff07df118659e0de2ca741cb0b Mon Sep 17 00:00:00 2001
From: srcarroll <50210727+srcarroll at users.noreply.github.com>
Date: Tue, 2 Jul 2024 11:12:51 -0500
Subject: [PATCH 006/246] Refactor LoopFuseSiblingOp and support parallel
fusion (#94391)
This patch refactors code related to the `LoopFuseSiblingOp` transform in
an attempt to reduce duplicated common code. The aim is to refactor as much
as possible into functions on `LoopLikeOpInterface`, but this is still a
work in progress. A full refactor will require more additions to the
`LoopLikeOpInterface`.
In addition, `scf.parallel` fusion support has been added; a usage sketch
of the new helpers follows.
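A hedged usage sketch of the new helpers (the driver code and variable names
are hypothetical; the signatures come from the headers in this diff):

    // Fuse two independent sibling scf.parallel loops, `target` into `source`.
    Diagnostic diag(target.getLoc(), DiagnosticSeverity::Error);
    if (!checkFusionStructuralLegality(target, source, diag))
      return failure(); // iteration spaces must match and dominance must hold
    scf::ParallelOp fused =
        fuseIndependentSiblingParallelLoops(target, source, rewriter);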
---
mlir/include/mlir/Dialect/SCF/IR/SCFOps.td | 3 +-
mlir/include/mlir/Dialect/SCF/Utils/Utils.h | 20 ++
.../mlir/Interfaces/LoopLikeInterface.h | 20 ++
mlir/lib/Dialect/SCF/IR/SCF.cpp | 38 +++
.../SCF/TransformOps/SCFTransformOps.cpp | 140 ++-------
.../SCF/Transforms/ParallelLoopFusion.cpp | 80 +----
mlir/lib/Dialect/SCF/Utils/Utils.cpp | 279 ++++++++++++------
mlir/lib/Interfaces/LoopLikeInterface.cpp | 55 ++++
.../SCF/transform-loop-fuse-sibling.mlir | 234 ++++++++++++++-
9 files changed, 586 insertions(+), 283 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
index f35ea962bea16..bf95fbe6721cf 100644
--- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
@@ -303,7 +303,8 @@ def ForallOp : SCF_Op<"forall", [
DeclareOpInterfaceMethods<LoopLikeOpInterface,
["getInitsMutable", "getRegionIterArgs", "getLoopInductionVars",
"getLoopLowerBounds", "getLoopUpperBounds", "getLoopSteps",
- "promoteIfSingleIteration", "yieldTiledValuesAndReplace"]>,
+ "replaceWithAdditionalYields", "promoteIfSingleIteration",
+ "yieldTiledValuesAndReplace"]>,
RecursiveMemoryEffects,
SingleBlockImplicitTerminator<"scf::InParallelOp">,
DeclareOpInterfaceMethods<RegionBranchOpInterface>,
diff --git a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
index de807c3e4e1f8..6a40304e2eeba 100644
--- a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
@@ -181,6 +181,16 @@ Loops tilePerfectlyNested(scf::ForOp rootForOp, ArrayRef<Value> sizes);
void getPerfectlyNestedLoops(SmallVectorImpl<scf::ForOp> &nestedLoops,
scf::ForOp root);
+//===----------------------------------------------------------------------===//
+// Fusion related helpers
+//===----------------------------------------------------------------------===//
+
+/// Check structural compatibility between two loops such as iteration space
+/// and dominance.
+bool checkFusionStructuralLegality(LoopLikeOpInterface target,
+ LoopLikeOpInterface source,
+ Diagnostic &diag);
+
/// Given two scf.forall loops, `target` and `source`, fuses `target` into
/// `source`. Assumes that the given loops are siblings and are independent of
/// each other.
@@ -202,6 +212,16 @@ scf::ForallOp fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForOp fuseIndependentSiblingForLoops(scf::ForOp target, scf::ForOp source,
RewriterBase &rewriter);
+/// Given two scf.parallel loops, `target` and `source`, fuses `target` into
+/// `source`. Assumes that the given loops are siblings and are independent of
+/// each other.
+///
+/// This function does not perform any legality checks and simply fuses the
+/// loops. The caller is responsible for ensuring that the loops are legal to
+/// fuse.
+scf::ParallelOp fuseIndependentSiblingParallelLoops(scf::ParallelOp target,
+ scf::ParallelOp source,
+ RewriterBase &rewriter);
} // namespace mlir
#endif // MLIR_DIALECT_SCF_UTILS_UTILS_H_
diff --git a/mlir/include/mlir/Interfaces/LoopLikeInterface.h b/mlir/include/mlir/Interfaces/LoopLikeInterface.h
index 9925fc6ce6ca9..d08e097a9b4af 100644
--- a/mlir/include/mlir/Interfaces/LoopLikeInterface.h
+++ b/mlir/include/mlir/Interfaces/LoopLikeInterface.h
@@ -90,4 +90,24 @@ struct JamBlockGatherer {
/// Include the generated interface declarations.
#include "mlir/Interfaces/LoopLikeInterface.h.inc"
+namespace mlir {
+/// A function that rewrites `target`'s terminator as a terminator obtained by
+/// fusing `source` into `target`.
+using FuseTerminatorFn =
+ function_ref<void(RewriterBase &rewriter, LoopLikeOpInterface source,
+ LoopLikeOpInterface &target, IRMapping mapping)>;
+
+/// Returns a fused `LoopLikeOpInterface` created by fusing `source` to
+/// `target`. The `NewYieldValuesFn` callback is used to pass to the
+/// `replaceWithAdditionalYields` interface method to replace the loop with a
+/// new loop with (possibly) additional yields, while the `FuseTerminatorFn`
+/// callback is responsible for updating the fused loop terminator.
+LoopLikeOpInterface createFused(LoopLikeOpInterface target,
+ LoopLikeOpInterface source,
+ RewriterBase &rewriter,
+ NewYieldValuesFn newYieldValuesFn,
+ FuseTerminatorFn fuseTerminatorFn);
+
+} // namespace mlir
+
#endif // MLIR_INTERFACES_LOOPLIKEINTERFACE_H_
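To make the callback contract concrete, here is a hedged sketch of driving
`createFused`; both lambdas are illustrative placeholders (`sourceInits` is a
hypothetical value range), and only the signatures come from the declarations
above:

    // Ask the fused loop to additionally yield the source loop's init values.
    NewYieldValuesFn newYieldValuesFn =
        [&](OpBuilder &b, Location loc,
            ArrayRef<BlockArgument> newBBArgs) -> SmallVector<Value> {
      return SmallVector<Value>(sourceInits.begin(), sourceInits.end());
    };
    // Splice the source terminator's behavior into the fused loop terminator.
    FuseTerminatorFn fuseTerminatorFn =
        [&](RewriterBase &rw, LoopLikeOpInterface source,
            LoopLikeOpInterface &target, IRMapping mapping) {
      // ...rewrite target's terminator using values remapped from source...
    };
    LoopLikeOpInterface fused = createFused(target, source, rewriter,
                                            newYieldValuesFn, fuseTerminatorFn);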
diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index 907d7f794593d..cb15e0ecebf05 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -618,6 +618,44 @@ void ForOp::getSuccessorRegions(RegionBranchPoint point,
SmallVector<Region *> ForallOp::getLoopRegions() { return {&getRegion()}; }
+FailureOr<LoopLikeOpInterface> ForallOp::replaceWithAdditionalYields(
+ RewriterBase &rewriter, ValueRange newInitOperands,
+ bool replaceInitOperandUsesInLoop,
+ const NewYieldValuesFn &newYieldValuesFn) {
+ // Create a new loop before the existing one, with the extra operands.
+ OpBuilder::InsertionGuard g(rewriter);
+ rewriter.setInsertionPoint(getOperation());
+ SmallVector<Value> inits(getOutputs());
+ llvm::append_range(inits, newInitOperands);
+ scf::ForallOp newLoop = rewriter.create<scf::ForallOp>(
+ getLoc(), getMixedLowerBound(), getMixedUpperBound(), getMixedStep(),
+ inits, getMapping(),
+ /*bodyBuilderFn =*/[](OpBuilder &, Location, ValueRange) {});
+
+ // Move the loop body to the new op.
+ rewriter.mergeBlocks(getBody(), newLoop.getBody(),
+ newLoop.getBody()->getArguments().take_front(
+ getBody()->getNumArguments()));
+
+ if (replaceInitOperandUsesInLoop) {
+ // Replace all uses of `newInitOperands` with the corresponding basic block
+ // arguments.
+ for (auto &&[newOperand, oldOperand] :
+ llvm::zip(newInitOperands, newLoop.getBody()->getArguments().take_back(
+ newInitOperands.size()))) {
+ rewriter.replaceUsesWithIf(newOperand, oldOperand, [&](OpOperand &use) {
+ Operation *user = use.getOwner();
+ return newLoop->isProperAncestor(user);
+ });
+ }
+ }
+
+ // Replace the old loop.
+ rewriter.replaceOp(getOperation(),
+ newLoop->getResults().take_front(getNumResults()));
+ return cast<LoopLikeOpInterface>(newLoop.getOperation());
+}
+
/// Promotes the loop body of a forallOp to its containing block if it can be
/// determined that the loop has a single iteration.
LogicalResult scf::ForallOp::promoteIfSingleIteration(RewriterBase &rewriter) {
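
A sketch of invoking the new hook; `forallOp`, `extraInit` and `rewriter` are
assumed to exist in the caller:

  // Grow an scf.forall with one extra shared_out. Since scf.forall yields
  // through its in_parallel terminator rather than scf.yield, the callback
  // returns no values.
  SmallVector<Value> newInits = {extraInit};
  FailureOr<LoopLikeOpInterface> grown = forallOp.replaceWithAdditionalYields(
      rewriter, newInits,
      /*replaceInitOperandUsesInLoop=*/false,
      [](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
        return SmallVector<Value>{};
      });
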
diff --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
index 56ff2709a589e..41834fea3bb84 100644
--- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
+++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
@@ -261,8 +261,10 @@ loopScheduling(scf::ForOp forOp,
return 1;
};
- std::optional<int64_t> ubConstant = getConstantIntValue(forOp.getUpperBound());
- std::optional<int64_t> lbConstant = getConstantIntValue(forOp.getLowerBound());
+ std::optional<int64_t> ubConstant =
+ getConstantIntValue(forOp.getUpperBound());
+ std::optional<int64_t> lbConstant =
+ getConstantIntValue(forOp.getLowerBound());
DenseMap<Operation *, unsigned> opCycles;
std::map<unsigned, std::vector<Operation *>> wrappedSchedule;
for (Operation &op : forOp.getBody()->getOperations()) {
@@ -447,113 +449,6 @@ void transform::TakeAssumedBranchOp::getEffects(
// LoopFuseSiblingOp
//===----------------------------------------------------------------------===//
-/// Check if `target` and `source` are siblings, in the context that `target`
-/// is being fused into `source`.
-///
-/// This is a simple check that just checks if both operations are in the same
-/// block and some checks to ensure that the fused IR does not violate
-/// dominance.
-static DiagnosedSilenceableFailure isOpSibling(Operation *target,
- Operation *source) {
- // Check if both operations are same.
- if (target == source)
- return emitSilenceableFailure(source)
- << "target and source need to be different loops";
-
- // Check if both operations are in the same block.
- if (target->getBlock() != source->getBlock())
- return emitSilenceableFailure(source)
- << "target and source are not in the same block";
-
- // Check if fusion will violate dominance.
- DominanceInfo domInfo(source);
- if (target->isBeforeInBlock(source)) {
- // Since `target` is before `source`, all users of results of `target`
- // need to be dominated by `source`.
- for (Operation *user : target->getUsers()) {
- if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
- return emitSilenceableFailure(target)
- << "user of results of target should be properly dominated by "
- "source";
- }
- }
- } else {
- // Since `target` is after `source`, all values used by `target` need
- // to dominate `source`.
-
- // Check if operands of `target` are dominated by `source`.
- for (Value operand : target->getOperands()) {
- Operation *operandOp = operand.getDefiningOp();
- // Operands without defining operations are block arguments. When `target`
- // and `source` occur in the same block, these operands dominate `source`.
- if (!operandOp)
- continue;
-
- // Operand's defining operation should properly dominate `source`.
- if (!domInfo.properlyDominates(operandOp, source,
- /*enclosingOpOk=*/false))
- return emitSilenceableFailure(target)
- << "operands of target should be properly dominated by source";
- }
-
- // Check if values used by `target` are dominated by `source`.
- bool failed = false;
- OpOperand *failedValue = nullptr;
- visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
- Operation *operandOp = operand->get().getDefiningOp();
- if (operandOp && !domInfo.properlyDominates(operandOp, source,
- /*enclosingOpOk=*/false)) {
- // `operand` is not an argument of an enclosing block and the defining
- // op of `operand` is outside `target` but does not dominate `source`.
- failed = true;
- failedValue = operand;
- }
- });
-
- if (failed)
- return emitSilenceableFailure(failedValue->getOwner())
- << "values used inside regions of target should be properly "
- "dominated by source";
- }
-
- return DiagnosedSilenceableFailure::success();
-}
-
-/// Check if `target` scf.forall can be fused into `source` scf.forall.
-///
-/// This simply checks if both loops have the same bounds, steps and mapping.
-/// No attempt is made at checking that the side effects of `target` and
-/// `source` are independent of each other.
-static bool isForallWithIdenticalConfiguration(Operation *target,
- Operation *source) {
- auto targetOp = dyn_cast<scf::ForallOp>(target);
- auto sourceOp = dyn_cast<scf::ForallOp>(source);
- if (!targetOp || !sourceOp)
- return false;
-
- return targetOp.getMixedLowerBound() == sourceOp.getMixedLowerBound() &&
- targetOp.getMixedUpperBound() == sourceOp.getMixedUpperBound() &&
- targetOp.getMixedStep() == sourceOp.getMixedStep() &&
- targetOp.getMapping() == sourceOp.getMapping();
-}
-
-/// Check if `target` scf.for can be fused into `source` scf.for.
-///
-/// This simply checks if both loops have the same bounds and steps. No attempt
-/// is made at checking that the side effects of `target` and `source` are
-/// independent of each other.
-static bool isForWithIdenticalConfiguration(Operation *target,
- Operation *source) {
- auto targetOp = dyn_cast<scf::ForOp>(target);
- auto sourceOp = dyn_cast<scf::ForOp>(source);
- if (!targetOp || !sourceOp)
- return false;
-
- return targetOp.getLowerBound() == sourceOp.getLowerBound() &&
- targetOp.getUpperBound() == sourceOp.getUpperBound() &&
- targetOp.getStep() == sourceOp.getStep();
-}
-
DiagnosedSilenceableFailure
transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
transform::TransformResults &results,
@@ -569,25 +464,32 @@ transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
<< "source handle (got " << llvm::range_size(sourceOps) << ")";
}
- Operation *target = *targetOps.begin();
- Operation *source = *sourceOps.begin();
+ auto target = dyn_cast<LoopLikeOpInterface>(*targetOps.begin());
+ auto source = dyn_cast<LoopLikeOpInterface>(*sourceOps.begin());
+  if (!target || !source)
+    return emitSilenceableFailure((*targetOps.begin())->getLoc())
+           << "target or source is not a loop op";
- // Check if the target and source are siblings.
- DiagnosedSilenceableFailure diag = isOpSibling(target, source);
- if (!diag.succeeded())
- return diag;
+  // Check if the loops can be fused.
+ Diagnostic diag(target.getLoc(), DiagnosticSeverity::Error);
+ if (!mlir::checkFusionStructuralLegality(target, source, diag))
+ return DiagnosedSilenceableFailure::silenceableFailure(std::move(diag));
Operation *fusedLoop;
- /// TODO: Support fusion for loop-like ops besides scf.for and scf.forall.
- if (isForWithIdenticalConfiguration(target, source)) {
+ // TODO: Support fusion for loop-like ops besides scf.for, scf.forall
+ // and scf.parallel.
+ if (isa<scf::ForOp>(target) && isa<scf::ForOp>(source)) {
fusedLoop = fuseIndependentSiblingForLoops(
cast<scf::ForOp>(target), cast<scf::ForOp>(source), rewriter);
- } else if (isForallWithIdenticalConfiguration(target, source)) {
+ } else if (isa<scf::ForallOp>(target) && isa<scf::ForallOp>(source)) {
fusedLoop = fuseIndependentSiblingForallLoops(
cast<scf::ForallOp>(target), cast<scf::ForallOp>(source), rewriter);
+ } else if (isa<scf::ParallelOp>(target) && isa<scf::ParallelOp>(source)) {
+ fusedLoop = fuseIndependentSiblingParallelLoops(
+ cast<scf::ParallelOp>(target), cast<scf::ParallelOp>(source), rewriter);
} else
return emitSilenceableFailure(target->getLoc())
- << "operations cannot be fused";
+ << "unsupported loop type for fusion";
assert(fusedLoop && "failed to fuse operations");
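
The dispatch above could also be packaged as a standalone helper; a sketch
(the helper name is invented for illustration):

  // Fuse two same-typed sibling loops; returns null for unsupported types.
  static Operation *fuseSiblings(LoopLikeOpInterface target,
                                 LoopLikeOpInterface source,
                                 RewriterBase &rewriter) {
    if (isa<scf::ForOp>(target) && isa<scf::ForOp>(source))
      return fuseIndependentSiblingForLoops(
          cast<scf::ForOp>(target), cast<scf::ForOp>(source), rewriter);
    if (isa<scf::ForallOp>(target) && isa<scf::ForallOp>(source))
      return fuseIndependentSiblingForallLoops(
          cast<scf::ForallOp>(target), cast<scf::ForallOp>(source), rewriter);
    if (isa<scf::ParallelOp>(target) && isa<scf::ParallelOp>(source))
      return fuseIndependentSiblingParallelLoops(
          cast<scf::ParallelOp>(target), cast<scf::ParallelOp>(source),
          rewriter);
    return nullptr;
  }
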
diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
index 5934d85373b03..b775f988576e3 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
@@ -16,6 +16,7 @@
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SCF/Transforms/Transforms.h"
+#include "mlir/Dialect/SCF/Utils/Utils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
@@ -37,24 +38,6 @@ static bool hasNestedParallelOp(ParallelOp ploop) {
return walkResult.wasInterrupted();
}
-/// Verify equal iteration spaces.
-static bool equalIterationSpaces(ParallelOp firstPloop,
- ParallelOp secondPloop) {
- if (firstPloop.getNumLoops() != secondPloop.getNumLoops())
- return false;
-
- auto matchOperands = [&](const OperandRange &lhs,
- const OperandRange &rhs) -> bool {
- // TODO: Extend this to support aliases and equal constants.
- return std::equal(lhs.begin(), lhs.end(), rhs.begin());
- };
- return matchOperands(firstPloop.getLowerBound(),
- secondPloop.getLowerBound()) &&
- matchOperands(firstPloop.getUpperBound(),
- secondPloop.getUpperBound()) &&
- matchOperands(firstPloop.getStep(), secondPloop.getStep());
-}
-
/// Checks if the parallel loops have mixed access to the same buffers. Returns
/// `true` if the first parallel loop writes to the same indices that the second
/// loop reads.
@@ -153,9 +136,10 @@ verifyDependencies(ParallelOp firstPloop, ParallelOp secondPloop,
static bool isFusionLegal(ParallelOp firstPloop, ParallelOp secondPloop,
const IRMapping &firstToSecondPloopIndices,
llvm::function_ref<bool(Value, Value)> mayAlias) {
+ Diagnostic diag(firstPloop.getLoc(), DiagnosticSeverity::Remark);
return !hasNestedParallelOp(firstPloop) &&
!hasNestedParallelOp(secondPloop) &&
- equalIterationSpaces(firstPloop, secondPloop) &&
+ checkFusionStructuralLegality(firstPloop, secondPloop, diag) &&
succeeded(verifyDependencies(firstPloop, secondPloop,
firstToSecondPloopIndices, mayAlias));
}
@@ -174,61 +158,9 @@ static void fuseIfLegal(ParallelOp firstPloop, ParallelOp &secondPloop,
mayAlias))
return;
- DominanceInfo dom;
- // We are fusing first loop into second, make sure there are no users of the
- // first loop results between loops.
- for (Operation *user : firstPloop->getUsers())
- if (!dom.properlyDominates(secondPloop, user, /*enclosingOpOk*/ false))
- return;
-
- ValueRange inits1 = firstPloop.getInitVals();
- ValueRange inits2 = secondPloop.getInitVals();
-
- SmallVector<Value> newInitVars(inits1.begin(), inits1.end());
- newInitVars.append(inits2.begin(), inits2.end());
-
- IRRewriter b(builder);
- b.setInsertionPoint(secondPloop);
- auto newSecondPloop = b.create<ParallelOp>(
- secondPloop.getLoc(), secondPloop.getLowerBound(),
- secondPloop.getUpperBound(), secondPloop.getStep(), newInitVars);
-
- Block *newBlock = newSecondPloop.getBody();
- auto term1 = cast<ReduceOp>(block1->getTerminator());
- auto term2 = cast<ReduceOp>(block2->getTerminator());
-
- b.inlineBlockBefore(block2, newBlock, newBlock->begin(),
- newBlock->getArguments());
- b.inlineBlockBefore(block1, newBlock, newBlock->begin(),
- newBlock->getArguments());
-
- ValueRange results = newSecondPloop.getResults();
- if (!results.empty()) {
- b.setInsertionPointToEnd(newBlock);
-
- ValueRange reduceArgs1 = term1.getOperands();
- ValueRange reduceArgs2 = term2.getOperands();
- SmallVector<Value> newReduceArgs(reduceArgs1.begin(), reduceArgs1.end());
- newReduceArgs.append(reduceArgs2.begin(), reduceArgs2.end());
-
- auto newReduceOp = b.create<scf::ReduceOp>(term2.getLoc(), newReduceArgs);
-
- for (auto &&[i, reg] : llvm::enumerate(llvm::concat<Region>(
- term1.getReductions(), term2.getReductions()))) {
- Block &oldRedBlock = reg.front();
- Block &newRedBlock = newReduceOp.getReductions()[i].front();
- b.inlineBlockBefore(&oldRedBlock, &newRedBlock, newRedBlock.begin(),
- newRedBlock.getArguments());
- }
-
- firstPloop.replaceAllUsesWith(results.take_front(inits1.size()));
- secondPloop.replaceAllUsesWith(results.take_back(inits2.size()));
- }
- term1->erase();
- term2->erase();
- firstPloop.erase();
- secondPloop.erase();
- secondPloop = newSecondPloop;
+ IRRewriter rewriter(builder);
+ secondPloop = mlir::fuseIndependentSiblingParallelLoops(
+ firstPloop, secondPloop, rewriter);
}
void mlir::scf::naivelyFuseParallelOps(
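
The `mayAlias` callback threaded through `isFusionLegal` is supplied by the
caller; a conservative sketch of one possible policy (illustrative, not the
pass's actual callback):

  // Treat two values as non-aliasing only when both are results of
  // distinct memref.alloc ops; assume everything else may alias.
  auto mayAlias = [](Value first, Value second) -> bool {
    auto firstAlloc = first.getDefiningOp<memref::AllocOp>();
    auto secondAlloc = second.getDefiningOp<memref::AllocOp>();
    if (firstAlloc && secondAlloc)
      return firstAlloc.getOperation() == secondAlloc.getOperation();
    return true;
  };
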
diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index c0ee9d2afe91c..abfc9a1b4d444 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -17,6 +17,7 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/Dominance.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/PatternMatch.h"
@@ -1262,54 +1263,131 @@ TileLoops mlir::extractFixedOuterLoops(scf::ForOp rootForOp,
return tileLoops;
}
+//===----------------------------------------------------------------------===//
+// Fusion related helpers
+//===----------------------------------------------------------------------===//
+
+/// Check if `target` and `source` are siblings, in the context that `target`
+/// is being fused into `source`.
+///
+/// This is a simple check that just checks if both operations are in the same
+/// block and some checks to ensure that the fused IR does not violate
+/// dominance.
+static bool isOpSibling(Operation *target, Operation *source,
+ Diagnostic &diag) {
+  // Check if both operations are the same.
+ if (target == source) {
+ diag << "target and source need to be different loops";
+ return false;
+ }
+
+ // Check if both operations are in the same block.
+ if (target->getBlock() != source->getBlock()) {
+ diag << "target and source are not in the same block";
+ return false;
+ }
+
+ // Check if fusion will violate dominance.
+ DominanceInfo domInfo(source);
+ if (target->isBeforeInBlock(source)) {
+ // Since `target` is before `source`, all users of results of `target`
+ // need to be dominated by `source`.
+ for (Operation *user : target->getUsers()) {
+ if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
+ diag << "user of results of target should "
+ "be properly dominated by source";
+ return false;
+ }
+ }
+ } else {
+ // Since `target` is after `source`, all values used by `target` need
+ // to dominate `source`.
+
+ // Check if operands of `target` are dominated by `source`.
+ for (Value operand : target->getOperands()) {
+ Operation *operandOp = operand.getDefiningOp();
+ // Operands without defining operations are block arguments. When `target`
+ // and `source` occur in the same block, these operands dominate `source`.
+ if (!operandOp)
+ continue;
+
+ // Operand's defining operation should properly dominate `source`.
+ if (!domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false)) {
+ diag << "operands of target should be properly dominated by source";
+ return false;
+ }
+ }
+
+ // Check if values used by `target` are dominated by `source`.
+ bool failed = false;
+ OpOperand *failedValue = nullptr;
+ visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
+ Operation *operandOp = operand->get().getDefiningOp();
+ if (operandOp && !domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false)) {
+ // `operand` is not an argument of an enclosing block and the defining
+ // op of `operand` is outside `target` but does not dominate `source`.
+ failed = true;
+ failedValue = operand;
+ }
+ });
+
+ if (failed) {
+ diag << "values used inside regions of target should be properly "
+ "dominated by source";
+ diag.attachNote(failedValue->getOwner()->getLoc()) << "see operation";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool mlir::checkFusionStructuralLegality(LoopLikeOpInterface target,
+ LoopLikeOpInterface source,
+ Diagnostic &diag) {
+ if (target->getName() != source->getName()) {
+ diag << "target and source must be same loop type";
+ return false;
+ }
+
+ bool iterSpaceEq =
+ target.getLoopLowerBounds() == source.getLoopLowerBounds() &&
+ target.getLoopUpperBounds() == source.getLoopUpperBounds() &&
+ target.getLoopSteps() == source.getLoopSteps();
+  // TODO: Decouple the checks on concrete loop types and move this function
+  // into a general utility for `LoopLikeOpInterface`.
+ if (auto forAllTarget = dyn_cast<scf::ForallOp>(*target))
+ iterSpaceEq = iterSpaceEq && forAllTarget.getMapping() ==
+ cast<scf::ForallOp>(*source).getMapping();
+ if (!iterSpaceEq) {
+ diag << "target and source iteration spaces must be equal";
+ return false;
+ }
+ return isOpSibling(target, source, diag);
+}
+
scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForallOp source,
RewriterBase &rewriter) {
- unsigned numTargetOuts = target.getNumResults();
- unsigned numSourceOuts = source.getNumResults();
-
- // Create fused shared_outs.
- SmallVector<Value> fusedOuts;
- llvm::append_range(fusedOuts, target.getOutputs());
- llvm::append_range(fusedOuts, source.getOutputs());
-
- // Create a new scf.forall op after the source loop.
- rewriter.setInsertionPointAfter(source);
- scf::ForallOp fusedLoop = rewriter.create<scf::ForallOp>(
- source.getLoc(), source.getMixedLowerBound(), source.getMixedUpperBound(),
- source.getMixedStep(), fusedOuts, source.getMapping());
-
- // Map control operands.
- IRMapping mapping;
- mapping.map(target.getInductionVars(), fusedLoop.getInductionVars());
- mapping.map(source.getInductionVars(), fusedLoop.getInductionVars());
-
- // Map shared outs.
- mapping.map(target.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
- mapping.map(source.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
-
- // Append everything except the terminator into the fused operation.
- rewriter.setInsertionPointToStart(fusedLoop.getBody());
- for (Operation &op : target.getBody()->without_terminator())
- rewriter.clone(op, mapping);
- for (Operation &op : source.getBody()->without_terminator())
- rewriter.clone(op, mapping);
-
- // Fuse the old terminator in_parallel ops into the new one.
- scf::InParallelOp targetTerm = target.getTerminator();
- scf::InParallelOp sourceTerm = source.getTerminator();
- scf::InParallelOp fusedTerm = fusedLoop.getTerminator();
- rewriter.setInsertionPointToStart(fusedTerm.getBody());
- for (Operation &op : targetTerm.getYieldingOps())
- rewriter.clone(op, mapping);
- for (Operation &op : sourceTerm.getYieldingOps())
- rewriter.clone(op, mapping);
-
- // Replace old loops by substituting their uses by results of the fused loop.
- rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
- rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
+ scf::ForallOp fusedLoop = cast<scf::ForallOp>(createFused(
+ target, source, rewriter,
+ [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
+        // `ForallOp` has no yielded values; its terminator is an `InParallelOp`.
+ return ValueRange{};
+ },
+ [&](RewriterBase &b, LoopLikeOpInterface source,
+ LoopLikeOpInterface &target, IRMapping mapping) {
+ auto sourceForall = cast<scf::ForallOp>(source);
+ auto targetForall = cast<scf::ForallOp>(target);
+ scf::InParallelOp fusedTerm = targetForall.getTerminator();
+ b.setInsertionPointToEnd(fusedTerm.getBody());
+ for (Operation &op : sourceForall.getTerminator().getYieldingOps())
+ b.clone(op, mapping);
+ }));
+ rewriter.replaceOp(source,
+ fusedLoop.getResults().take_back(source.getNumResults()));
return fusedLoop;
}
@@ -1317,49 +1395,74 @@ scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForOp mlir::fuseIndependentSiblingForLoops(scf::ForOp target,
scf::ForOp source,
RewriterBase &rewriter) {
- unsigned numTargetOuts = target.getNumResults();
- unsigned numSourceOuts = source.getNumResults();
-
- // Create fused init_args, with target's init_args before source's init_args.
- SmallVector<Value> fusedInitArgs;
- llvm::append_range(fusedInitArgs, target.getInitArgs());
- llvm::append_range(fusedInitArgs, source.getInitArgs());
-
- // Create a new scf.for op after the source loop (with scf.yield terminator
- // (without arguments) only in case its init_args is empty).
- rewriter.setInsertionPointAfter(source);
- scf::ForOp fusedLoop = rewriter.create<scf::ForOp>(
- source.getLoc(), source.getLowerBound(), source.getUpperBound(),
- source.getStep(), fusedInitArgs);
-
- // Map original induction variables and operands to those of the fused loop.
- IRMapping mapping;
- mapping.map(target.getInductionVar(), fusedLoop.getInductionVar());
- mapping.map(target.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
- mapping.map(source.getInductionVar(), fusedLoop.getInductionVar());
- mapping.map(source.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
-
- // Merge target's body into the new (fused) for loop and then source's body.
- rewriter.setInsertionPointToStart(fusedLoop.getBody());
- for (Operation &op : target.getBody()->without_terminator())
- rewriter.clone(op, mapping);
- for (Operation &op : source.getBody()->without_terminator())
- rewriter.clone(op, mapping);
-
- // Build fused yield results by appropriately mapping original yield operands.
- SmallVector<Value> yieldResults;
- for (Value operand : target.getBody()->getTerminator()->getOperands())
- yieldResults.push_back(mapping.lookupOrDefault(operand));
- for (Value operand : source.getBody()->getTerminator()->getOperands())
- yieldResults.push_back(mapping.lookupOrDefault(operand));
- if (!yieldResults.empty())
- rewriter.create<scf::YieldOp>(source.getLoc(), yieldResults);
-
- // Replace old loops by substituting their uses by results of the fused loop.
- rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
- rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
+ scf::ForOp fusedLoop = cast<scf::ForOp>(createFused(
+ target, source, rewriter,
+ [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
+ return source.getYieldedValues();
+ },
+ [&](RewriterBase &b, LoopLikeOpInterface source,
+ LoopLikeOpInterface &target, IRMapping mapping) {
+ auto targetFor = cast<scf::ForOp>(target);
+ auto newTerm = b.clone(*targetFor.getBody()->getTerminator(), mapping);
+ b.replaceOp(targetFor.getBody()->getTerminator(), newTerm);
+ }));
+ rewriter.replaceOp(source,
+ fusedLoop.getResults().take_back(source.getNumResults()));
+ return fusedLoop;
+}
+
+// TODO: Finish refactoring this a la the above, but likely requires additional
+// interface methods.
+scf::ParallelOp mlir::fuseIndependentSiblingParallelLoops(
+ scf::ParallelOp target, scf::ParallelOp source, RewriterBase &rewriter) {
+ OpBuilder::InsertionGuard guard(rewriter);
+ Block *block1 = target.getBody();
+ Block *block2 = source.getBody();
+ auto term1 = cast<scf::ReduceOp>(block1->getTerminator());
+ auto term2 = cast<scf::ReduceOp>(block2->getTerminator());
+
+ ValueRange inits1 = target.getInitVals();
+ ValueRange inits2 = source.getInitVals();
+
+ SmallVector<Value> newInitVars(inits1.begin(), inits1.end());
+ newInitVars.append(inits2.begin(), inits2.end());
+
+ rewriter.setInsertionPoint(source);
+ auto fusedLoop = rewriter.create<scf::ParallelOp>(
+ rewriter.getFusedLoc(target.getLoc(), source.getLoc()),
+ source.getLowerBound(), source.getUpperBound(), source.getStep(),
+ newInitVars);
+ Block *newBlock = fusedLoop.getBody();
+ rewriter.inlineBlockBefore(block2, newBlock, newBlock->begin(),
+ newBlock->getArguments());
+ rewriter.inlineBlockBefore(block1, newBlock, newBlock->begin(),
+ newBlock->getArguments());
+
+ ValueRange results = fusedLoop.getResults();
+ if (!results.empty()) {
+ rewriter.setInsertionPointToEnd(newBlock);
+
+ ValueRange reduceArgs1 = term1.getOperands();
+ ValueRange reduceArgs2 = term2.getOperands();
+ SmallVector<Value> newReduceArgs(reduceArgs1.begin(), reduceArgs1.end());
+ newReduceArgs.append(reduceArgs2.begin(), reduceArgs2.end());
+
+ auto newReduceOp = rewriter.create<scf::ReduceOp>(
+ rewriter.getFusedLoc(term1.getLoc(), term2.getLoc()), newReduceArgs);
+
+ for (auto &&[i, reg] : llvm::enumerate(llvm::concat<Region>(
+ term1.getReductions(), term2.getReductions()))) {
+ Block &oldRedBlock = reg.front();
+ Block &newRedBlock = newReduceOp.getReductions()[i].front();
+ rewriter.inlineBlockBefore(&oldRedBlock, &newRedBlock,
+ newRedBlock.begin(),
+ newRedBlock.getArguments());
+ }
+ }
+ rewriter.replaceOp(target, results.take_front(inits1.size()));
+ rewriter.replaceOp(source, results.take_back(inits2.size()));
+ rewriter.eraseOp(term1);
+ rewriter.eraseOp(term2);
return fusedLoop;
}
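
All three helpers share one result-ordering contract; a sketch of consuming
it (assumes `target`, `source` and `rewriter` are in scope before fusion):

  // Capture result counts first: both original loops are replaced and
  // erased inside the helper.
  unsigned numTargetOuts = target.getNumResults();
  unsigned numSourceOuts = source.getNumResults();
  scf::ForOp fused = fuseIndependentSiblingForLoops(target, source, rewriter);
  // Target's results come first in the fused loop, source's come last.
  ValueRange forTarget = fused.getResults().take_front(numTargetOuts);
  ValueRange forSource = fused.getResults().take_back(numSourceOuts);
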
diff --git a/mlir/lib/Interfaces/LoopLikeInterface.cpp b/mlir/lib/Interfaces/LoopLikeInterface.cpp
index 1e0e87b64e811..6f0ebec0519be 100644
--- a/mlir/lib/Interfaces/LoopLikeInterface.cpp
+++ b/mlir/lib/Interfaces/LoopLikeInterface.cpp
@@ -8,6 +8,8 @@
#include "mlir/Interfaces/LoopLikeInterface.h"
+#include "mlir/IR/IRMapping.h"
+#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/FunctionInterfaces.h"
#include "llvm/ADT/DenseSet.h"
@@ -113,3 +115,56 @@ LogicalResult detail::verifyLoopLikeOpInterface(Operation *op) {
return success();
}
+
+LoopLikeOpInterface mlir::createFused(LoopLikeOpInterface target,
+ LoopLikeOpInterface source,
+ RewriterBase &rewriter,
+ NewYieldValuesFn newYieldValuesFn,
+ FuseTerminatorFn fuseTerminatorFn) {
+ auto targetIterArgs = target.getRegionIterArgs();
+ std::optional<SmallVector<Value>> targetInductionVar =
+ target.getLoopInductionVars();
+ SmallVector<Value> targetYieldOperands(target.getYieldedValues());
+ auto sourceIterArgs = source.getRegionIterArgs();
+  std::optional<SmallVector<Value>> sourceInductionVar =
+      source.getLoopInductionVars();
+ SmallVector<Value> sourceYieldOperands(source.getYieldedValues());
+ auto sourceRegion = source.getLoopRegions().front();
+
+ FailureOr<LoopLikeOpInterface> maybeFusedLoop =
+ target.replaceWithAdditionalYields(rewriter, source.getInits(),
+ /*replaceInitOperandUsesInLoop=*/false,
+ newYieldValuesFn);
+ if (failed(maybeFusedLoop))
+ llvm_unreachable("failed to replace loop");
+ LoopLikeOpInterface fusedLoop = *maybeFusedLoop;
+
+ // Map control operands.
+ IRMapping mapping;
+ std::optional<SmallVector<Value>> fusedInductionVar =
+ fusedLoop.getLoopInductionVars();
+ if (fusedInductionVar) {
+ if (!targetInductionVar || !sourceInductionVar)
+ llvm_unreachable("expected target and source loops to have induction vars");
+ mapping.map(*targetInductionVar, *fusedInductionVar);
+ mapping.map(*sourceInductionVar, *fusedInductionVar);
+ }
+ mapping.map(targetIterArgs,
+ fusedLoop.getRegionIterArgs().take_front(targetIterArgs.size()));
+ mapping.map(targetYieldOperands,
+ fusedLoop.getYieldedValues().take_front(targetIterArgs.size()));
+ mapping.map(sourceIterArgs,
+ fusedLoop.getRegionIterArgs().take_back(sourceIterArgs.size()));
+ mapping.map(sourceYieldOperands,
+ fusedLoop.getYieldedValues().take_back(sourceIterArgs.size()));
+ // Append everything except the terminator into the fused operation.
+ rewriter.setInsertionPoint(
+ fusedLoop.getLoopRegions().front()->front().getTerminator());
+ for (Operation &op : sourceRegion->front().without_terminator())
+ rewriter.clone(op, mapping);
+
+ // TODO: Replace with corresponding interface method if added
+ fuseTerminatorFn(rewriter, source, fusedLoop, mapping);
+
+ return fusedLoop;
+}
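
A sketch of reusing `createFused` for a hypothetical downstream loop op whose
terminator is a plain yield; every name here is illustrative:

  LoopLikeOpInterface fuseYieldingLoops(LoopLikeOpInterface target,
                                        LoopLikeOpInterface source,
                                        RewriterBase &rewriter) {
    return createFused(
        target, source, rewriter,
        // Forward source's yielded values as the extra yields of the
        // replacement loop.
        [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
          return llvm::to_vector(source.getYieldedValues());
        },
        // Re-emit the fused terminator through the combined value mapping.
        [](RewriterBase &b, LoopLikeOpInterface src,
           LoopLikeOpInterface &fused, IRMapping mapping) {
          Operation *term =
              fused.getLoopRegions().front()->front().getTerminator();
          b.replaceOp(term, b.clone(*term, mapping));
        });
  }
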
diff --git a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
index 54dd2bdf953ca..91ed2a5269d74 100644
--- a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
+++ b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
@@ -47,6 +47,169 @@ module attributes {transform.with_named_sequence} {
// -----
+// CHECK-LABEL: func @fuse_two_parallel
+// CHECK-SAME: ([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}) {
+func.func @fuse_two_parallel(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
+// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
+// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
+// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
+// CHECK-DAG: [[C1FP:%.*]] = arith.constant 1.
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1fp = arith.constant 1.0 : f32
+// CHECK: [[SUM:%.*]] = memref.alloc()
+ %sum = memref.alloc() : memref<2x2xf32>
+// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]])
+// CHECK-SAME: to ([[C2]], [[C2]]) step ([[C1]], [[C1]]) {
+// CHECK: [[B_ELEM:%.*]] = memref.load [[B]]{{\[}}[[I]], [[J]]]
+// CHECK: [[SUM_ELEM:%.*]] = arith.addf [[B_ELEM]], [[C1FP]]
+// CHECK: memref.store [[SUM_ELEM]], [[SUM]]{{\[}}[[I]], [[J]]]
+// CHECK-NOT: scf.parallel
+// CHECK: [[SUM_ELEM_:%.*]] = memref.load [[SUM]]{{\[}}[[I]], [[J]]]
+// CHECK: [[A_ELEM:%.*]] = memref.load [[A]]{{\[}}[[I]], [[J]]]
+// CHECK: [[PRODUCT_ELEM:%.*]] = arith.mulf [[SUM_ELEM_]], [[A_ELEM]]
+// CHECK: memref.store [[PRODUCT_ELEM]], [[B]]{{\[}}[[I]], [[J]]]
+// CHECK: scf.reduce
+// CHECK: }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
+ %sum_elem = arith.addf %B_elem, %c1fp : f32
+ memref.store %sum_elem, %sum[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
+ %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
+ %product_elem = arith.mulf %sum_elem, %A_elem : f32
+ memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+// CHECK: memref.dealloc [[SUM]]
+ memref.dealloc %sum : memref<2x2xf32>
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @fuse_two_parallel_reverse
+// CHECK-SAME: ([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}) {
+func.func @fuse_two_parallel_reverse(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
+// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
+// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
+// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
+// CHECK-DAG: [[C1FP:%.*]] = arith.constant 1.
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1fp = arith.constant 1.0 : f32
+// CHECK: [[SUM:%.*]] = memref.alloc()
+ %sum = memref.alloc() : memref<2x2xf32>
+// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]])
+// CHECK-SAME: to ([[C2]], [[C2]]) step ([[C1]], [[C1]]) {
+// CHECK: [[SUM_ELEM_:%.*]] = memref.load [[SUM]]{{\[}}[[I]], [[J]]]
+// CHECK: [[A_ELEM:%.*]] = memref.load [[A]]{{\[}}[[I]], [[J]]]
+// CHECK: [[PRODUCT_ELEM:%.*]] = arith.mulf [[SUM_ELEM_]], [[A_ELEM]]
+// CHECK: memref.store [[PRODUCT_ELEM]], [[B]]{{\[}}[[I]], [[J]]]
+// CHECK-NOT: scf.parallel
+// CHECK: [[B_ELEM:%.*]] = memref.load [[B]]{{\[}}[[I]], [[J]]]
+// CHECK: [[SUM_ELEM:%.*]] = arith.addf [[B_ELEM]], [[C1FP]]
+// CHECK: memref.store [[SUM_ELEM]], [[SUM]]{{\[}}[[I]], [[J]]]
+// CHECK: scf.reduce
+// CHECK: }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
+ %sum_elem = arith.addf %B_elem, %c1fp : f32
+ memref.store %sum_elem, %sum[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
+ %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
+ %product_elem = arith.mulf %sum_elem, %A_elem : f32
+ memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+// CHECK: memref.dealloc [[SUM]]
+ memref.dealloc %sum : memref<2x2xf32>
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %parallel#1 into %parallel#0 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @fuse_reductions_two
+// CHECK-SAME: (%[[A:.*]]: memref<2x2xf32>, %[[B:.*]]: memref<2x2xf32>) -> (f32, f32)
+func.func @fuse_reductions_two(%A: memref<2x2xf32>, %B: memref<2x2xf32>) -> (f32, f32) {
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[INIT1:.*]] = arith.constant 1.000000e+00 : f32
+// CHECK-DAG: %[[INIT2:.*]] = arith.constant 2.000000e+00 : f32
+// CHECK: %[[RES:.*]]:2 = scf.parallel (%[[I:.*]], %[[J:.*]]) = (%[[C0]], %[[C0]])
+// CHECK-SAME: to (%[[C2]], %[[C2]]) step (%[[C1]], %[[C1]])
+// CHECK-SAME: init (%[[INIT1]], %[[INIT2]]) -> (f32, f32)
+// CHECK: %[[VAL_A:.*]] = memref.load %[[A]][%[[I]], %[[J]]]
+// CHECK: %[[VAL_B:.*]] = memref.load %[[B]][%[[I]], %[[J]]]
+// CHECK: scf.reduce(%[[VAL_A]], %[[VAL_B]] : f32, f32) {
+// CHECK: ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32):
+// CHECK: %[[R:.*]] = arith.addf %[[LHS]], %[[RHS]] : f32
+// CHECK: scf.reduce.return %[[R]] : f32
+// CHECK: }
+// CHECK: ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32):
+// CHECK: %[[R:.*]] = arith.mulf %[[LHS]], %[[RHS]] : f32
+// CHECK: scf.reduce.return %[[R]] : f32
+// CHECK: }
+// CHECK: return %[[RES]]#0, %[[RES]]#1 : f32, f32
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %init1 = arith.constant 1.0 : f32
+ %init2 = arith.constant 2.0 : f32
+ %res1 = scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) init(%init1) -> f32 {
+ %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
+ scf.reduce(%A_elem : f32) {
+ ^bb0(%lhs: f32, %rhs: f32):
+ %1 = arith.addf %lhs, %rhs : f32
+ scf.reduce.return %1 : f32
+ }
+ }
+ %res2 = scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) init(%init2) -> f32 {
+ %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
+ scf.reduce(%B_elem : f32) {
+ ^bb0(%lhs: f32, %rhs: f32):
+ %1 = arith.mulf %lhs, %rhs : f32
+ scf.reduce.return %1 : f32
+ }
+ }
+ return %res1, %res2 : f32, f32
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
// CHECK: func.func @fuse_2nd_for_into_1st([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
func.func @fuse_2nd_for_into_1st(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
@@ -282,8 +445,9 @@ func.func @target_for_region_uses_result_of_source_for_err(%A: tensor<128xf32>,
%6 = vector.transfer_write %5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
scf.yield %6 : tensor<128xf32>
}
- %dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
// expected-error @below {{values used inside regions of target should be properly dominated by source}}
+ %dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
+ // expected-note @below {{see operation}}
%dup2 = vector.transfer_read %1[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
%dup3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
%dup5 = arith.addf %dup3, %dup2 : vector<16xf32>
@@ -328,6 +492,74 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
+
+// -----
+
+func.func @non_matching_iteration_spaces_err(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1fp = arith.constant 1.0 : f32
+ %sum = memref.alloc() : memref<2x2xf32>
+ // expected-error @below {{target and source iteration spaces must be equal}}
+ scf.parallel (%i) = (%c0) to (%c2) step (%c1) {
+ %B_elem = memref.load %B[%i, %c0] : memref<2x2xf32>
+ %sum_elem = arith.addf %B_elem, %c1fp : f32
+ memref.store %sum_elem, %sum[%i, %c0] : memref<2x2xf32>
+ scf.reduce
+ }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
+ %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
+ %product_elem = arith.mulf %sum_elem, %A_elem : f32
+ memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+ memref.dealloc %sum : memref<2x2xf32>
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @non_matching_loop_types_err(%A: memref<2xf32>, %B: memref<2xf32>) {
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1fp = arith.constant 1.0 : f32
+ %sum = memref.alloc() : memref<2xf32>
+  // expected-error @below {{target and source must be the same loop type}}
+ scf.for %i = %c0 to %c2 step %c1 {
+ %B_elem = memref.load %B[%i] : memref<2xf32>
+ %sum_elem = arith.addf %B_elem, %c1fp : f32
+ memref.store %sum_elem, %sum[%i] : memref<2xf32>
+ }
+ scf.parallel (%i) = (%c0) to (%c2) step (%c1) {
+ %sum_elem = memref.load %sum[%i] : memref<2xf32>
+ %A_elem = memref.load %A[%i] : memref<2xf32>
+ %product_elem = arith.mulf %sum_elem, %A_elem : f32
+ memref.store %product_elem, %B[%i] : memref<2xf32>
+ scf.reduce
+ }
+ memref.dealloc %sum : memref<2xf32>
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %fused = transform.loop.fuse_sibling %0 into %1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
// -----
// CHECK: func.func @foreach_loop_pair_fuse([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
From 13be6ee7da048acff9953db92486f5c2147af3ed Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Tue, 2 Jul 2024 14:50:09 +0100
Subject: [PATCH 007/246] Fix MSVC discarded return value warnings. NFC.
"C4858 This function constructs an object wrapped by a smart pointer and has no other effects; it is not useful to call this function and discard the return value."
---
llvm/unittests/Analysis/MLModelRunnerTest.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/unittests/Analysis/MLModelRunnerTest.cpp b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
index 87b37f81163a4..d3c9dcc56b4c3 100644
--- a/llvm/unittests/Analysis/MLModelRunnerTest.cpp
+++ b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
@@ -198,7 +198,7 @@ TEST(ReleaseModelRunner, ModelSelectorNoInputFeaturePresent) {
LLVMContext Ctx;
std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
TensorSpec::createSpec<int64_t>("b", {1})};
- EXPECT_DEATH(std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
+ EXPECT_DEATH((void)std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
Ctx, Inputs, "", makeOptions().setModelSelector(M2Selector)),
"A model selector was specified but the underlying model does "
"not expose a model_selector input");
@@ -209,7 +209,7 @@ TEST(ReleaseModelRunner, ModelSelectorNoSelectorGiven) {
std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
TensorSpec::createSpec<int64_t>("b", {1})};
EXPECT_DEATH(
- std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
+ (void)std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
Ctx, Inputs, "", makeOptions()),
"A model selector was not specified but the underlying model requires "
"selecting one because it exposes a model_selector input");
From 1f7d31e3420f71f3cbf5f455d78735a0f2bd4442 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Tue, 2 Jul 2024 16:04:19 +0100
Subject: [PATCH 008/246] [AMDGPU] Regenerate srem.ll tests - more closely
match the testing in sdiv.ll
---
llvm/test/CodeGen/AMDGPU/srem.ll | 8994 +++++++++++++++++++++++++++++-
1 file changed, 8981 insertions(+), 13 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index 5cf563dc61d7a..bcc67e974ae4a 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -1,12 +1,109 @@
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=SI,GFX9 -check-prefix=FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=r600 -mcpu=redwood -verify-machineinstrs < %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=GCN
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck %s --check-prefixes=TAHITI
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s --check-prefixes=TONGA
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck %s --check-prefixes=EG
-; FUNC-LABEL: {{^}}srem_i16_7:
-; GFX9: s_movk_i32 {{s[0-9]+}}, 0x4925
-; GFX9: v_mul_lo_u32
define amdgpu_kernel void @srem_i16_7(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_i16_7:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_ushort v1, v0, s[2:3]
+; GCN-NEXT: s_movk_i32 s2, 0x4925
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_bfe_i32 v2, v1, 0, 16
+; GCN-NEXT: v_mul_lo_u32 v2, v2, s2
+; GCN-NEXT: v_lshrrev_b32_e32 v3, 31, v2
+; GCN-NEXT: v_ashrrev_i32_e32 v2, 17, v2
+; GCN-NEXT: v_add_u16_e32 v2, v2, v3
+; GCN-NEXT: v_mul_lo_u16_e32 v2, 7, v2
+; GCN-NEXT: v_sub_u16_e32 v1, v1, v2
+; GCN-NEXT: global_store_short v0, v1, s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_i16_7:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: s_mov_b32 s10, s6
+; TAHITI-NEXT: s_mov_b32 s11, s7
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s2
+; TAHITI-NEXT: s_mov_b32 s9, s3
+; TAHITI-NEXT: buffer_load_sshort v0, off, s[8:11], 0
+; TAHITI-NEXT: s_mov_b32 s4, s0
+; TAHITI-NEXT: s_mov_b32 s5, s1
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_readfirstlane_b32 s0, v0
+; TAHITI-NEXT: s_mulk_i32 s0, 0x4925
+; TAHITI-NEXT: s_lshr_b32 s1, s0, 31
+; TAHITI-NEXT: s_ashr_i32 s0, s0, 17
+; TAHITI-NEXT: s_add_i32 s0, s0, s1
+; TAHITI-NEXT: s_mul_i32 s0, s0, 7
+; TAHITI-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
+; TAHITI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_i16_7:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_ushort v2, v[0:1]
+; TONGA-NEXT: s_movk_i32 s2, 0x4925
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_bfe_i32 v0, v2, 0, 16
+; TONGA-NEXT: v_mul_lo_u32 v3, v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: v_lshrrev_b32_e32 v4, 31, v3
+; TONGA-NEXT: v_ashrrev_i32_e32 v3, 17, v3
+; TONGA-NEXT: v_add_u16_e32 v3, v3, v4
+; TONGA-NEXT: v_mul_lo_u16_e32 v3, 7, v3
+; TONGA-NEXT: v_sub_u16_e32 v2, v2, v3
+; TONGA-NEXT: flat_store_short v[0:1], v2
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_i16_7:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 22, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: BFE_INT * T0.W, T0.X, 0.0, literal.x,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, literal.x,
+; EG-NEXT: 18725(2.623931e-41), 0(0.000000e+00)
+; EG-NEXT: ASHR T0.W, PS, literal.x,
+; EG-NEXT: LSHR * T1.W, PS, literal.y,
+; EG-NEXT: 17(2.382207e-44), 31(4.344025e-44)
+; EG-NEXT: ADD_INT * T0.W, PV.W, PS,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, literal.x,
+; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT * T1.W, T0.X, PS,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%num = load i16, ptr addrspace(1) %in
%result = srem i16 %num, 7
store i16 %result, ptr addrspace(1) %out
@@ -14,6 +111,161 @@ define amdgpu_kernel void @srem_i16_7(ptr addrspace(1) %out, ptr addrspace(1) %i
}
define amdgpu_kernel void @srem_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_readfirstlane_b32 s2, v1
+; GCN-NEXT: s_abs_i32 s2, s2
+; GCN-NEXT: v_cvt_f32_u32_e32 v1, s2
+; GCN-NEXT: v_readfirstlane_b32 s3, v0
+; GCN-NEXT: s_sub_i32 s5, 0, s2
+; GCN-NEXT: s_ashr_i32 s4, s3, 31
+; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; GCN-NEXT: s_abs_i32 s3, s3
+; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v1
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: v_readfirstlane_b32 s6, v0
+; GCN-NEXT: s_mul_i32 s5, s5, s6
+; GCN-NEXT: s_mul_hi_u32 s5, s6, s5
+; GCN-NEXT: s_add_i32 s6, s6, s5
+; GCN-NEXT: s_mul_hi_u32 s5, s3, s6
+; GCN-NEXT: s_mul_i32 s5, s5, s2
+; GCN-NEXT: s_sub_i32 s3, s3, s5
+; GCN-NEXT: s_sub_i32 s5, s3, s2
+; GCN-NEXT: s_cmp_ge_u32 s3, s2
+; GCN-NEXT: s_cselect_b32 s3, s5, s3
+; GCN-NEXT: s_sub_i32 s5, s3, s2
+; GCN-NEXT: s_cmp_ge_u32 s3, s2
+; GCN-NEXT: s_cselect_b32 s2, s5, s3
+; GCN-NEXT: s_xor_b32 s2, s2, s4
+; GCN-NEXT: s_sub_i32 s2, s2, s4
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: global_store_dword v2, v0, s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_i32:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: s_mov_b32 s10, s6
+; TAHITI-NEXT: s_mov_b32 s11, s7
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s2
+; TAHITI-NEXT: s_mov_b32 s9, s3
+; TAHITI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; TAHITI-NEXT: s_mov_b32 s5, s1
+; TAHITI-NEXT: s_mov_b32 s4, s0
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_readfirstlane_b32 s2, v1
+; TAHITI-NEXT: s_abs_i32 s2, s2
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; TAHITI-NEXT: s_sub_i32 s3, 0, s2
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TAHITI-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TAHITI-NEXT: v_mul_lo_u32 v2, s3, v1
+; TAHITI-NEXT: v_readfirstlane_b32 s3, v0
+; TAHITI-NEXT: s_abs_i32 s8, s3
+; TAHITI-NEXT: s_ashr_i32 s0, s3, 31
+; TAHITI-NEXT: v_mul_hi_u32 v2, v1, v2
+; TAHITI-NEXT: v_add_i32_e32 v0, vcc, v1, v2
+; TAHITI-NEXT: v_mul_hi_u32 v0, s8, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s1, v0
+; TAHITI-NEXT: s_mul_i32 s1, s1, s2
+; TAHITI-NEXT: s_sub_i32 s1, s8, s1
+; TAHITI-NEXT: s_sub_i32 s3, s1, s2
+; TAHITI-NEXT: s_cmp_ge_u32 s1, s2
+; TAHITI-NEXT: s_cselect_b32 s1, s3, s1
+; TAHITI-NEXT: s_sub_i32 s3, s1, s2
+; TAHITI-NEXT: s_cmp_ge_u32 s1, s2
+; TAHITI-NEXT: s_cselect_b32 s1, s3, s1
+; TAHITI-NEXT: s_xor_b32 s1, s1, s0
+; TAHITI-NEXT: s_sub_i32 s0, s1, s0
+; TAHITI-NEXT: v_mov_b32_e32 v0, s0
+; TAHITI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_i32:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_readfirstlane_b32 s2, v1
+; TONGA-NEXT: s_abs_i32 s2, s2
+; TONGA-NEXT: v_cvt_f32_u32_e32 v1, s2
+; TONGA-NEXT: s_sub_i32 s3, 0, s2
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_lo_u32 v2, s3, v1
+; TONGA-NEXT: v_readfirstlane_b32 s3, v0
+; TONGA-NEXT: s_abs_i32 s4, s3
+; TONGA-NEXT: v_mul_hi_u32 v2, v1, v2
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v1, v2
+; TONGA-NEXT: v_mul_hi_u32 v2, s4, v0
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: s_ashr_i32 s0, s3, 31
+; TONGA-NEXT: v_readfirstlane_b32 s1, v2
+; TONGA-NEXT: s_mul_i32 s1, s1, s2
+; TONGA-NEXT: s_sub_i32 s1, s4, s1
+; TONGA-NEXT: s_sub_i32 s3, s1, s2
+; TONGA-NEXT: s_cmp_ge_u32 s1, s2
+; TONGA-NEXT: s_cselect_b32 s1, s3, s1
+; TONGA-NEXT: s_sub_i32 s3, s1, s2
+; TONGA-NEXT: s_cmp_ge_u32 s1, s2
+; TONGA-NEXT: s_cselect_b32 s1, s3, s1
+; TONGA-NEXT: s_xor_b32 s1, s1, s0
+; TONGA-NEXT: s_sub_i32 s0, s1, s0
+; TONGA-NEXT: v_mov_b32_e32 v2, s0
+; TONGA-NEXT: flat_store_dword v[0:1], v2
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 23, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: SETGT_INT * T0.W, 0.0, T0.Y,
+; EG-NEXT: ADD_INT * T1.W, T0.Y, PV.W,
+; EG-NEXT: XOR_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: SETGT_INT T2.W, 0.0, T0.X,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: ADD_INT T1.W, T0.X, PV.W,
+; EG-NEXT: MULHI * T0.X, T0.Y, PS,
+; EG-NEXT: ADD_INT T3.W, T0.Y, PS,
+; EG-NEXT: XOR_INT * T1.W, PV.W, T2.W,
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T3.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T4.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T3.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T0.W, PV.W, T1.W, PS,
+; EG-NEXT: XOR_INT * T0.W, PV.W, T2.W,
+; EG-NEXT: SUB_INT T0.X, PV.W, T2.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in
%den = load i32, ptr addrspace(1) %den_ptr
@@ -23,6 +275,84 @@ define amdgpu_kernel void @srem_i32(ptr addrspace(1) %out, ptr addrspace(1) %in)
}
define amdgpu_kernel void @srem_i32_4(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_i32_4:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dword v1, v0, s[2:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GCN-NEXT: v_lshrrev_b32_e32 v2, 30, v2
+; GCN-NEXT: v_add_u32_e32 v2, v1, v2
+; GCN-NEXT: v_and_b32_e32 v2, -4, v2
+; GCN-NEXT: v_sub_u32_e32 v1, v1, v2
+; GCN-NEXT: global_store_dword v0, v1, s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_i32_4:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: s_mov_b32 s10, s6
+; TAHITI-NEXT: s_mov_b32 s11, s7
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s2
+; TAHITI-NEXT: s_mov_b32 s9, s3
+; TAHITI-NEXT: buffer_load_dword v0, off, s[8:11], 0
+; TAHITI-NEXT: s_mov_b32 s4, s0
+; TAHITI-NEXT: s_mov_b32 s5, s1
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; TAHITI-NEXT: v_lshrrev_b32_e32 v1, 30, v1
+; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v1, v0
+; TAHITI-NEXT: v_and_b32_e32 v1, -4, v1
+; TAHITI-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
+; TAHITI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_i32_4:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dword v2, v[0:1]
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; TONGA-NEXT: v_lshrrev_b32_e32 v3, 30, v3
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v3, v2
+; TONGA-NEXT: v_and_b32_e32 v3, -4, v3
+; TONGA-NEXT: v_subrev_u32_e32 v2, vcc, v3, v2
+; TONGA-NEXT: flat_store_dword v[0:1], v2
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_i32_4:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 9, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: ASHR * T0.W, T0.X, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR * T0.W, PV.W, literal.x,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, T0.X, PV.W,
+; EG-NEXT: AND_INT * T0.W, PV.W, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T0.X, T0.X, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%num = load i32, ptr addrspace(1) %in
%result = srem i32 %num, 4
store i32 %result, ptr addrspace(1) %out
@@ -30,13 +360,96 @@ define amdgpu_kernel void @srem_i32_4(ptr addrspace(1) %out, ptr addrspace(1) %i
}
; FIXME: uniform i16 srem should not use VALU instructions
-; FUNC-LABEL: {{^}}srem_i32_7:
-; SI: s_mov_b32 [[MAGIC:s[0-9]+]], 0x92492493
-; SI: v_mul_hi_i32 {{v[0-9]+}}, {{v[0-9]+}}, [[MAGIC]]
-; SI: v_mul_lo_u32
-; SI: v_sub_{{[iu]}}32
-; SI: s_endpgm
define amdgpu_kernel void @srem_i32_7(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_i32_7:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dword v1, v0, s[2:3]
+; GCN-NEXT: s_mov_b32 s2, 0x92492493
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_hi_i32 v2, v1, s2
+; GCN-NEXT: v_add_u32_e32 v2, v2, v1
+; GCN-NEXT: v_lshrrev_b32_e32 v3, 31, v2
+; GCN-NEXT: v_ashrrev_i32_e32 v2, 2, v2
+; GCN-NEXT: v_add_u32_e32 v2, v2, v3
+; GCN-NEXT: v_mul_lo_u32 v2, v2, 7
+; GCN-NEXT: v_sub_u32_e32 v1, v1, v2
+; GCN-NEXT: global_store_dword v0, v1, s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_i32_7:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: s_mov_b32 s10, s6
+; TAHITI-NEXT: s_mov_b32 s11, s7
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s2
+; TAHITI-NEXT: s_mov_b32 s9, s3
+; TAHITI-NEXT: buffer_load_dword v0, off, s[8:11], 0
+; TAHITI-NEXT: s_mov_b32 s2, 0x92492493
+; TAHITI-NEXT: s_mov_b32 s4, s0
+; TAHITI-NEXT: s_mov_b32 s5, s1
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_mul_hi_i32 v1, v0, s2
+; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v1, v0
+; TAHITI-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; TAHITI-NEXT: v_ashrrev_i32_e32 v1, 2, v1
+; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; TAHITI-NEXT: v_mul_lo_u32 v1, v1, 7
+; TAHITI-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
+; TAHITI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_i32_7:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dword v2, v[0:1]
+; TONGA-NEXT: s_mov_b32 s2, 0x92492493
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_mul_hi_i32 v0, v2, s2
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; TONGA-NEXT: v_lshrrev_b32_e32 v1, 31, v0
+; TONGA-NEXT: v_ashrrev_i32_e32 v0, 2, v0
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT: v_mul_lo_u32 v3, v0, 7
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: v_subrev_u32_e32 v2, vcc, v3, v2
+; TONGA-NEXT: flat_store_dword v[0:1], v2
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_i32_7:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 11, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: MULHI_INT * T0.Y, T0.X, literal.x,
+; EG-NEXT: -1840700269(-6.346950e-28), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, T0.X,
+; EG-NEXT: ASHR T1.W, PV.W, literal.x,
+; EG-NEXT: LSHR * T0.W, PV.W, literal.y,
+; EG-NEXT: 2(2.802597e-45), 31(4.344025e-44)
+; EG-NEXT: ADD_INT * T0.W, PV.W, PS,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, literal.x,
+; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T0.X, T0.X, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%num = load i32, ptr addrspace(1) %in
%result = srem i32 %num, 7
store i32 %result, ptr addrspace(1) %out
@@ -44,6 +457,261 @@ define amdgpu_kernel void @srem_i32_7(ptr addrspace(1) %out, ptr addrspace(1) %i
}
define amdgpu_kernel void @srem_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v4, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_readfirstlane_b32 s2, v2
+; GCN-NEXT: s_abs_i32 s2, s2
+; GCN-NEXT: v_cvt_f32_u32_e32 v2, s2
+; GCN-NEXT: v_readfirstlane_b32 s3, v0
+; GCN-NEXT: s_sub_i32 s6, 0, s2
+; GCN-NEXT: s_ashr_i32 s5, s3, 31
+; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
+; GCN-NEXT: s_abs_i32 s3, s3
+; GCN-NEXT: v_readfirstlane_b32 s4, v3
+; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v2
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: v_readfirstlane_b32 s7, v0
+; GCN-NEXT: s_mul_i32 s6, s6, s7
+; GCN-NEXT: s_mul_hi_u32 s6, s7, s6
+; GCN-NEXT: s_add_i32 s7, s7, s6
+; GCN-NEXT: s_mul_hi_u32 s6, s3, s7
+; GCN-NEXT: s_mul_i32 s6, s6, s2
+; GCN-NEXT: s_sub_i32 s3, s3, s6
+; GCN-NEXT: s_sub_i32 s6, s3, s2
+; GCN-NEXT: s_cmp_ge_u32 s3, s2
+; GCN-NEXT: s_cselect_b32 s3, s6, s3
+; GCN-NEXT: s_sub_i32 s6, s3, s2
+; GCN-NEXT: s_cmp_ge_u32 s3, s2
+; GCN-NEXT: s_cselect_b32 s2, s6, s3
+; GCN-NEXT: s_abs_i32 s3, s4
+; GCN-NEXT: v_cvt_f32_u32_e32 v0, s3
+; GCN-NEXT: s_xor_b32 s2, s2, s5
+; GCN-NEXT: s_sub_i32 s7, 0, s3
+; GCN-NEXT: s_sub_i32 s2, s2, s5
+; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT: v_readfirstlane_b32 s4, v1
+; GCN-NEXT: s_ashr_i32 s6, s4, 31
+; GCN-NEXT: s_abs_i32 s4, s4
+; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: v_readfirstlane_b32 s5, v0
+; GCN-NEXT: s_mul_i32 s7, s7, s5
+; GCN-NEXT: s_mul_hi_u32 s7, s5, s7
+; GCN-NEXT: s_add_i32 s5, s5, s7
+; GCN-NEXT: s_mul_hi_u32 s5, s4, s5
+; GCN-NEXT: s_mul_i32 s5, s5, s3
+; GCN-NEXT: s_sub_i32 s4, s4, s5
+; GCN-NEXT: s_sub_i32 s5, s4, s3
+; GCN-NEXT: s_cmp_ge_u32 s4, s3
+; GCN-NEXT: s_cselect_b32 s4, s5, s4
+; GCN-NEXT: s_sub_i32 s5, s4, s3
+; GCN-NEXT: s_cmp_ge_u32 s4, s3
+; GCN-NEXT: s_cselect_b32 s3, s5, s4
+; GCN-NEXT: s_xor_b32 s3, s3, s6
+; GCN-NEXT: s_sub_i32 s3, s3, s6
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: v_mov_b32_e32 v1, s3
+; GCN-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_v2i32:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s3, 0xf000
+; TAHITI-NEXT: s_mov_b32 s2, -1
+; TAHITI-NEXT: s_mov_b32 s10, s2
+; TAHITI-NEXT: s_mov_b32 s11, s3
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s6
+; TAHITI-NEXT: s_mov_b32 s9, s7
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_readfirstlane_b32 s0, v2
+; TAHITI-NEXT: s_abs_i32 s0, s0
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v2, s0
+; TAHITI-NEXT: s_sub_i32 s1, 0, s0
+; TAHITI-NEXT: v_readfirstlane_b32 s7, v3
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v2, v2
+; TAHITI-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; TAHITI-NEXT: v_mul_lo_u32 v4, s1, v2
+; TAHITI-NEXT: v_readfirstlane_b32 s1, v0
+; TAHITI-NEXT: s_abs_i32 s6, s1
+; TAHITI-NEXT: s_ashr_i32 s8, s1, 31
+; TAHITI-NEXT: v_mul_hi_u32 v4, v2, v4
+; TAHITI-NEXT: v_add_i32_e32 v0, vcc, v2, v4
+; TAHITI-NEXT: v_mul_hi_u32 v0, s6, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s1, v0
+; TAHITI-NEXT: s_mul_i32 s1, s1, s0
+; TAHITI-NEXT: s_sub_i32 s1, s6, s1
+; TAHITI-NEXT: s_sub_i32 s6, s1, s0
+; TAHITI-NEXT: s_cmp_ge_u32 s1, s0
+; TAHITI-NEXT: s_cselect_b32 s1, s6, s1
+; TAHITI-NEXT: s_sub_i32 s6, s1, s0
+; TAHITI-NEXT: s_cmp_ge_u32 s1, s0
+; TAHITI-NEXT: s_cselect_b32 s6, s6, s1
+; TAHITI-NEXT: s_abs_i32 s7, s7
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, s7
+; TAHITI-NEXT: s_sub_i32 s0, 0, s7
+; TAHITI-NEXT: s_mov_b32 s1, s5
+; TAHITI-NEXT: s_xor_b32 s6, s6, s8
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TAHITI-NEXT: s_sub_i32 s6, s6, s8
+; TAHITI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_lo_u32 v2, s0, v0
+; TAHITI-NEXT: s_mov_b32 s0, s4
+; TAHITI-NEXT: v_readfirstlane_b32 s4, v1
+; TAHITI-NEXT: s_abs_i32 s5, s4
+; TAHITI-NEXT: v_mul_hi_u32 v2, v0, v2
+; TAHITI-NEXT: s_ashr_i32 s4, s4, 31
+; TAHITI-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; TAHITI-NEXT: v_mul_hi_u32 v0, s5, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s8, v0
+; TAHITI-NEXT: s_mul_i32 s8, s8, s7
+; TAHITI-NEXT: s_sub_i32 s5, s5, s8
+; TAHITI-NEXT: s_sub_i32 s8, s5, s7
+; TAHITI-NEXT: s_cmp_ge_u32 s5, s7
+; TAHITI-NEXT: s_cselect_b32 s5, s8, s5
+; TAHITI-NEXT: s_sub_i32 s8, s5, s7
+; TAHITI-NEXT: s_cmp_ge_u32 s5, s7
+; TAHITI-NEXT: s_cselect_b32 s5, s8, s5
+; TAHITI-NEXT: s_xor_b32 s5, s5, s4
+; TAHITI-NEXT: s_sub_i32 s4, s5, s4
+; TAHITI-NEXT: v_mov_b32_e32 v0, s6
+; TAHITI-NEXT: v_mov_b32_e32 v1, s4
+; TAHITI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_v2i32:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_readfirstlane_b32 s2, v2
+; TONGA-NEXT: s_abs_i32 s2, s2
+; TONGA-NEXT: v_cvt_f32_u32_e32 v2, s2
+; TONGA-NEXT: s_sub_i32 s3, 0, s2
+; TONGA-NEXT: v_readfirstlane_b32 s5, v3
+; TONGA-NEXT: v_mov_b32_e32 v3, s1
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v2, v2
+; TONGA-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; TONGA-NEXT: v_cvt_u32_f32_e32 v2, v2
+; TONGA-NEXT: v_mul_lo_u32 v4, s3, v2
+; TONGA-NEXT: v_readfirstlane_b32 s3, v0
+; TONGA-NEXT: s_abs_i32 s4, s3
+; TONGA-NEXT: s_ashr_i32 s3, s3, 31
+; TONGA-NEXT: v_mul_hi_u32 v4, v2, v4
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v2, v4
+; TONGA-NEXT: v_mul_hi_u32 v0, s4, v0
+; TONGA-NEXT: v_mov_b32_e32 v2, s0
+; TONGA-NEXT: v_readfirstlane_b32 s0, v1
+; TONGA-NEXT: v_readfirstlane_b32 s6, v0
+; TONGA-NEXT: s_mul_i32 s6, s6, s2
+; TONGA-NEXT: s_sub_i32 s4, s4, s6
+; TONGA-NEXT: s_sub_i32 s6, s4, s2
+; TONGA-NEXT: s_cmp_ge_u32 s4, s2
+; TONGA-NEXT: s_cselect_b32 s4, s6, s4
+; TONGA-NEXT: s_sub_i32 s6, s4, s2
+; TONGA-NEXT: s_cmp_ge_u32 s4, s2
+; TONGA-NEXT: s_cselect_b32 s2, s6, s4
+; TONGA-NEXT: s_abs_i32 s4, s5
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s4
+; TONGA-NEXT: s_sub_i32 s5, 0, s4
+; TONGA-NEXT: s_abs_i32 s1, s0
+; TONGA-NEXT: s_xor_b32 s2, s2, s3
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT: s_sub_i32 s2, s2, s3
+; TONGA-NEXT: s_ashr_i32 s0, s0, 31
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_lo_u32 v4, s5, v0
+; TONGA-NEXT: v_mul_hi_u32 v4, v0, v4
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; TONGA-NEXT: v_mul_hi_u32 v0, s1, v0
+; TONGA-NEXT: v_readfirstlane_b32 s3, v0
+; TONGA-NEXT: s_mul_i32 s3, s3, s4
+; TONGA-NEXT: s_sub_i32 s1, s1, s3
+; TONGA-NEXT: s_sub_i32 s3, s1, s4
+; TONGA-NEXT: s_cmp_ge_u32 s1, s4
+; TONGA-NEXT: s_cselect_b32 s1, s3, s1
+; TONGA-NEXT: s_sub_i32 s3, s1, s4
+; TONGA-NEXT: s_cmp_ge_u32 s1, s4
+; TONGA-NEXT: s_cselect_b32 s1, s3, s1
+; TONGA-NEXT: s_xor_b32 s1, s1, s0
+; TONGA-NEXT: s_sub_i32 s0, s1, s0
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s0
+; TONGA-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_v2i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 45, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: SETGT_INT * T1.W, 0.0, T0.W,
+; EG-NEXT: ADD_INT T0.W, T0.W, PV.W,
+; EG-NEXT: SETGT_INT * T2.W, 0.0, T0.Z,
+; EG-NEXT: XOR_INT * T0.W, PV.W, T1.W,
+; EG-NEXT: SUB_INT T1.Z, 0.0, PV.W,
+; EG-NEXT: ADD_INT T1.W, T0.Z, T2.W,
+; EG-NEXT: RECIP_UINT * T0.Z, PV.W,
+; EG-NEXT: XOR_INT T1.W, PV.W, T2.W,
+; EG-NEXT: MULLO_INT * T1.X, PV.Z, PS,
+; EG-NEXT: SUB_INT T2.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T1.Y, PV.W,
+; EG-NEXT: SETGT_INT T3.W, 0.0, T0.X,
+; EG-NEXT: MULLO_INT * T1.Z, PV.W, PS,
+; EG-NEXT: SETGT_INT T2.Z, 0.0, T0.Y,
+; EG-NEXT: ADD_INT T2.W, T0.X, PV.W,
+; EG-NEXT: MULHI * T0.X, T1.Y, PS,
+; EG-NEXT: ADD_INT T1.Y, T1.Y, PS,
+; EG-NEXT: XOR_INT T1.Z, PV.W, T3.W,
+; EG-NEXT: ADD_INT T2.W, T0.Y, PV.Z, BS:VEC_120/SCL_212
+; EG-NEXT: MULHI * T0.X, T0.Z, T1.X,
+; EG-NEXT: ADD_INT T0.Z, T0.Z, PS,
+; EG-NEXT: XOR_INT T2.W, PV.W, T2.Z,
+; EG-NEXT: MULHI * T0.X, PV.Z, PV.Y,
+; EG-NEXT: MULHI * T0.Y, PV.W, PV.Z,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT T2.W, T2.W, PS,
+; EG-NEXT: MULLO_INT * T0.X, T0.X, T1.W,
+; EG-NEXT: SUB_INT T0.Z, T1.Z, PS,
+; EG-NEXT: SETGE_UINT T4.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T5.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.Z, PV.W, T2.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T2.W, PV.Z, T1.W,
+; EG-NEXT: SUB_INT * T4.W, PV.Z, T1.W,
+; EG-NEXT: CNDE_INT T0.Z, PV.W, T0.Z, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.Z, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.Z, T0.W,
+; EG-NEXT: CNDE_INT T1.Z, PV.W, T1.Z, PS,
+; EG-NEXT: SETGE_UINT T0.W, PV.Z, T1.W,
+; EG-NEXT: SUB_INT * T1.W, PV.Z, T1.W,
+; EG-NEXT: CNDE_INT T0.W, PV.W, T0.Z, PS, BS:VEC_021/SCL_122
+; EG-NEXT: XOR_INT * T1.W, PV.Z, T2.Z,
+; EG-NEXT: SUB_INT T0.Y, PS, T2.Z,
+; EG-NEXT: XOR_INT * T0.W, PV.W, T3.W,
+; EG-NEXT: SUB_INT T0.X, PV.W, T3.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 1
%num = load <2 x i32>, ptr addrspace(1) %in
%den = load <2 x i32>, ptr addrspace(1) %den_ptr
@@ -53,6 +721,118 @@ define amdgpu_kernel void @srem_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i
}
define amdgpu_kernel void @srem_v2i32_4(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_v2i32_4:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-NEXT: v_readfirstlane_b32 s3, v1
+; GCN-NEXT: s_ashr_i32 s4, s2, 31
+; GCN-NEXT: s_ashr_i32 s5, s3, 31
+; GCN-NEXT: s_lshr_b32 s4, s4, 30
+; GCN-NEXT: s_lshr_b32 s5, s5, 30
+; GCN-NEXT: s_add_i32 s4, s2, s4
+; GCN-NEXT: s_add_i32 s5, s3, s5
+; GCN-NEXT: s_and_b32 s4, s4, -4
+; GCN-NEXT: s_and_b32 s5, s5, -4
+; GCN-NEXT: s_sub_i32 s2, s2, s4
+; GCN-NEXT: s_sub_i32 s3, s3, s5
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: v_mov_b32_e32 v1, s3
+; GCN-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_v2i32_4:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: s_mov_b32 s10, s6
+; TAHITI-NEXT: s_mov_b32 s11, s7
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s2
+; TAHITI-NEXT: s_mov_b32 s9, s3
+; TAHITI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; TAHITI-NEXT: s_mov_b32 s4, s0
+; TAHITI-NEXT: s_mov_b32 s5, s1
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_readfirstlane_b32 s0, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s1, v1
+; TAHITI-NEXT: s_ashr_i32 s2, s0, 31
+; TAHITI-NEXT: s_ashr_i32 s3, s1, 31
+; TAHITI-NEXT: s_lshr_b32 s2, s2, 30
+; TAHITI-NEXT: s_lshr_b32 s3, s3, 30
+; TAHITI-NEXT: s_add_i32 s2, s0, s2
+; TAHITI-NEXT: s_add_i32 s3, s1, s3
+; TAHITI-NEXT: s_and_b32 s2, s2, -4
+; TAHITI-NEXT: s_and_b32 s3, s3, -4
+; TAHITI-NEXT: s_sub_i32 s0, s0, s2
+; TAHITI-NEXT: s_sub_i32 s1, s1, s3
+; TAHITI-NEXT: v_mov_b32_e32 v0, s0
+; TAHITI-NEXT: v_mov_b32_e32 v1, s1
+; TAHITI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_v2i32_4:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; TONGA-NEXT: v_mov_b32_e32 v2, s0
+; TONGA-NEXT: v_mov_b32_e32 v3, s1
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_readfirstlane_b32 s0, v0
+; TONGA-NEXT: v_readfirstlane_b32 s1, v1
+; TONGA-NEXT: s_ashr_i32 s2, s0, 31
+; TONGA-NEXT: s_ashr_i32 s3, s1, 31
+; TONGA-NEXT: s_lshr_b32 s2, s2, 30
+; TONGA-NEXT: s_lshr_b32 s3, s3, 30
+; TONGA-NEXT: s_add_i32 s2, s0, s2
+; TONGA-NEXT: s_add_i32 s3, s1, s3
+; TONGA-NEXT: s_and_b32 s2, s2, -4
+; TONGA-NEXT: s_and_b32 s3, s3, -4
+; TONGA-NEXT: s_sub_i32 s0, s0, s2
+; TONGA-NEXT: s_sub_i32 s1, s1, s3
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_v2i32_4:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 16, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: ASHR T0.W, T0.Y, literal.x,
+; EG-NEXT: ASHR * T1.W, T0.X, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR * T0.W, PV.W, literal.x,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T0.W, T0.Y, PV.W,
+; EG-NEXT: LSHR * T1.W, T1.W, literal.x,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T1.W, T0.X, PS,
+; EG-NEXT: AND_INT * T0.W, PV.W, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T0.Y, T0.Y, PS,
+; EG-NEXT: AND_INT * T0.W, PV.W, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T0.X, T0.X, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%num = load <2 x i32>, ptr addrspace(1) %in
%result = srem <2 x i32> %num, <i32 4, i32 4>
store <2 x i32> %result, ptr addrspace(1) %out
@@ -60,6 +840,472 @@ define amdgpu_kernel void @srem_v2i32_4(ptr addrspace(1) %out, ptr addrspace(1)
}
define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_v4i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[1:4], v0, s[2:3] offset:16
+; GCN-NEXT: global_load_dwordx4 v[5:8], v0, s[2:3]
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_readfirstlane_b32 s2, v1
+; GCN-NEXT: s_abs_i32 s2, s2
+; GCN-NEXT: v_cvt_f32_u32_e32 v1, s2
+; GCN-NEXT: s_sub_i32 s6, 0, s2
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_readfirstlane_b32 s4, v5
+; GCN-NEXT: s_ashr_i32 s5, s4, 31
+; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; GCN-NEXT: s_abs_i32 s4, s4
+; GCN-NEXT: v_readfirstlane_b32 s3, v2
+; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT: v_readfirstlane_b32 s7, v1
+; GCN-NEXT: s_mul_i32 s6, s6, s7
+; GCN-NEXT: s_mul_hi_u32 s6, s7, s6
+; GCN-NEXT: s_add_i32 s7, s7, s6
+; GCN-NEXT: s_mul_hi_u32 s6, s4, s7
+; GCN-NEXT: s_mul_i32 s6, s6, s2
+; GCN-NEXT: s_sub_i32 s4, s4, s6
+; GCN-NEXT: s_sub_i32 s6, s4, s2
+; GCN-NEXT: s_cmp_ge_u32 s4, s2
+; GCN-NEXT: s_cselect_b32 s4, s6, s4
+; GCN-NEXT: s_sub_i32 s6, s4, s2
+; GCN-NEXT: s_cmp_ge_u32 s4, s2
+; GCN-NEXT: s_cselect_b32 s2, s6, s4
+; GCN-NEXT: s_abs_i32 s3, s3
+; GCN-NEXT: v_cvt_f32_u32_e32 v1, s3
+; GCN-NEXT: s_xor_b32 s2, s2, s5
+; GCN-NEXT: s_sub_i32 s8, 0, s3
+; GCN-NEXT: s_sub_i32 s2, s2, s5
+; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; GCN-NEXT: v_readfirstlane_b32 s6, v6
+; GCN-NEXT: s_ashr_i32 s7, s6, 31
+; GCN-NEXT: s_abs_i32 s6, s6
+; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT: v_readfirstlane_b32 s4, v3
+; GCN-NEXT: v_readfirstlane_b32 s5, v1
+; GCN-NEXT: s_mul_i32 s8, s8, s5
+; GCN-NEXT: s_mul_hi_u32 s8, s5, s8
+; GCN-NEXT: s_add_i32 s5, s5, s8
+; GCN-NEXT: s_mul_hi_u32 s5, s6, s5
+; GCN-NEXT: s_mul_i32 s5, s5, s3
+; GCN-NEXT: s_sub_i32 s5, s6, s5
+; GCN-NEXT: s_sub_i32 s6, s5, s3
+; GCN-NEXT: s_cmp_ge_u32 s5, s3
+; GCN-NEXT: s_cselect_b32 s5, s6, s5
+; GCN-NEXT: s_sub_i32 s6, s5, s3
+; GCN-NEXT: s_cmp_ge_u32 s5, s3
+; GCN-NEXT: s_cselect_b32 s3, s6, s5
+; GCN-NEXT: s_abs_i32 s4, s4
+; GCN-NEXT: v_cvt_f32_u32_e32 v1, s4
+; GCN-NEXT: s_xor_b32 s3, s3, s7
+; GCN-NEXT: s_sub_i32 s9, 0, s4
+; GCN-NEXT: s_sub_i32 s3, s3, s7
+; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; GCN-NEXT: v_readfirstlane_b32 s6, v7
+; GCN-NEXT: s_ashr_i32 s8, s6, 31
+; GCN-NEXT: s_abs_i32 s6, s6
+; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT: v_readfirstlane_b32 s5, v4
+; GCN-NEXT: v_readfirstlane_b32 s7, v1
+; GCN-NEXT: s_mul_i32 s9, s9, s7
+; GCN-NEXT: s_mul_hi_u32 s9, s7, s9
+; GCN-NEXT: s_add_i32 s7, s7, s9
+; GCN-NEXT: s_mul_hi_u32 s7, s6, s7
+; GCN-NEXT: s_mul_i32 s7, s7, s4
+; GCN-NEXT: s_sub_i32 s6, s6, s7
+; GCN-NEXT: s_sub_i32 s7, s6, s4
+; GCN-NEXT: s_cmp_ge_u32 s6, s4
+; GCN-NEXT: s_cselect_b32 s6, s7, s6
+; GCN-NEXT: s_sub_i32 s7, s6, s4
+; GCN-NEXT: s_cmp_ge_u32 s6, s4
+; GCN-NEXT: s_cselect_b32 s4, s7, s6
+; GCN-NEXT: s_abs_i32 s5, s5
+; GCN-NEXT: v_cvt_f32_u32_e32 v2, s5
+; GCN-NEXT: v_readfirstlane_b32 s6, v8
+; GCN-NEXT: v_mov_b32_e32 v1, s2
+; GCN-NEXT: s_ashr_i32 s2, s6, 31
+; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v2
+; GCN-NEXT: v_mov_b32_e32 v2, s3
+; GCN-NEXT: s_abs_i32 s3, s6
+; GCN-NEXT: s_sub_i32 s6, 0, s5
+; GCN-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
+; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
+; GCN-NEXT: s_xor_b32 s4, s4, s8
+; GCN-NEXT: s_sub_i32 s4, s4, s8
+; GCN-NEXT: v_readfirstlane_b32 s7, v3
+; GCN-NEXT: s_mul_i32 s6, s6, s7
+; GCN-NEXT: s_mul_hi_u32 s6, s7, s6
+; GCN-NEXT: s_add_i32 s7, s7, s6
+; GCN-NEXT: s_mul_hi_u32 s6, s3, s7
+; GCN-NEXT: s_mul_i32 s6, s6, s5
+; GCN-NEXT: s_sub_i32 s3, s3, s6
+; GCN-NEXT: s_sub_i32 s6, s3, s5
+; GCN-NEXT: s_cmp_ge_u32 s3, s5
+; GCN-NEXT: s_cselect_b32 s3, s6, s3
+; GCN-NEXT: s_sub_i32 s6, s3, s5
+; GCN-NEXT: s_cmp_ge_u32 s3, s5
+; GCN-NEXT: s_cselect_b32 s3, s6, s3
+; GCN-NEXT: s_xor_b32 s3, s3, s2
+; GCN-NEXT: s_sub_i32 s2, s3, s2
+; GCN-NEXT: v_mov_b32_e32 v3, s4
+; GCN-NEXT: v_mov_b32_e32 v4, s2
+; GCN-NEXT: global_store_dwordx4 v0, v[1:4], s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_v4i32:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s3, 0xf000
+; TAHITI-NEXT: s_mov_b32 s2, -1
+; TAHITI-NEXT: s_mov_b32 s10, s2
+; TAHITI-NEXT: s_mov_b32 s11, s3
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s6
+; TAHITI-NEXT: s_mov_b32 s9, s7
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:16
+; TAHITI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0
+; TAHITI-NEXT: s_waitcnt vmcnt(1)
+; TAHITI-NEXT: v_readfirstlane_b32 s0, v0
+; TAHITI-NEXT: s_abs_i32 s0, s0
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, s0
+; TAHITI-NEXT: s_sub_i32 s1, 0, s0
+; TAHITI-NEXT: v_readfirstlane_b32 s7, v1
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_lo_u32 v8, s1, v0
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_readfirstlane_b32 s1, v4
+; TAHITI-NEXT: s_abs_i32 s6, s1
+; TAHITI-NEXT: s_ashr_i32 s1, s1, 31
+; TAHITI-NEXT: v_mul_hi_u32 v8, v0, v8
+; TAHITI-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; TAHITI-NEXT: v_mul_hi_u32 v0, s6, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s8, v0
+; TAHITI-NEXT: s_mul_i32 s8, s8, s0
+; TAHITI-NEXT: s_sub_i32 s6, s6, s8
+; TAHITI-NEXT: s_sub_i32 s8, s6, s0
+; TAHITI-NEXT: s_cmp_ge_u32 s6, s0
+; TAHITI-NEXT: s_cselect_b32 s6, s8, s6
+; TAHITI-NEXT: s_sub_i32 s8, s6, s0
+; TAHITI-NEXT: s_cmp_ge_u32 s6, s0
+; TAHITI-NEXT: s_cselect_b32 s0, s8, s6
+; TAHITI-NEXT: s_abs_i32 s6, s7
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, s6
+; TAHITI-NEXT: s_sub_i32 s7, 0, s6
+; TAHITI-NEXT: v_readfirstlane_b32 s8, v5
+; TAHITI-NEXT: s_abs_i32 s9, s8
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TAHITI-NEXT: s_xor_b32 s0, s0, s1
+; TAHITI-NEXT: s_sub_i32 s10, s0, s1
+; TAHITI-NEXT: s_ashr_i32 s8, s8, 31
+; TAHITI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_lo_u32 v1, s7, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s7, v2
+; TAHITI-NEXT: v_mul_hi_u32 v1, v0, v1
+; TAHITI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; TAHITI-NEXT: v_mul_hi_u32 v0, s9, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s0, v0
+; TAHITI-NEXT: s_mul_i32 s0, s0, s6
+; TAHITI-NEXT: s_sub_i32 s0, s9, s0
+; TAHITI-NEXT: s_sub_i32 s1, s0, s6
+; TAHITI-NEXT: s_cmp_ge_u32 s0, s6
+; TAHITI-NEXT: s_cselect_b32 s0, s1, s0
+; TAHITI-NEXT: s_sub_i32 s1, s0, s6
+; TAHITI-NEXT: s_cmp_ge_u32 s0, s6
+; TAHITI-NEXT: s_cselect_b32 s0, s1, s0
+; TAHITI-NEXT: s_abs_i32 s1, s7
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, s1
+; TAHITI-NEXT: s_sub_i32 s6, 0, s1
+; TAHITI-NEXT: v_readfirstlane_b32 s7, v6
+; TAHITI-NEXT: s_abs_i32 s9, s7
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TAHITI-NEXT: s_xor_b32 s0, s0, s8
+; TAHITI-NEXT: s_sub_i32 s8, s0, s8
+; TAHITI-NEXT: s_ashr_i32 s7, s7, 31
+; TAHITI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_lo_u32 v1, s6, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s6, v3
+; TAHITI-NEXT: v_mul_hi_u32 v1, v0, v1
+; TAHITI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; TAHITI-NEXT: v_mul_hi_u32 v0, s9, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s0, v0
+; TAHITI-NEXT: s_mul_i32 s0, s0, s1
+; TAHITI-NEXT: s_sub_i32 s0, s9, s0
+; TAHITI-NEXT: s_sub_i32 s9, s0, s1
+; TAHITI-NEXT: s_cmp_ge_u32 s0, s1
+; TAHITI-NEXT: s_cselect_b32 s0, s9, s0
+; TAHITI-NEXT: s_sub_i32 s9, s0, s1
+; TAHITI-NEXT: s_cmp_ge_u32 s0, s1
+; TAHITI-NEXT: s_cselect_b32 s9, s9, s0
+; TAHITI-NEXT: s_abs_i32 s6, s6
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, s6
+; TAHITI-NEXT: s_sub_i32 s1, 0, s6
+; TAHITI-NEXT: s_mov_b32 s0, s4
+; TAHITI-NEXT: v_readfirstlane_b32 s4, v7
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v2, v0
+; TAHITI-NEXT: v_mov_b32_e32 v0, s10
+; TAHITI-NEXT: v_mul_lo_u32 v1, s1, v2
+; TAHITI-NEXT: s_mov_b32 s1, s5
+; TAHITI-NEXT: s_abs_i32 s5, s4
+; TAHITI-NEXT: s_ashr_i32 s4, s4, 31
+; TAHITI-NEXT: v_mul_hi_u32 v3, v2, v1
+; TAHITI-NEXT: v_mov_b32_e32 v1, s8
+; TAHITI-NEXT: s_xor_b32 s8, s9, s7
+; TAHITI-NEXT: s_sub_i32 s7, s8, s7
+; TAHITI-NEXT: v_add_i32_e32 v2, vcc, v2, v3
+; TAHITI-NEXT: v_mul_hi_u32 v2, s5, v2
+; TAHITI-NEXT: v_readfirstlane_b32 s8, v2
+; TAHITI-NEXT: s_mul_i32 s8, s8, s6
+; TAHITI-NEXT: s_sub_i32 s5, s5, s8
+; TAHITI-NEXT: s_sub_i32 s8, s5, s6
+; TAHITI-NEXT: s_cmp_ge_u32 s5, s6
+; TAHITI-NEXT: s_cselect_b32 s5, s8, s5
+; TAHITI-NEXT: s_sub_i32 s8, s5, s6
+; TAHITI-NEXT: s_cmp_ge_u32 s5, s6
+; TAHITI-NEXT: s_cselect_b32 s5, s8, s5
+; TAHITI-NEXT: s_xor_b32 s5, s5, s4
+; TAHITI-NEXT: s_sub_i32 s4, s5, s4
+; TAHITI-NEXT: v_mov_b32_e32 v2, s7
+; TAHITI-NEXT: v_mov_b32_e32 v3, s4
+; TAHITI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_v4i32:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: s_add_u32 s4, s2, 16
+; TONGA-NEXT: s_addc_u32 s5, s3, 0
+; TONGA-NEXT: v_mov_b32_e32 v0, s4
+; TONGA-NEXT: v_mov_b32_e32 v1, s5
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: v_mov_b32_e32 v5, s3
+; TONGA-NEXT: v_mov_b32_e32 v4, s2
+; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; TONGA-NEXT: s_waitcnt vmcnt(1)
+; TONGA-NEXT: v_readfirstlane_b32 s2, v0
+; TONGA-NEXT: s_abs_i32 s2, s2
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s2
+; TONGA-NEXT: s_sub_i32 s3, 0, s2
+; TONGA-NEXT: v_readfirstlane_b32 s5, v1
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_lo_u32 v8, s3, v0
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_readfirstlane_b32 s3, v4
+; TONGA-NEXT: s_abs_i32 s4, s3
+; TONGA-NEXT: s_ashr_i32 s3, s3, 31
+; TONGA-NEXT: v_mul_hi_u32 v8, v0, v8
+; TONGA-NEXT: v_mov_b32_e32 v4, s0
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v8
+; TONGA-NEXT: v_mul_hi_u32 v0, s4, v0
+; TONGA-NEXT: v_readfirstlane_b32 s6, v0
+; TONGA-NEXT: s_mul_i32 s6, s6, s2
+; TONGA-NEXT: s_sub_i32 s4, s4, s6
+; TONGA-NEXT: s_sub_i32 s6, s4, s2
+; TONGA-NEXT: s_cmp_ge_u32 s4, s2
+; TONGA-NEXT: s_cselect_b32 s4, s6, s4
+; TONGA-NEXT: s_sub_i32 s6, s4, s2
+; TONGA-NEXT: s_cmp_ge_u32 s4, s2
+; TONGA-NEXT: s_cselect_b32 s2, s6, s4
+; TONGA-NEXT: s_abs_i32 s4, s5
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s4
+; TONGA-NEXT: s_sub_i32 s5, 0, s4
+; TONGA-NEXT: v_readfirstlane_b32 s6, v5
+; TONGA-NEXT: s_abs_i32 s7, s6
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT: s_xor_b32 s2, s2, s3
+; TONGA-NEXT: s_sub_i32 s2, s2, s3
+; TONGA-NEXT: s_ashr_i32 s6, s6, 31
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TONGA-NEXT: v_mov_b32_e32 v5, s1
+; TONGA-NEXT: v_mul_lo_u32 v1, s5, v0
+; TONGA-NEXT: v_readfirstlane_b32 s5, v2
+; TONGA-NEXT: v_mul_hi_u32 v1, v0, v1
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT: v_mul_hi_u32 v0, s7, v0
+; TONGA-NEXT: v_readfirstlane_b32 s3, v0
+; TONGA-NEXT: s_mul_i32 s3, s3, s4
+; TONGA-NEXT: s_sub_i32 s3, s7, s3
+; TONGA-NEXT: s_sub_i32 s7, s3, s4
+; TONGA-NEXT: s_cmp_ge_u32 s3, s4
+; TONGA-NEXT: s_cselect_b32 s3, s7, s3
+; TONGA-NEXT: s_sub_i32 s7, s3, s4
+; TONGA-NEXT: s_cmp_ge_u32 s3, s4
+; TONGA-NEXT: s_cselect_b32 s3, s7, s3
+; TONGA-NEXT: s_abs_i32 s4, s5
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s4
+; TONGA-NEXT: s_sub_i32 s5, 0, s4
+; TONGA-NEXT: v_readfirstlane_b32 s7, v6
+; TONGA-NEXT: s_abs_i32 s8, s7
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT: s_xor_b32 s3, s3, s6
+; TONGA-NEXT: s_sub_i32 s3, s3, s6
+; TONGA-NEXT: s_ashr_i32 s7, s7, 31
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_lo_u32 v1, s5, v0
+; TONGA-NEXT: v_readfirstlane_b32 s5, v3
+; TONGA-NEXT: v_mul_hi_u32 v1, v0, v1
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT: v_mul_hi_u32 v0, s8, v0
+; TONGA-NEXT: v_readfirstlane_b32 s6, v0
+; TONGA-NEXT: s_mul_i32 s6, s6, s4
+; TONGA-NEXT: s_sub_i32 s6, s8, s6
+; TONGA-NEXT: s_sub_i32 s8, s6, s4
+; TONGA-NEXT: s_cmp_ge_u32 s6, s4
+; TONGA-NEXT: s_cselect_b32 s6, s8, s6
+; TONGA-NEXT: s_sub_i32 s8, s6, s4
+; TONGA-NEXT: s_cmp_ge_u32 s6, s4
+; TONGA-NEXT: s_cselect_b32 s4, s8, s6
+; TONGA-NEXT: s_abs_i32 s5, s5
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s5
+; TONGA-NEXT: s_sub_i32 s0, 0, s5
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v2, v0
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: s_xor_b32 s2, s4, s7
+; TONGA-NEXT: s_sub_i32 s2, s2, s7
+; TONGA-NEXT: v_mul_lo_u32 v1, s0, v2
+; TONGA-NEXT: v_readfirstlane_b32 s0, v7
+; TONGA-NEXT: s_abs_i32 s1, s0
+; TONGA-NEXT: s_ashr_i32 s0, s0, 31
+; TONGA-NEXT: v_mul_hi_u32 v3, v2, v1
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: v_add_u32_e32 v2, vcc, v2, v3
+; TONGA-NEXT: v_mul_hi_u32 v2, s1, v2
+; TONGA-NEXT: v_readfirstlane_b32 s3, v2
+; TONGA-NEXT: s_mul_i32 s3, s3, s5
+; TONGA-NEXT: s_sub_i32 s1, s1, s3
+; TONGA-NEXT: s_sub_i32 s3, s1, s5
+; TONGA-NEXT: s_cmp_ge_u32 s1, s5
+; TONGA-NEXT: s_cselect_b32 s1, s3, s1
+; TONGA-NEXT: s_sub_i32 s3, s1, s5
+; TONGA-NEXT: s_cmp_ge_u32 s1, s5
+; TONGA-NEXT: s_cselect_b32 s1, s3, s1
+; TONGA-NEXT: s_xor_b32 s1, s1, s0
+; TONGA-NEXT: s_sub_i32 s0, s1, s0
+; TONGA-NEXT: v_mov_b32_e32 v2, s2
+; TONGA-NEXT: v_mov_b32_e32 v3, s0
+; TONGA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_v4i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 89, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T2.XYZW, T0.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_128 T1.XYZW, T0.X, 16, #1
+; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: SETGT_INT * T2.W, 0.0, T1.Z,
+; EG-NEXT: ADD_INT T3.W, T1.Z, PV.W,
+; EG-NEXT: SETGT_INT * T4.W, 0.0, T1.Y,
+; EG-NEXT: XOR_INT * T2.W, PV.W, T2.W,
+; EG-NEXT: SUB_INT T1.Z, 0.0, PV.W,
+; EG-NEXT: ADD_INT T3.W, T1.Y, T4.W,
+; EG-NEXT: RECIP_UINT * T1.Y, PV.W,
+; EG-NEXT: XOR_INT T3.W, PV.W, T4.W,
+; EG-NEXT: MULLO_INT * T1.Z, PV.Z, PS,
+; EG-NEXT: SUB_INT T4.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T2.X, PV.W,
+; EG-NEXT: SETGT_INT T5.W, 0.0, T0.Y,
+; EG-NEXT: MULLO_INT * T2.Y, PV.W, PS,
+; EG-NEXT: SETGT_INT T2.Z, 0.0, T0.Z,
+; EG-NEXT: ADD_INT T4.W, T0.Y, PV.W,
+; EG-NEXT: MULHI * T0.Y, T2.X, PS,
+; EG-NEXT: ADD_INT T2.X, T2.X, PS,
+; EG-NEXT: XOR_INT T0.Y, PV.W, T5.W,
+; EG-NEXT: SETGT_INT T3.Z, 0.0, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: ADD_INT T4.W, T0.Z, PV.Z,
+; EG-NEXT: MULHI * T0.Z, T1.Y, T1.Z,
+; EG-NEXT: ADD_INT T1.Y, T1.Y, PS,
+; EG-NEXT: XOR_INT T0.Z, PV.W, T2.Z,
+; EG-NEXT: ADD_INT T1.W, T1.W, PV.Z,
+; EG-NEXT: MULHI * T1.Z, PV.Y, PV.X,
+; EG-NEXT: XOR_INT T1.W, PV.W, T3.Z,
+; EG-NEXT: MULHI * T1.Y, PV.Z, PV.Y,
+; EG-NEXT: SUB_INT T4.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T2.X, PV.W,
+; EG-NEXT: SETGT_INT T6.W, 0.0, T0.W,
+; EG-NEXT: MULLO_INT * T2.Y, PV.W, PS,
+; EG-NEXT: ADD_INT T0.W, T0.W, PV.W,
+; EG-NEXT: MULHI * T2.Y, T2.X, PS,
+; EG-NEXT: ADD_INT T2.Y, T2.X, PS,
+; EG-NEXT: XOR_INT T3.Z, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGT_INT T0.W, 0.0, T1.X,
+; EG-NEXT: MULLO_INT * T1.Y, T1.Y, T2.W,
+; EG-NEXT: ADD_INT T4.W, T1.X, PV.W,
+; EG-NEXT: MULHI * T1.X, PV.Z, PV.Y,
+; EG-NEXT: XOR_INT T0.W, PV.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: MULLO_INT * T1.X, PS, T1.W,
+; EG-NEXT: SUB_INT T4.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T2.X, PV.W,
+; EG-NEXT: SETGT_INT T7.W, 0.0, T0.X,
+; EG-NEXT: MULLO_INT * T2.Y, PV.W, PS,
+; EG-NEXT: ADD_INT T4.W, T0.X, PV.W,
+; EG-NEXT: MULHI * T0.X, T2.X, PS,
+; EG-NEXT: ADD_INT T0.X, T2.X, PS,
+; EG-NEXT: XOR_INT T2.Y, PV.W, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T3.Z, T3.Z, T1.X,
+; EG-NEXT: SUB_INT T4.W, T0.Z, T1.Y, BS:VEC_120/SCL_212
+; EG-NEXT: MULLO_INT * T0.Z, T1.Z, T3.W,
+; EG-NEXT: SETGE_UINT T1.X, PV.W, T2.W,
+; EG-NEXT: SUB_INT T0.Y, T0.Y, PS,
+; EG-NEXT: SETGE_UINT T0.Z, PV.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T8.W, PV.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: MULHI * T0.X, PV.Y, PV.X,
+; EG-NEXT: SUB_INT T2.X, T4.W, T2.W,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, T3.Z, PV.W,
+; EG-NEXT: SETGE_UINT T0.Z, PV.Y, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T8.W, PV.Y, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: MULLO_INT * T0.X, T0.X, T0.W,
+; EG-NEXT: CNDE_INT T3.X, T0.Z, T0.Y, T8.W,
+; EG-NEXT: SETGE_UINT T0.Y, T1.Y, T1.W,
+; EG-NEXT: SUB_INT T0.Z, T1.Y, T1.W,
+; EG-NEXT: CNDE_INT T1.W, T1.X, T4.W, T2.X, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT * T4.W, T2.Y, PS,
+; EG-NEXT: SETGE_UINT T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT T2.Y, PS, T0.W,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T2.W, PV.W, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T8.W, PV.Y, T1.Y, PV.Z,
+; EG-NEXT: XOR_INT T1.X, PS, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.Z, T1.W, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T0.Z, PV.X, T4.W, PV.Y, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T1.W, T3.X, T3.W,
+; EG-NEXT: SUB_INT * T2.W, T3.X, T3.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T3.X, PS,
+; EG-NEXT: SETGE_UINT T1.Y, T0.Z, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T1.Z, T0.Z, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: XOR_INT T0.W, T0.Y, T2.Z,
+; EG-NEXT: SUB_INT * T2.W, T1.X, T6.W,
+; EG-NEXT: SUB_INT T2.Z, PV.W, T2.Z,
+; EG-NEXT: CNDE_INT T0.W, PV.Y, T0.Z, PV.Z, BS:VEC_021/SCL_122
+; EG-NEXT: XOR_INT * T1.W, PV.X, T5.W,
+; EG-NEXT: SUB_INT T2.Y, PS, T5.W,
+; EG-NEXT: XOR_INT * T0.W, PV.W, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T2.X, PV.W, T7.W,
+; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr <4 x i32>, ptr addrspace(1) %in, i32 1
%num = load <4 x i32>, ptr addrspace(1) %in
%den = load <4 x i32>, ptr addrspace(1) %den_ptr
@@ -69,6 +1315,173 @@ define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
}
define amdgpu_kernel void @srem_v4i32_4(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_v4i32_4:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v4, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-NEXT: v_readfirstlane_b32 s3, v1
+; GCN-NEXT: v_readfirstlane_b32 s4, v2
+; GCN-NEXT: v_readfirstlane_b32 s5, v3
+; GCN-NEXT: s_ashr_i32 s6, s2, 31
+; GCN-NEXT: s_ashr_i32 s7, s3, 31
+; GCN-NEXT: s_ashr_i32 s8, s4, 31
+; GCN-NEXT: s_ashr_i32 s9, s5, 31
+; GCN-NEXT: s_lshr_b32 s6, s6, 30
+; GCN-NEXT: s_lshr_b32 s7, s7, 30
+; GCN-NEXT: s_lshr_b32 s8, s8, 30
+; GCN-NEXT: s_lshr_b32 s9, s9, 30
+; GCN-NEXT: s_add_i32 s6, s2, s6
+; GCN-NEXT: s_add_i32 s7, s3, s7
+; GCN-NEXT: s_add_i32 s8, s4, s8
+; GCN-NEXT: s_add_i32 s9, s5, s9
+; GCN-NEXT: s_and_b32 s6, s6, -4
+; GCN-NEXT: s_and_b32 s7, s7, -4
+; GCN-NEXT: s_and_b32 s8, s8, -4
+; GCN-NEXT: s_and_b32 s9, s9, -4
+; GCN-NEXT: s_sub_i32 s2, s2, s6
+; GCN-NEXT: s_sub_i32 s3, s3, s7
+; GCN-NEXT: s_sub_i32 s4, s4, s8
+; GCN-NEXT: s_sub_i32 s5, s5, s9
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: v_mov_b32_e32 v1, s3
+; GCN-NEXT: v_mov_b32_e32 v2, s4
+; GCN-NEXT: v_mov_b32_e32 v3, s5
+; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_v4i32_4:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s3, 0xf000
+; TAHITI-NEXT: s_mov_b32 s2, -1
+; TAHITI-NEXT: s_mov_b32 s10, s2
+; TAHITI-NEXT: s_mov_b32 s11, s3
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s6
+; TAHITI-NEXT: s_mov_b32 s9, s7
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; TAHITI-NEXT: s_mov_b32 s0, s4
+; TAHITI-NEXT: s_mov_b32 s1, s5
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_readfirstlane_b32 s4, v0
+; TAHITI-NEXT: v_readfirstlane_b32 s5, v1
+; TAHITI-NEXT: v_readfirstlane_b32 s6, v2
+; TAHITI-NEXT: v_readfirstlane_b32 s7, v3
+; TAHITI-NEXT: s_ashr_i32 s8, s4, 31
+; TAHITI-NEXT: s_ashr_i32 s9, s5, 31
+; TAHITI-NEXT: s_ashr_i32 s10, s6, 31
+; TAHITI-NEXT: s_ashr_i32 s11, s7, 31
+; TAHITI-NEXT: s_lshr_b32 s8, s8, 30
+; TAHITI-NEXT: s_lshr_b32 s9, s9, 30
+; TAHITI-NEXT: s_lshr_b32 s10, s10, 30
+; TAHITI-NEXT: s_lshr_b32 s11, s11, 30
+; TAHITI-NEXT: s_add_i32 s8, s4, s8
+; TAHITI-NEXT: s_add_i32 s9, s5, s9
+; TAHITI-NEXT: s_add_i32 s10, s6, s10
+; TAHITI-NEXT: s_add_i32 s11, s7, s11
+; TAHITI-NEXT: s_and_b32 s8, s8, -4
+; TAHITI-NEXT: s_and_b32 s9, s9, -4
+; TAHITI-NEXT: s_and_b32 s10, s10, -4
+; TAHITI-NEXT: s_and_b32 s11, s11, -4
+; TAHITI-NEXT: s_sub_i32 s4, s4, s8
+; TAHITI-NEXT: s_sub_i32 s5, s5, s9
+; TAHITI-NEXT: s_sub_i32 s6, s6, s10
+; TAHITI-NEXT: s_sub_i32 s7, s7, s11
+; TAHITI-NEXT: v_mov_b32_e32 v0, s4
+; TAHITI-NEXT: v_mov_b32_e32 v1, s5
+; TAHITI-NEXT: v_mov_b32_e32 v2, s6
+; TAHITI-NEXT: v_mov_b32_e32 v3, s7
+; TAHITI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_v4i32_4:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: v_mov_b32_e32 v4, s0
+; TONGA-NEXT: v_mov_b32_e32 v5, s1
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_readfirstlane_b32 s0, v0
+; TONGA-NEXT: v_readfirstlane_b32 s1, v1
+; TONGA-NEXT: v_readfirstlane_b32 s2, v2
+; TONGA-NEXT: v_readfirstlane_b32 s3, v3
+; TONGA-NEXT: s_ashr_i32 s4, s0, 31
+; TONGA-NEXT: s_ashr_i32 s5, s1, 31
+; TONGA-NEXT: s_ashr_i32 s6, s2, 31
+; TONGA-NEXT: s_ashr_i32 s7, s3, 31
+; TONGA-NEXT: s_lshr_b32 s4, s4, 30
+; TONGA-NEXT: s_lshr_b32 s5, s5, 30
+; TONGA-NEXT: s_lshr_b32 s6, s6, 30
+; TONGA-NEXT: s_lshr_b32 s7, s7, 30
+; TONGA-NEXT: s_add_i32 s4, s0, s4
+; TONGA-NEXT: s_add_i32 s5, s1, s5
+; TONGA-NEXT: s_add_i32 s6, s2, s6
+; TONGA-NEXT: s_add_i32 s7, s3, s7
+; TONGA-NEXT: s_and_b32 s4, s4, -4
+; TONGA-NEXT: s_and_b32 s5, s5, -4
+; TONGA-NEXT: s_and_b32 s6, s6, -4
+; TONGA-NEXT: s_and_b32 s7, s7, -4
+; TONGA-NEXT: s_sub_i32 s0, s0, s4
+; TONGA-NEXT: s_sub_i32 s1, s1, s5
+; TONGA-NEXT: s_sub_i32 s2, s2, s6
+; TONGA-NEXT: s_sub_i32 s3, s3, s7
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: v_mov_b32_e32 v2, s2
+; TONGA-NEXT: v_mov_b32_e32 v3, s3
+; TONGA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_v4i32_4:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 29, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: ASHR * T1.W, T0.W, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR T1.W, PV.W, literal.x,
+; EG-NEXT: ASHR * T2.W, T0.Z, literal.y,
+; EG-NEXT: 30(4.203895e-44), 31(4.344025e-44)
+; EG-NEXT: ASHR T1.Z, T0.Y, literal.x,
+; EG-NEXT: LSHR T2.W, PS, literal.y,
+; EG-NEXT: ADD_INT * T1.W, T0.W, PV.W,
+; EG-NEXT: 31(4.344025e-44), 30(4.203895e-44)
+; EG-NEXT: AND_INT T1.Y, PS, literal.x,
+; EG-NEXT: ADD_INT T2.Z, T0.Z, PV.W,
+; EG-NEXT: ASHR T1.W, T0.X, literal.y,
+; EG-NEXT: LSHR * T2.W, PV.Z, literal.z,
+; EG-NEXT: -4(nan), 31(4.344025e-44)
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T2.Y, T0.Y, PS,
+; EG-NEXT: LSHR T1.Z, PV.W, literal.x,
+; EG-NEXT: AND_INT T1.W, PV.Z, literal.y,
+; EG-NEXT: SUB_INT * T0.W, T0.W, PV.Y,
+; EG-NEXT: 30(4.203895e-44), -4(nan)
+; EG-NEXT: SUB_INT T0.Z, T0.Z, PV.W,
+; EG-NEXT: ADD_INT T1.W, T0.X, PV.Z,
+; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T0.Y, T0.Y, PS,
+; EG-NEXT: AND_INT * T1.W, PV.W, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T0.X, T0.X, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%num = load <4 x i32>, ptr addrspace(1) %in
%result = srem <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
store <4 x i32> %result, ptr addrspace(1) %out
@@ -76,6 +1489,1095 @@ define amdgpu_kernel void @srem_v4i32_4(ptr addrspace(1) %out, ptr addrspace(1)
}
define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[0:3], v0, s[6:7]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_readfirstlane_b32 s9, v1
+; GCN-NEXT: v_readfirstlane_b32 s8, v0
+; GCN-NEXT: v_readfirstlane_b32 s7, v3
+; GCN-NEXT: v_readfirstlane_b32 s6, v2
+; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[6:7]
+; GCN-NEXT: s_mov_b32 s0, 0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_cbranch_scc0 .LBB8_4
+; GCN-NEXT: ; %bb.1:
+; GCN-NEXT: s_ashr_i32 s0, s7, 31
+; GCN-NEXT: s_add_u32 s2, s6, s0
+; GCN-NEXT: s_mov_b32 s1, s0
+; GCN-NEXT: s_addc_u32 s3, s7, s0
+; GCN-NEXT: s_xor_b64 s[12:13], s[2:3], s[0:1]
+; GCN-NEXT: v_cvt_f32_u32_e32 v0, s12
+; GCN-NEXT: v_cvt_f32_u32_e32 v1, s13
+; GCN-NEXT: s_sub_u32 s0, 0, s12
+; GCN-NEXT: s_subb_u32 s1, 0, s13
+; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
+; GCN-NEXT: v_rcp_f32_e32 v0, v0
+; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT: v_trunc_f32_e32 v1, v1
+; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
+; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: v_readfirstlane_b32 s2, v1
+; GCN-NEXT: v_readfirstlane_b32 s3, v0
+; GCN-NEXT: s_mul_i32 s7, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s15, s0, s3
+; GCN-NEXT: s_mul_i32 s14, s1, s3
+; GCN-NEXT: s_add_i32 s7, s15, s7
+; GCN-NEXT: s_add_i32 s7, s7, s14
+; GCN-NEXT: s_mul_i32 s16, s0, s3
+; GCN-NEXT: s_mul_hi_u32 s14, s3, s7
+; GCN-NEXT: s_mul_i32 s15, s3, s7
+; GCN-NEXT: s_mul_hi_u32 s3, s3, s16
+; GCN-NEXT: s_add_u32 s3, s3, s15
+; GCN-NEXT: s_addc_u32 s14, 0, s14
+; GCN-NEXT: s_mul_hi_u32 s17, s2, s16
+; GCN-NEXT: s_mul_i32 s16, s2, s16
+; GCN-NEXT: s_add_u32 s3, s3, s16
+; GCN-NEXT: s_mul_hi_u32 s15, s2, s7
+; GCN-NEXT: s_addc_u32 s3, s14, s17
+; GCN-NEXT: s_addc_u32 s14, s15, 0
+; GCN-NEXT: s_mul_i32 s7, s2, s7
+; GCN-NEXT: s_add_u32 s3, s3, s7
+; GCN-NEXT: s_addc_u32 s7, 0, s14
+; GCN-NEXT: v_add_co_u32_e32 v0, vcc, s3, v0
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_addc_u32 s2, s2, s7
+; GCN-NEXT: v_readfirstlane_b32 s7, v0
+; GCN-NEXT: s_mul_i32 s3, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s14, s0, s7
+; GCN-NEXT: s_add_i32 s3, s14, s3
+; GCN-NEXT: s_mul_i32 s1, s1, s7
+; GCN-NEXT: s_add_i32 s3, s3, s1
+; GCN-NEXT: s_mul_i32 s0, s0, s7
+; GCN-NEXT: s_mul_hi_u32 s14, s2, s0
+; GCN-NEXT: s_mul_i32 s15, s2, s0
+; GCN-NEXT: s_mul_i32 s17, s7, s3
+; GCN-NEXT: s_mul_hi_u32 s0, s7, s0
+; GCN-NEXT: s_mul_hi_u32 s16, s7, s3
+; GCN-NEXT: s_add_u32 s0, s0, s17
+; GCN-NEXT: s_addc_u32 s7, 0, s16
+; GCN-NEXT: s_add_u32 s0, s0, s15
+; GCN-NEXT: s_mul_hi_u32 s1, s2, s3
+; GCN-NEXT: s_addc_u32 s0, s7, s14
+; GCN-NEXT: s_addc_u32 s1, s1, 0
+; GCN-NEXT: s_mul_i32 s3, s2, s3
+; GCN-NEXT: s_add_u32 s0, s0, s3
+; GCN-NEXT: s_addc_u32 s1, 0, s1
+; GCN-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_addc_u32 s2, s2, s1
+; GCN-NEXT: s_ashr_i32 s14, s9, 31
+; GCN-NEXT: s_add_u32 s0, s8, s14
+; GCN-NEXT: s_mov_b32 s15, s14
+; GCN-NEXT: s_addc_u32 s1, s9, s14
+; GCN-NEXT: s_xor_b64 s[16:17], s[0:1], s[14:15]
+; GCN-NEXT: v_readfirstlane_b32 s3, v0
+; GCN-NEXT: s_mul_i32 s1, s16, s2
+; GCN-NEXT: s_mul_hi_u32 s7, s16, s3
+; GCN-NEXT: s_mul_hi_u32 s0, s16, s2
+; GCN-NEXT: s_add_u32 s1, s7, s1
+; GCN-NEXT: s_addc_u32 s0, 0, s0
+; GCN-NEXT: s_mul_hi_u32 s9, s17, s3
+; GCN-NEXT: s_mul_i32 s3, s17, s3
+; GCN-NEXT: s_add_u32 s1, s1, s3
+; GCN-NEXT: s_mul_hi_u32 s7, s17, s2
+; GCN-NEXT: s_addc_u32 s0, s0, s9
+; GCN-NEXT: s_addc_u32 s1, s7, 0
+; GCN-NEXT: s_mul_i32 s2, s17, s2
+; GCN-NEXT: s_add_u32 s0, s0, s2
+; GCN-NEXT: s_addc_u32 s1, 0, s1
+; GCN-NEXT: s_mul_i32 s1, s12, s1
+; GCN-NEXT: s_mul_hi_u32 s2, s12, s0
+; GCN-NEXT: s_add_i32 s1, s2, s1
+; GCN-NEXT: s_mul_i32 s2, s13, s0
+; GCN-NEXT: s_mul_i32 s0, s12, s0
+; GCN-NEXT: s_add_i32 s7, s1, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_sub_i32 s1, s17, s7
+; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, s16, v0
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_subb_u32 s9, s1, s13
+; GCN-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s12, v0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_subb_u32 s15, s9, 0
+; GCN-NEXT: s_cmp_ge_u32 s15, s13
+; GCN-NEXT: s_cselect_b32 s16, -1, 0
+; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v1
+; GCN-NEXT: s_cmp_eq_u32 s15, s13
+; GCN-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[2:3]
+; GCN-NEXT: v_mov_b32_e32 v3, s16
+; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: v_cndmask_b32_e64 v2, v3, v2, s[2:3]
+; GCN-NEXT: s_subb_u32 s2, s9, s13
+; GCN-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s12, v1
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_subb_u32 s2, s2, 0
+; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1]
+; GCN-NEXT: v_mov_b32_e32 v2, s15
+; GCN-NEXT: v_mov_b32_e32 v3, s2
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1]
+; GCN-NEXT: s_subb_u32 s0, s17, s7
+; GCN-NEXT: s_cmp_ge_u32 s0, s13
+; GCN-NEXT: s_cselect_b32 s1, -1, 0
+; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v0
+; GCN-NEXT: s_cmp_eq_u32 s0, s13
+; GCN-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
+; GCN-NEXT: v_mov_b32_e32 v4, s1
+; GCN-NEXT: s_cselect_b64 vcc, -1, 0
+; GCN-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; GCN-NEXT: v_mov_b32_e32 v4, s0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GCN-NEXT: v_xor_b32_e32 v0, s14, v0
+; GCN-NEXT: v_xor_b32_e32 v1, s14, v2
+; GCN-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NEXT: v_subrev_co_u32_e32 v0, vcc, s14, v0
+; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-NEXT: s_cbranch_execnz .LBB8_3
+; GCN-NEXT: .LBB8_2:
+; GCN-NEXT: v_cvt_f32_u32_e32 v0, s6
+; GCN-NEXT: s_sub_i32 s0, 0, s6
+; GCN-NEXT: s_mov_b32 s1, 0
+; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-NEXT: s_mul_i32 s0, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s0, s2, s0
+; GCN-NEXT: s_add_i32 s2, s2, s0
+; GCN-NEXT: s_mul_hi_u32 s0, s8, s2
+; GCN-NEXT: s_mul_i32 s0, s0, s6
+; GCN-NEXT: s_sub_i32 s0, s8, s0
+; GCN-NEXT: s_sub_i32 s2, s0, s6
+; GCN-NEXT: s_cmp_ge_u32 s0, s6
+; GCN-NEXT: s_cselect_b32 s0, s2, s0
+; GCN-NEXT: s_sub_i32 s2, s0, s6
+; GCN-NEXT: s_cmp_ge_u32 s0, s6
+; GCN-NEXT: s_cselect_b32 s0, s2, s0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: .LBB8_3:
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
+; GCN-NEXT: s_endpgm
+; GCN-NEXT: .LBB8_4:
+; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN-NEXT: s_branch .LBB8_2
+;
+; TAHITI-LABEL: srem_i64:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s3, 0xf000
+; TAHITI-NEXT: s_mov_b32 s2, -1
+; TAHITI-NEXT: v_mov_b32_e32 v4, 0
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s0, s6
+; TAHITI-NEXT: s_mov_b32 s1, s7
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_or_b32_e32 v5, v1, v3
+; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; TAHITI-NEXT: s_cbranch_vccz .LBB8_4
+; TAHITI-NEXT: ; %bb.1:
+; TAHITI-NEXT: v_ashrrev_i32_e32 v5, 31, v3
+; TAHITI-NEXT: v_add_i32_e32 v4, vcc, v2, v5
+; TAHITI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v4, v4, v5
+; TAHITI-NEXT: v_xor_b32_e32 v3, v3, v5
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v5, v4
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v6, v3
+; TAHITI-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
+; TAHITI-NEXT: v_subb_u32_e32 v8, vcc, 0, v3, vcc
+; TAHITI-NEXT: v_madmk_f32 v5, v6, 0x4f800000, v5
+; TAHITI-NEXT: v_rcp_f32_e32 v5, v5
+; TAHITI-NEXT: v_mul_f32_e32 v5, 0x5f7ffffc, v5
+; TAHITI-NEXT: v_mul_f32_e32 v6, 0x2f800000, v5
+; TAHITI-NEXT: v_trunc_f32_e32 v6, v6
+; TAHITI-NEXT: v_madmk_f32 v5, v6, 0xcf800000, v5
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v6, v6
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v5, v5
+; TAHITI-NEXT: v_mul_lo_u32 v10, v7, v6
+; TAHITI-NEXT: v_mul_hi_u32 v9, v7, v5
+; TAHITI-NEXT: v_mul_lo_u32 v11, v8, v5
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v10, v9
+; TAHITI-NEXT: v_mul_lo_u32 v10, v7, v5
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v9, v11
+; TAHITI-NEXT: v_mul_lo_u32 v11, v5, v9
+; TAHITI-NEXT: v_mul_hi_u32 v12, v5, v10
+; TAHITI-NEXT: v_mul_hi_u32 v13, v5, v9
+; TAHITI-NEXT: v_mul_hi_u32 v14, v6, v9
+; TAHITI-NEXT: v_mul_lo_u32 v9, v6, v9
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v12, v11
+; TAHITI-NEXT: v_addc_u32_e32 v12, vcc, 0, v13, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v13, v6, v10
+; TAHITI-NEXT: v_mul_hi_u32 v10, v6, v10
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v13
+; TAHITI-NEXT: v_addc_u32_e32 v10, vcc, v12, v10, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v11, vcc, 0, v14, vcc
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v10, v9
+; TAHITI-NEXT: v_addc_u32_e32 v10, vcc, 0, v11, vcc
+; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; TAHITI-NEXT: v_addc_u32_e32 v6, vcc, v6, v10, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v9, v7, v6
+; TAHITI-NEXT: v_mul_hi_u32 v10, v7, v5
+; TAHITI-NEXT: v_mul_lo_u32 v8, v8, v5
+; TAHITI-NEXT: v_mul_lo_u32 v7, v7, v5
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v9, v10
+; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v9, v8
+; TAHITI-NEXT: v_mul_lo_u32 v11, v5, v8
+; TAHITI-NEXT: v_mul_hi_u32 v12, v5, v7
+; TAHITI-NEXT: v_mul_hi_u32 v13, v5, v8
+; TAHITI-NEXT: v_mul_hi_u32 v10, v6, v7
+; TAHITI-NEXT: v_mul_lo_u32 v7, v6, v7
+; TAHITI-NEXT: v_mul_hi_u32 v9, v6, v8
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v12, v11
+; TAHITI-NEXT: v_addc_u32_e32 v12, vcc, 0, v13, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v8, v6, v8
+; TAHITI-NEXT: v_add_i32_e32 v7, vcc, v11, v7
+; TAHITI-NEXT: v_addc_u32_e32 v7, vcc, v12, v10, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TAHITI-NEXT: v_add_i32_e32 v7, vcc, v7, v8
+; TAHITI-NEXT: v_addc_u32_e32 v8, vcc, 0, v9, vcc
+; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v5, v7
+; TAHITI-NEXT: v_addc_u32_e32 v6, vcc, v6, v8, vcc
+; TAHITI-NEXT: v_ashrrev_i32_e32 v7, 31, v1
+; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v0, v7
+; TAHITI-NEXT: v_xor_b32_e32 v8, v8, v7
+; TAHITI-NEXT: v_mul_lo_u32 v9, v8, v6
+; TAHITI-NEXT: v_mul_hi_u32 v10, v8, v5
+; TAHITI-NEXT: v_mul_hi_u32 v11, v8, v6
+; TAHITI-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v7
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v10, v9
+; TAHITI-NEXT: v_addc_u32_e32 v10, vcc, 0, v11, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v11, v1, v5
+; TAHITI-NEXT: v_mul_hi_u32 v5, v1, v5
+; TAHITI-NEXT: v_mul_hi_u32 v12, v1, v6
+; TAHITI-NEXT: v_mul_lo_u32 v6, v1, v6
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v9, v11
+; TAHITI-NEXT: v_addc_u32_e32 v5, vcc, v10, v5, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v9, vcc, 0, v12, vcc
+; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v5, v6
+; TAHITI-NEXT: v_addc_u32_e32 v6, vcc, 0, v9, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v6, v4, v6
+; TAHITI-NEXT: v_mul_hi_u32 v9, v4, v5
+; TAHITI-NEXT: v_mul_lo_u32 v10, v3, v5
+; TAHITI-NEXT: v_mul_lo_u32 v5, v4, v5
+; TAHITI-NEXT: v_add_i32_e32 v6, vcc, v6, v9
+; TAHITI-NEXT: v_add_i32_e32 v6, vcc, v10, v6
+; TAHITI-NEXT: v_sub_i32_e32 v9, vcc, v1, v6
+; TAHITI-NEXT: v_sub_i32_e32 v5, vcc, v8, v5
+; TAHITI-NEXT: v_subb_u32_e64 v8, s[0:1], v9, v3, vcc
+; TAHITI-NEXT: v_sub_i32_e64 v9, s[0:1], v5, v4
+; TAHITI-NEXT: v_subbrev_u32_e64 v10, s[2:3], 0, v8, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v10, v3
+; TAHITI-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v9, v4
+; TAHITI-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_eq_u32_e64 s[2:3], v10, v3
+; TAHITI-NEXT: v_subb_u32_e64 v8, s[0:1], v8, v3, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
+; TAHITI-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[2:3]
+; TAHITI-NEXT: v_sub_i32_e64 v12, s[0:1], v9, v4
+; TAHITI-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v5, v4
+; TAHITI-NEXT: v_subbrev_u32_e64 v8, s[0:1], 0, v8, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
+; TAHITI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v11
+; TAHITI-NEXT: v_cndmask_b32_e32 v3, v6, v4, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v9, v9, v12, s[0:1]
+; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; TAHITI-NEXT: v_cndmask_b32_e64 v8, v10, v8, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e32 v3, v5, v9, vcc
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v3, v3, v7
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v7
+; TAHITI-NEXT: v_sub_i32_e32 v3, vcc, v3, v7
+; TAHITI-NEXT: v_subb_u32_e32 v4, vcc, v1, v7, vcc
+; TAHITI-NEXT: s_cbranch_execnz .LBB8_3
+; TAHITI-NEXT: .LBB8_2:
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v1, v2
+; TAHITI-NEXT: v_sub_i32_e32 v3, vcc, 0, v2
+; TAHITI-NEXT: v_mov_b32_e32 v4, 0
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TAHITI-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TAHITI-NEXT: v_mul_lo_u32 v3, v3, v1
+; TAHITI-NEXT: v_mul_hi_u32 v3, v1, v3
+; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; TAHITI-NEXT: v_mul_hi_u32 v1, v0, v1
+; TAHITI-NEXT: v_mul_lo_u32 v1, v1, v2
+; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TAHITI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v0, v2
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TAHITI-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; TAHITI-NEXT: .LBB8_3:
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: buffer_store_dwordx2 v[3:4], off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+; TAHITI-NEXT: .LBB8_4:
+; TAHITI-NEXT: ; implicit-def: $vgpr3_vgpr4
+; TAHITI-NEXT: s_branch .LBB8_2
+;
+; TONGA-LABEL: srem_i64:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; TONGA-NEXT: v_mov_b32_e32 v4, 0
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s6
+; TONGA-NEXT: v_mov_b32_e32 v1, s7
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_or_b32_e32 v5, v1, v3
+; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; TONGA-NEXT: s_cbranch_vccz .LBB8_4
+; TONGA-NEXT: ; %bb.1:
+; TONGA-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; TONGA-NEXT: v_add_u32_e32 v5, vcc, v2, v4
+; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v4, vcc
+; TONGA-NEXT: v_xor_b32_e32 v9, v5, v4
+; TONGA-NEXT: v_xor_b32_e32 v10, v3, v4
+; TONGA-NEXT: v_cvt_f32_u32_e32 v3, v9
+; TONGA-NEXT: v_cvt_f32_u32_e32 v4, v10
+; TONGA-NEXT: v_sub_u32_e32 v11, vcc, 0, v9
+; TONGA-NEXT: v_subb_u32_e32 v12, vcc, 0, v10, vcc
+; TONGA-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3
+; TONGA-NEXT: v_rcp_f32_e32 v3, v3
+; TONGA-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3
+; TONGA-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3
+; TONGA-NEXT: v_trunc_f32_e32 v4, v4
+; TONGA-NEXT: v_madmk_f32 v3, v4, 0xcf800000, v3
+; TONGA-NEXT: v_cvt_u32_f32_e32 v7, v4
+; TONGA-NEXT: v_cvt_u32_f32_e32 v8, v3
+; TONGA-NEXT: v_mul_lo_u32 v5, v11, v7
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v11, v8, 0
+; TONGA-NEXT: v_mul_lo_u32 v6, v12, v8
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v4, v5
+; TONGA-NEXT: v_add_u32_e32 v6, vcc, v4, v6
+; TONGA-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v8, v6, 0
+; TONGA-NEXT: v_mul_hi_u32 v13, v8, v3
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v4
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v7, v3, 0
+; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v5, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[5:6], s[0:1], v7, v6, 0
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v13, v3
+; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v14, v4, vcc
+; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v6, vcc
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v3, v5
+; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v8, v3
+; TONGA-NEXT: v_addc_u32_e32 v14, vcc, v7, v4, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v11, v13, 0
+; TONGA-NEXT: v_mul_lo_u32 v7, v11, v14
+; TONGA-NEXT: v_mul_lo_u32 v8, v12, v13
+; TONGA-NEXT: v_mul_hi_u32 v11, v13, v3
+; TONGA-NEXT: v_mad_u64_u32 v[5:6], s[0:1], v14, v3, 0
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v7, v4
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v4, v8
+; TONGA-NEXT: v_mad_u64_u32 v[7:8], s[0:1], v13, v4, 0
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v14, v4, 0
+; TONGA-NEXT: v_add_u32_e32 v7, vcc, v11, v7
+; TONGA-NEXT: v_addc_u32_e32 v8, vcc, 0, v8, vcc
+; TONGA-NEXT: v_add_u32_e32 v5, vcc, v7, v5
+; TONGA-NEXT: v_addc_u32_e32 v5, vcc, v8, v6, vcc
+; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v5, v3
+; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; TONGA-NEXT: v_add_u32_e32 v5, vcc, v13, v3
+; TONGA-NEXT: v_addc_u32_e32 v6, vcc, v14, v4, vcc
+; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v1
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v0, v7
+; TONGA-NEXT: v_xor_b32_e32 v8, v3, v7
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v8, v6, 0
+; TONGA-NEXT: v_mul_hi_u32 v11, v8, v5
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v7
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v3
+; TONGA-NEXT: v_addc_u32_e32 v12, vcc, 0, v4, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v1, v5, 0
+; TONGA-NEXT: v_mad_u64_u32 v[5:6], s[0:1], v1, v6, 0
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v11, v3
+; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v12, v4, vcc
+; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v6, vcc
+; TONGA-NEXT: v_add_u32_e32 v5, vcc, v3, v5
+; TONGA-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
+; TONGA-NEXT: v_mul_lo_u32 v6, v9, v3
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v9, v5, 0
+; TONGA-NEXT: v_mul_lo_u32 v5, v10, v5
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v6, v4
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v5, v4
+; TONGA-NEXT: v_sub_u32_e32 v5, vcc, v1, v4
+; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v8, v3
+; TONGA-NEXT: v_subb_u32_e64 v5, s[0:1], v5, v10, vcc
+; TONGA-NEXT: v_sub_u32_e64 v6, s[0:1], v3, v9
+; TONGA-NEXT: v_subbrev_u32_e64 v8, s[2:3], 0, v5, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v8, v10
+; TONGA-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v6, v9
+; TONGA-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v8, v10
+; TONGA-NEXT: v_subb_u32_e64 v5, s[0:1], v5, v10, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[2:3]
+; TONGA-NEXT: v_sub_u32_e64 v12, s[0:1], v6, v9
+; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
+; TONGA-NEXT: v_subbrev_u32_e64 v5, s[0:1], 0, v5, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v10
+; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v11
+; TONGA-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v3, v9
+; TONGA-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
+; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v1, v10
+; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v6, v6, v12, s[0:1]
+; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; TONGA-NEXT: v_xor_b32_e32 v3, v3, v7
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v7
+; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v3, v7
+; TONGA-NEXT: v_subb_u32_e32 v4, vcc, v1, v7, vcc
+; TONGA-NEXT: s_cbranch_execnz .LBB8_3
+; TONGA-NEXT: .LBB8_2:
+; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v2
+; TONGA-NEXT: v_sub_u32_e32 v3, vcc, 0, v2
+; TONGA-NEXT: v_mov_b32_e32 v4, 0
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_lo_u32 v3, v3, v1
+; TONGA-NEXT: v_mul_hi_u32 v3, v1, v3
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; TONGA-NEXT: v_mul_hi_u32 v1, v0, v1
+; TONGA-NEXT: v_mul_lo_u32 v1, v1, v2
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v0, v2
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; TONGA-NEXT: .LBB8_3:
+; TONGA-NEXT: v_mov_b32_e32 v0, s4
+; TONGA-NEXT: v_mov_b32_e32 v1, s5
+; TONGA-NEXT: flat_store_dwordx2 v[0:1], v[3:4]
+; TONGA-NEXT: s_endpgm
+; TONGA-NEXT: .LBB8_4:
+; TONGA-NEXT: ; implicit-def: $vgpr3_vgpr4
+; TONGA-NEXT: s_branch .LBB8_2
+;
+; EG-LABEL: srem_i64:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @14, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @12
+; EG-NEXT: ALU_PUSH_BEFORE 4, @15, KC0[], KC1[]
+; EG-NEXT: JUMP @9 POP:1
+; EG-NEXT: ALU 114, @20, KC0[], KC1[]
+; EG-NEXT: ALU 115, @135, KC0[], KC1[]
+; EG-NEXT: ALU 115, @251, KC0[], KC1[]
+; EG-NEXT: ALU 115, @367, KC0[], KC1[]
+; EG-NEXT: ALU_POP_AFTER 82, @483, KC0[], KC1[]
+; EG-NEXT: ALU 20, @566, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XY, T0.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: Fetch clause starting at 12:
+; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 14:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 15:
+; EG-NEXT: OR_INT T2.W, T0.Y, T0.W,
+; EG-NEXT: MOV * T1.W, literal.x,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: SETNE_INT * T2.W, PV.W, 0.0,
+; EG-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; EG-NEXT: ALU clause starting at 20:
+; EG-NEXT: ASHR * T3.W, T0.W, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T1.W, T0.Z, PV.W,
+; EG-NEXT: XOR_INT * T2.W, PV.W, T3.W,
+; EG-NEXT: SUB_INT T1.Z, 0.0, PV.W,
+; EG-NEXT: ASHR T1.W, T0.Y, literal.x,
+; EG-NEXT: RECIP_UINT * T1.X, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T2.Z, T0.Y, PV.W,
+; EG-NEXT: ADDC_UINT T4.W, T0.X, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.Z, PS,
+; EG-NEXT: ADD_INT T4.W, PV.Z, PV.W,
+; EG-NEXT: MULHI * T0.Y, T1.X, PS,
+; EG-NEXT: ADD_INT T5.W, T1.X, PS,
+; EG-NEXT: XOR_INT * T4.W, PV.W, T1.W,
+; EG-NEXT: MULHI * T0.Y, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T2.W,
+; EG-NEXT: SUB_INT * T5.W, T4.W, PS,
+; EG-NEXT: SETGE_UINT T6.W, PV.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, PV.W, T2.W,
+; EG-NEXT: CNDE_INT T1.Z, PV.W, T5.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: ADD_INT T0.W, T0.W, T3.W,
+; EG-NEXT: ADDC_UINT * T5.W, T0.Z, T3.W,
+; EG-NEXT: ADD_INT T2.Z, PV.W, PS,
+; EG-NEXT: SETGE_UINT T0.W, PV.Z, T2.W,
+; EG-NEXT: SUB_INT * T5.W, PV.Z, T2.W,
+; EG-NEXT: ADD_INT T3.Z, T0.X, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T5.W, PV.W, T1.Z, PS,
+; EG-NEXT: XOR_INT * T0.W, PV.Z, T3.W,
+; EG-NEXT: CNDE_INT T4.W, PS, PV.W, T4.W,
+; EG-NEXT: XOR_INT * T3.W, PV.Z, T1.W,
+; EG-NEXT: BIT_ALIGN_INT T5.W, PV.W, PS, literal.x,
+; EG-NEXT: LSHR * T4.W, PV.W, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T1.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.W, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T2.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.Z, PV.W, PS,
+; EG-NEXT: SUB_INT * T1.Z, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T6.W, T4.W, T0.W,
+; EG-NEXT: SUBB_UINT * T7.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT T6.W, T6.W, PV.W,
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 29(4.063766e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 28(3.923636e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 27(3.783506e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 26(3.643376e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT * T1.Z, PS, T2.W,
+; EG-NEXT: ALU clause starting at 135:
+; EG-NEXT: SETE_INT T6.W, T4.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, T4.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, T1.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 25(3.503246e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 22(3.082857e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 21(2.942727e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 20(2.802597e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 19(2.662467e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T1.Z, T5.W, T2.W,
+; EG-NEXT: ALU clause starting at 251:
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 18(2.522337e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 17(2.382207e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 15(2.101948e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 14(1.961818e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 13(1.821688e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 12(1.681558e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: ALU clause starting at 367:
+; EG-NEXT: LSHL T1.Z, T5.W, 1,
+; EG-NEXT: BFE_UINT * T7.W, T3.W, literal.x, 1, BS:VEC_120/SCL_212
+; EG-NEXT: 11(1.541428e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T6.W,
+; EG-NEXT: BIT_ALIGN_INT T4.W, PV.W, T5.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T5.W, T1.Z, T7.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 10(1.401298e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 6(8.407791e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 5(7.006492e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 483:
+; EG-NEXT: BIT_ALIGN_INT T4.W, T4.W, T5.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T5.W, T1.Z, T7.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: BFE_UINT T7.W, T3.W, 1, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T5.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.Z, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.W, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT * T7.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T5.W, T2.W,
+; EG-NEXT: SUBB_UINT * T6.W, T5.W, T2.W,
+; EG-NEXT: SUB_INT * T7.W, T4.W, T0.W,
+; EG-NEXT: SUB_INT T6.W, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T5.W, T0.Y, T5.W, T1.Z,
+; EG-NEXT: LSHL T1.Z, PS, 1,
+; EG-NEXT: AND_INT T3.W, T3.W, 1,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, PV.W,
+; EG-NEXT: BIT_ALIGN_INT T4.W, PS, T5.W, literal.x,
+; EG-NEXT: OR_INT * T3.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.X, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T0.Y, PV.W, T0.W,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T0.W,
+; EG-NEXT: SUBB_UINT T5.W, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT T2.Z, T3.W, T2.W,
+; EG-NEXT: SUB_INT T0.W, PS, PV.W,
+; EG-NEXT: CNDE_INT * T2.W, PV.Y, PV.Z, PV.X,
+; EG-NEXT: CNDE_INT T0.W, PS, T4.W, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T2.W, PS, T3.W, PV.Z,
+; EG-NEXT: XOR_INT T2.W, PS, T1.W,
+; EG-NEXT: XOR_INT * T0.W, PV.W, T1.W,
+; EG-NEXT: SUB_INT T0.W, PS, T1.W,
+; EG-NEXT: SUBB_UINT * T3.W, PV.W, T1.W,
+; EG-NEXT: SUB_INT * T1.Y, PV.W, PS,
+; EG-NEXT: SUB_INT T1.X, T2.W, T1.W,
+; EG-NEXT: MOV * T1.W, literal.x,
+; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 566:
+; EG-NEXT: MOV T0.W, KC0[2].Y,
+; EG-NEXT: SETE_INT * T1.W, T1.W, 0.0,
+; EG-NEXT: PRED_SETNE_INT * Pred,PredicateBit (MASKED), PS, 0.0,
+; EG-NEXT: SUB_INT T1.W, 0.0, T0.Z, Pred_sel_zero
+; EG-NEXT: RECIP_UINT * T0.Y, T0.Z, Pred_sel_zero
+; EG-NEXT: MULLO_INT * T1.X, T1.W, T0.Y, Pred_sel_zero
+; EG-NEXT: MULHI * T1.X, T0.Y, T1.X, Pred_sel_zero
+; EG-NEXT: ADD_INT * T1.W, T0.Y, T1.X, Pred_sel_zero
+; EG-NEXT: MULHI * T0.Y, T0.X, T1.W, Pred_sel_zero
+; EG-NEXT: MULLO_INT * T0.Y, T0.Y, T0.Z, Pred_sel_zero
+; EG-NEXT: SUB_INT * T1.W, T0.X, T0.Y, Pred_sel_zero
+; EG-NEXT: SETGE_UINT T2.W, T1.W, T0.Z, Pred_sel_zero
+; EG-NEXT: SUB_INT * T3.W, T1.W, T0.Z, Pred_sel_zero
+; EG-NEXT: CNDE_INT * T1.W, T2.W, T1.W, T3.W, Pred_sel_zero
+; EG-NEXT: SETGE_UINT T2.W, T1.W, T0.Z, Pred_sel_zero
+; EG-NEXT: SUB_INT * T3.W, T1.W, T0.Z, Pred_sel_zero
+; EG-NEXT: CNDE_INT T1.X, T2.W, T1.W, T3.W, Pred_sel_zero
+; EG-NEXT: MOV * T1.Y, literal.x, Pred_sel_zero
+; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; EG-NEXT: LSHR * T0.X, T0.W, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
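The unrolled EG clauses above repeat one restoring-division step per quotient bit: the partial remainder is shifted left (LSHL/BIT_ALIGN_INT), the next numerator bit is ORed in (BFE_UINT/OR_INT), and the divisor is compared and conditionally subtracted (SETGE_UINT/SETE_INT feeding SUBB_UINT/CNDE_INT), since R600 has no 64-bit multiply-high to build a reciprocal with. A minimal C sketch of that per-bit step, assuming a plain 64-bit restoring remainder loop (the R600 output interleaves the two 32-bit register halves, so this illustrates the algorithm rather than the exact emitted sequence):

  #include <stdint.h>

  /* One restoring-division step per bit, as in the unrolled EG clauses:
     shift the remainder left, bring in the next numerator bit, then
     conditionally subtract the divisor (den must be nonzero). */
  static uint64_t urem_restoring(uint64_t num, uint64_t den) {
      uint64_t rem = 0;
      for (int i = 63; i >= 0; --i) {
          rem = (rem << 1) | ((num >> i) & 1); /* LSHL + BFE_UINT/OR_INT */
          if (rem >= den)                      /* SETGE_UINT/SETE_INT    */
              rem -= den;                      /* SUBB_UINT + CNDE_INT   */
      }
      return rem;
  }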
%den_ptr = getelementptr i64, ptr addrspace(1) %in, i64 1
%num = load i64, ptr addrspace(1) %in
%den = load i64, ptr addrspace(1) %den_ptr
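On the VALU paths above (TAHITI/TONGA), the same srem is expanded multiplicatively instead: both operands are made non-negative by the ashr/add/xor prologue, v_rcp_f32 of the divisor scaled by 0x4f800000 (2^32 as a float) seeds a 64-bit reciprocal, two Newton-Raphson rounds run through the mul_lo/mul_hi chains, one more mul_hi estimates the quotient, and the v_cmp/v_cndmask ladders apply up to two conditional corrections before the remainder's sign is restored from the dividend. The .LBB8_2 blocks are the 32-bit fallback taken when both high words are zero; a hedged C sketch of that fallback (illustration only; 0x4f7ffffe is the float just below 2^32, biasing the reciprocal low so the corrections only ever subtract):

  #include <stdint.h>
  #include <string.h>

  /* Sketch of the 32-bit unsigned-remainder fallback (.LBB8_2):
     reciprocal estimate + one Newton-Raphson step + two fixups. */
  static uint32_t urem32_rcp(uint32_t x, uint32_t y) {
      uint32_t bits = 0x4f7ffffeu;               /* just below 2^32 as a float */
      float scale;
      memcpy(&scale, &bits, sizeof scale);
      uint32_t v = (uint32_t)(scale / (float)y); /* v_rcp_iflag_f32 + v_mul_f32 */
      uint32_t e = (0u - y) * v;                 /* error 2^32 - y*v (mod 2^32) */
      v += (uint32_t)(((uint64_t)v * e) >> 32);  /* one Newton-Raphson step */
      uint32_t q = (uint32_t)(((uint64_t)x * v) >> 32); /* quotient estimate */
      uint32_t r = x - q * y;
      if (r >= y) r -= y;                        /* first v_cmp/v_cndmask fixup */
      if (r >= y) r -= y;                        /* second fixup */
      return r;
  }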
@@ -85,6 +2587,94 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
}
define amdgpu_kernel void @srem_i64_4(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_i64_4:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_ashrrev_i32_e32 v3, 31, v1
+; GCN-NEXT: v_lshrrev_b32_e32 v3, 30, v3
+; GCN-NEXT: v_add_co_u32_e32 v3, vcc, v0, v3
+; GCN-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-NEXT: v_and_b32_e32 v3, -4, v3
+; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v3
+; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
+; GCN-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_i64_4:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: s_mov_b32 s10, s6
+; TAHITI-NEXT: s_mov_b32 s11, s7
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s2
+; TAHITI-NEXT: s_mov_b32 s9, s3
+; TAHITI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; TAHITI-NEXT: s_mov_b32 s4, s0
+; TAHITI-NEXT: s_mov_b32 s5, s1
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; TAHITI-NEXT: v_lshrrev_b32_e32 v2, 30, v2
+; TAHITI-NEXT: v_add_i32_e32 v2, vcc, v0, v2
+; TAHITI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; TAHITI-NEXT: v_and_b32_e32 v2, -4, v2
+; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; TAHITI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; TAHITI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_i64_4:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; TONGA-NEXT: v_mov_b32_e32 v2, s0
+; TONGA-NEXT: v_mov_b32_e32 v3, s1
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; TONGA-NEXT: v_lshrrev_b32_e32 v4, 30, v4
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v0, v4
+; TONGA-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; TONGA-NEXT: v_and_b32_e32 v4, -4, v4
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v4
+; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; TONGA-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_i64_4:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 13, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: ASHR * T0.W, T0.Y, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR * T0.W, PV.W, literal.x,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T1.W, T0.X, PV.W,
+; EG-NEXT: AND_INT T1.W, PV.W, literal.x,
+; EG-NEXT: ADDC_UINT * T0.W, T0.X, T0.W,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: BFE_INT T0.W, PS, 0.0, 1,
+; EG-NEXT: SUBB_UINT * T2.W, T0.X, PV.W,
+; EG-NEXT: SUB_INT * T0.Y, PV.W, PS,
+; EG-NEXT: SUB_INT T0.X, T0.X, T1.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%num = load i64, ptr addrspace(1) %in
%result = srem i64 %num, 4
store i64 %result, ptr addrspace(1) %out
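srem by a power of two needs no division at all: the ashrrev/lshrrev pair above materializes a bias of divisor-1 for negative dividends, the biased value is masked down to a multiple of 4 with v_and_b32 -4, and the difference is the remainder. A minimal C sketch of the strength reduction (illustration only, not the backend code):

  #include <stdint.h>

  /* srem by 4 without dividing: round toward zero to a multiple of 4
     by biasing negative inputs, then subtract the rounded value. */
  static int64_t srem4(int64_t x) {
      int64_t bias = (x < 0) ? 3 : 0;             /* ashrrev 31 + lshrrev 30 */
      int64_t rounded = (x + bias) & ~(int64_t)3; /* 64-bit add, then & -4 */
      return x - rounded;
  }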
@@ -92,6 +2682,2046 @@ define amdgpu_kernel void @srem_i64_4(ptr addrspace(1) %out, ptr addrspace(1) %i
}
define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_v2i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v8, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[6:7] offset:16
+; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[6:7]
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_readfirstlane_b32 s11, v1
+; GCN-NEXT: v_readfirstlane_b32 s10, v0
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_readfirstlane_b32 s13, v5
+; GCN-NEXT: v_readfirstlane_b32 s12, v4
+; GCN-NEXT: s_or_b64 s[0:1], s[12:13], s[10:11]
+; GCN-NEXT: s_mov_b32 s0, 0
+; GCN-NEXT: v_readfirstlane_b32 s7, v3
+; GCN-NEXT: v_readfirstlane_b32 s6, v2
+; GCN-NEXT: v_readfirstlane_b32 s9, v7
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: v_readfirstlane_b32 s8, v6
+; GCN-NEXT: s_cbranch_scc0 .LBB10_7
+; GCN-NEXT: ; %bb.1:
+; GCN-NEXT: s_ashr_i32 s0, s11, 31
+; GCN-NEXT: s_add_u32 s2, s10, s0
+; GCN-NEXT: s_mov_b32 s1, s0
+; GCN-NEXT: s_addc_u32 s3, s11, s0
+; GCN-NEXT: s_xor_b64 s[16:17], s[2:3], s[0:1]
+; GCN-NEXT: v_cvt_f32_u32_e32 v0, s16
+; GCN-NEXT: v_cvt_f32_u32_e32 v1, s17
+; GCN-NEXT: s_sub_u32 s0, 0, s16
+; GCN-NEXT: s_subb_u32 s1, 0, s17
+; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
+; GCN-NEXT: v_rcp_f32_e32 v0, v0
+; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT: v_trunc_f32_e32 v1, v1
+; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
+; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: v_readfirstlane_b32 s2, v1
+; GCN-NEXT: v_readfirstlane_b32 s3, v0
+; GCN-NEXT: s_mul_i32 s11, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s19, s0, s3
+; GCN-NEXT: s_mul_i32 s18, s1, s3
+; GCN-NEXT: s_add_i32 s11, s19, s11
+; GCN-NEXT: s_add_i32 s11, s11, s18
+; GCN-NEXT: s_mul_i32 s20, s0, s3
+; GCN-NEXT: s_mul_hi_u32 s18, s3, s11
+; GCN-NEXT: s_mul_i32 s19, s3, s11
+; GCN-NEXT: s_mul_hi_u32 s3, s3, s20
+; GCN-NEXT: s_add_u32 s3, s3, s19
+; GCN-NEXT: s_addc_u32 s18, 0, s18
+; GCN-NEXT: s_mul_hi_u32 s21, s2, s20
+; GCN-NEXT: s_mul_i32 s20, s2, s20
+; GCN-NEXT: s_add_u32 s3, s3, s20
+; GCN-NEXT: s_mul_hi_u32 s19, s2, s11
+; GCN-NEXT: s_addc_u32 s3, s18, s21
+; GCN-NEXT: s_addc_u32 s18, s19, 0
+; GCN-NEXT: s_mul_i32 s11, s2, s11
+; GCN-NEXT: s_add_u32 s3, s3, s11
+; GCN-NEXT: s_addc_u32 s11, 0, s18
+; GCN-NEXT: v_add_co_u32_e32 v0, vcc, s3, v0
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_addc_u32 s2, s2, s11
+; GCN-NEXT: v_readfirstlane_b32 s11, v0
+; GCN-NEXT: s_mul_i32 s3, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s18, s0, s11
+; GCN-NEXT: s_add_i32 s3, s18, s3
+; GCN-NEXT: s_mul_i32 s1, s1, s11
+; GCN-NEXT: s_add_i32 s3, s3, s1
+; GCN-NEXT: s_mul_i32 s0, s0, s11
+; GCN-NEXT: s_mul_hi_u32 s18, s2, s0
+; GCN-NEXT: s_mul_i32 s19, s2, s0
+; GCN-NEXT: s_mul_i32 s21, s11, s3
+; GCN-NEXT: s_mul_hi_u32 s0, s11, s0
+; GCN-NEXT: s_mul_hi_u32 s20, s11, s3
+; GCN-NEXT: s_add_u32 s0, s0, s21
+; GCN-NEXT: s_addc_u32 s11, 0, s20
+; GCN-NEXT: s_add_u32 s0, s0, s19
+; GCN-NEXT: s_mul_hi_u32 s1, s2, s3
+; GCN-NEXT: s_addc_u32 s0, s11, s18
+; GCN-NEXT: s_addc_u32 s1, s1, 0
+; GCN-NEXT: s_mul_i32 s3, s2, s3
+; GCN-NEXT: s_add_u32 s0, s0, s3
+; GCN-NEXT: s_addc_u32 s1, 0, s1
+; GCN-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_addc_u32 s2, s2, s1
+; GCN-NEXT: s_ashr_i32 s18, s13, 31
+; GCN-NEXT: s_add_u32 s0, s12, s18
+; GCN-NEXT: s_mov_b32 s19, s18
+; GCN-NEXT: s_addc_u32 s1, s13, s18
+; GCN-NEXT: s_xor_b64 s[20:21], s[0:1], s[18:19]
+; GCN-NEXT: v_readfirstlane_b32 s3, v0
+; GCN-NEXT: s_mul_i32 s1, s20, s2
+; GCN-NEXT: s_mul_hi_u32 s11, s20, s3
+; GCN-NEXT: s_mul_hi_u32 s0, s20, s2
+; GCN-NEXT: s_add_u32 s1, s11, s1
+; GCN-NEXT: s_addc_u32 s0, 0, s0
+; GCN-NEXT: s_mul_hi_u32 s13, s21, s3
+; GCN-NEXT: s_mul_i32 s3, s21, s3
+; GCN-NEXT: s_add_u32 s1, s1, s3
+; GCN-NEXT: s_mul_hi_u32 s11, s21, s2
+; GCN-NEXT: s_addc_u32 s0, s0, s13
+; GCN-NEXT: s_addc_u32 s1, s11, 0
+; GCN-NEXT: s_mul_i32 s2, s21, s2
+; GCN-NEXT: s_add_u32 s0, s0, s2
+; GCN-NEXT: s_addc_u32 s1, 0, s1
+; GCN-NEXT: s_mul_i32 s1, s16, s1
+; GCN-NEXT: s_mul_hi_u32 s2, s16, s0
+; GCN-NEXT: s_add_i32 s1, s2, s1
+; GCN-NEXT: s_mul_i32 s2, s17, s0
+; GCN-NEXT: s_mul_i32 s0, s16, s0
+; GCN-NEXT: s_add_i32 s11, s1, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_sub_i32 s1, s21, s11
+; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, s20, v0
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_subb_u32 s13, s1, s17
+; GCN-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s16, v0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_subb_u32 s19, s13, 0
+; GCN-NEXT: s_cmp_ge_u32 s19, s17
+; GCN-NEXT: s_cselect_b32 s20, -1, 0
+; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s16, v1
+; GCN-NEXT: s_cmp_eq_u32 s19, s17
+; GCN-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[2:3]
+; GCN-NEXT: v_mov_b32_e32 v3, s20
+; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: v_cndmask_b32_e64 v2, v3, v2, s[2:3]
+; GCN-NEXT: s_subb_u32 s2, s13, s17
+; GCN-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s16, v1
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_subb_u32 s2, s2, 0
+; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1]
+; GCN-NEXT: v_mov_b32_e32 v2, s19
+; GCN-NEXT: v_mov_b32_e32 v3, s2
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1]
+; GCN-NEXT: s_subb_u32 s0, s21, s11
+; GCN-NEXT: s_cmp_ge_u32 s0, s17
+; GCN-NEXT: s_cselect_b32 s1, -1, 0
+; GCN-NEXT: v_cmp_le_u32_e32 vcc, s16, v0
+; GCN-NEXT: s_cmp_eq_u32 s0, s17
+; GCN-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
+; GCN-NEXT: v_mov_b32_e32 v4, s1
+; GCN-NEXT: s_cselect_b64 vcc, -1, 0
+; GCN-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; GCN-NEXT: v_mov_b32_e32 v4, s0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GCN-NEXT: v_xor_b32_e32 v0, s18, v0
+; GCN-NEXT: v_xor_b32_e32 v1, s18, v2
+; GCN-NEXT: v_mov_b32_e32 v2, s18
+; GCN-NEXT: v_subrev_co_u32_e32 v0, vcc, s18, v0
+; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-NEXT: s_cbranch_execnz .LBB10_3
+; GCN-NEXT: .LBB10_2:
+; GCN-NEXT: v_cvt_f32_u32_e32 v0, s10
+; GCN-NEXT: s_sub_i32 s0, 0, s10
+; GCN-NEXT: s_mov_b32 s1, 0
+; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-NEXT: s_mul_i32 s0, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s0, s2, s0
+; GCN-NEXT: s_add_i32 s2, s2, s0
+; GCN-NEXT: s_mul_hi_u32 s0, s12, s2
+; GCN-NEXT: s_mul_i32 s0, s0, s10
+; GCN-NEXT: s_sub_i32 s0, s12, s0
+; GCN-NEXT: s_sub_i32 s2, s0, s10
+; GCN-NEXT: s_cmp_ge_u32 s0, s10
+; GCN-NEXT: s_cselect_b32 s0, s2, s0
+; GCN-NEXT: s_sub_i32 s2, s0, s10
+; GCN-NEXT: s_cmp_ge_u32 s0, s10
+; GCN-NEXT: s_cselect_b32 s0, s2, s0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: .LBB10_3:
+; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[6:7]
+; GCN-NEXT: s_mov_b32 s0, 0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_cbranch_scc0 .LBB10_8
+; GCN-NEXT: ; %bb.4:
+; GCN-NEXT: s_ashr_i32 s0, s7, 31
+; GCN-NEXT: s_add_u32 s2, s6, s0
+; GCN-NEXT: s_mov_b32 s1, s0
+; GCN-NEXT: s_addc_u32 s3, s7, s0
+; GCN-NEXT: s_xor_b64 s[12:13], s[2:3], s[0:1]
+; GCN-NEXT: v_cvt_f32_u32_e32 v2, s12
+; GCN-NEXT: v_cvt_f32_u32_e32 v3, s13
+; GCN-NEXT: s_sub_u32 s0, 0, s12
+; GCN-NEXT: s_subb_u32 s1, 0, s13
+; GCN-NEXT: v_madmk_f32 v2, v3, 0x4f800000, v2
+; GCN-NEXT: v_rcp_f32_e32 v2, v2
+; GCN-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
+; GCN-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
+; GCN-NEXT: v_trunc_f32_e32 v3, v3
+; GCN-NEXT: v_madmk_f32 v2, v3, 0xcf800000, v2
+; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
+; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT: v_readfirstlane_b32 s2, v3
+; GCN-NEXT: v_readfirstlane_b32 s3, v2
+; GCN-NEXT: s_mul_i32 s7, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s15, s0, s3
+; GCN-NEXT: s_mul_i32 s14, s1, s3
+; GCN-NEXT: s_add_i32 s7, s15, s7
+; GCN-NEXT: s_add_i32 s7, s7, s14
+; GCN-NEXT: s_mul_i32 s16, s0, s3
+; GCN-NEXT: s_mul_hi_u32 s14, s3, s7
+; GCN-NEXT: s_mul_i32 s15, s3, s7
+; GCN-NEXT: s_mul_hi_u32 s3, s3, s16
+; GCN-NEXT: s_add_u32 s3, s3, s15
+; GCN-NEXT: s_addc_u32 s14, 0, s14
+; GCN-NEXT: s_mul_hi_u32 s17, s2, s16
+; GCN-NEXT: s_mul_i32 s16, s2, s16
+; GCN-NEXT: s_add_u32 s3, s3, s16
+; GCN-NEXT: s_mul_hi_u32 s15, s2, s7
+; GCN-NEXT: s_addc_u32 s3, s14, s17
+; GCN-NEXT: s_addc_u32 s14, s15, 0
+; GCN-NEXT: s_mul_i32 s7, s2, s7
+; GCN-NEXT: s_add_u32 s3, s3, s7
+; GCN-NEXT: s_addc_u32 s7, 0, s14
+; GCN-NEXT: v_add_co_u32_e32 v2, vcc, s3, v2
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_addc_u32 s2, s2, s7
+; GCN-NEXT: v_readfirstlane_b32 s7, v2
+; GCN-NEXT: s_mul_i32 s3, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s14, s0, s7
+; GCN-NEXT: s_add_i32 s3, s14, s3
+; GCN-NEXT: s_mul_i32 s1, s1, s7
+; GCN-NEXT: s_add_i32 s3, s3, s1
+; GCN-NEXT: s_mul_i32 s0, s0, s7
+; GCN-NEXT: s_mul_hi_u32 s14, s2, s0
+; GCN-NEXT: s_mul_i32 s15, s2, s0
+; GCN-NEXT: s_mul_i32 s17, s7, s3
+; GCN-NEXT: s_mul_hi_u32 s0, s7, s0
+; GCN-NEXT: s_mul_hi_u32 s16, s7, s3
+; GCN-NEXT: s_add_u32 s0, s0, s17
+; GCN-NEXT: s_addc_u32 s7, 0, s16
+; GCN-NEXT: s_add_u32 s0, s0, s15
+; GCN-NEXT: s_mul_hi_u32 s1, s2, s3
+; GCN-NEXT: s_addc_u32 s0, s7, s14
+; GCN-NEXT: s_addc_u32 s1, s1, 0
+; GCN-NEXT: s_mul_i32 s3, s2, s3
+; GCN-NEXT: s_add_u32 s0, s0, s3
+; GCN-NEXT: s_addc_u32 s1, 0, s1
+; GCN-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_addc_u32 s2, s2, s1
+; GCN-NEXT: s_ashr_i32 s14, s9, 31
+; GCN-NEXT: s_add_u32 s0, s8, s14
+; GCN-NEXT: s_mov_b32 s15, s14
+; GCN-NEXT: s_addc_u32 s1, s9, s14
+; GCN-NEXT: s_xor_b64 s[16:17], s[0:1], s[14:15]
+; GCN-NEXT: v_readfirstlane_b32 s3, v2
+; GCN-NEXT: s_mul_i32 s1, s16, s2
+; GCN-NEXT: s_mul_hi_u32 s7, s16, s3
+; GCN-NEXT: s_mul_hi_u32 s0, s16, s2
+; GCN-NEXT: s_add_u32 s1, s7, s1
+; GCN-NEXT: s_addc_u32 s0, 0, s0
+; GCN-NEXT: s_mul_hi_u32 s9, s17, s3
+; GCN-NEXT: s_mul_i32 s3, s17, s3
+; GCN-NEXT: s_add_u32 s1, s1, s3
+; GCN-NEXT: s_mul_hi_u32 s7, s17, s2
+; GCN-NEXT: s_addc_u32 s0, s0, s9
+; GCN-NEXT: s_addc_u32 s1, s7, 0
+; GCN-NEXT: s_mul_i32 s2, s17, s2
+; GCN-NEXT: s_add_u32 s0, s0, s2
+; GCN-NEXT: s_addc_u32 s1, 0, s1
+; GCN-NEXT: s_mul_i32 s1, s12, s1
+; GCN-NEXT: s_mul_hi_u32 s2, s12, s0
+; GCN-NEXT: s_add_i32 s1, s2, s1
+; GCN-NEXT: s_mul_i32 s2, s13, s0
+; GCN-NEXT: s_mul_i32 s0, s12, s0
+; GCN-NEXT: s_add_i32 s7, s1, s2
+; GCN-NEXT: v_mov_b32_e32 v2, s0
+; GCN-NEXT: s_sub_i32 s1, s17, s7
+; GCN-NEXT: v_sub_co_u32_e32 v2, vcc, s16, v2
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_subb_u32 s9, s1, s13
+; GCN-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s12, v2
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_subb_u32 s15, s9, 0
+; GCN-NEXT: s_cmp_ge_u32 s15, s13
+; GCN-NEXT: s_cselect_b32 s16, -1, 0
+; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v3
+; GCN-NEXT: s_cmp_eq_u32 s15, s13
+; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[2:3]
+; GCN-NEXT: v_mov_b32_e32 v5, s16
+; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: v_cndmask_b32_e64 v4, v5, v4, s[2:3]
+; GCN-NEXT: s_subb_u32 s2, s9, s13
+; GCN-NEXT: v_subrev_co_u32_e64 v5, s[0:1], s12, v3
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_subb_u32 s2, s2, 0
+; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
+; GCN-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[0:1]
+; GCN-NEXT: v_mov_b32_e32 v4, s15
+; GCN-NEXT: v_mov_b32_e32 v5, s2
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[0:1]
+; GCN-NEXT: s_subb_u32 s0, s17, s7
+; GCN-NEXT: s_cmp_ge_u32 s0, s13
+; GCN-NEXT: s_cselect_b32 s1, -1, 0
+; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v2
+; GCN-NEXT: s_cmp_eq_u32 s0, s13
+; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT: v_mov_b32_e32 v6, s1
+; GCN-NEXT: s_cselect_b64 vcc, -1, 0
+; GCN-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
+; GCN-NEXT: v_mov_b32_e32 v6, s0
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
+; GCN-NEXT: v_xor_b32_e32 v2, s14, v2
+; GCN-NEXT: v_xor_b32_e32 v3, s14, v4
+; GCN-NEXT: v_mov_b32_e32 v4, s14
+; GCN-NEXT: v_subrev_co_u32_e32 v2, vcc, s14, v2
+; GCN-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
+; GCN-NEXT: s_cbranch_execnz .LBB10_6
+; GCN-NEXT: .LBB10_5:
+; GCN-NEXT: v_cvt_f32_u32_e32 v2, s6
+; GCN-NEXT: s_sub_i32 s0, 0, s6
+; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
+; GCN-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT: v_mul_lo_u32 v3, s0, v2
+; GCN-NEXT: v_mul_hi_u32 v3, v2, v3
+; GCN-NEXT: v_add_u32_e32 v2, v2, v3
+; GCN-NEXT: v_mul_hi_u32 v2, s8, v2
+; GCN-NEXT: v_mul_lo_u32 v2, v2, s6
+; GCN-NEXT: v_sub_u32_e32 v2, s8, v2
+; GCN-NEXT: v_subrev_u32_e32 v3, s6, v2
+; GCN-NEXT: v_cmp_le_u32_e32 vcc, s6, v2
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GCN-NEXT: v_subrev_u32_e32 v3, s6, v2
+; GCN-NEXT: v_cmp_le_u32_e32 vcc, s6, v2
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: .LBB10_6:
+; GCN-NEXT: v_mov_b32_e32 v4, 0
+; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[4:5]
+; GCN-NEXT: s_endpgm
+; GCN-NEXT: .LBB10_7:
+; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN-NEXT: s_branch .LBB10_2
+; GCN-NEXT: .LBB10_8:
+; GCN-NEXT: s_branch .LBB10_5
+;
+; TAHITI-LABEL: srem_v2i64:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s3, 0xf000
+; TAHITI-NEXT: s_mov_b32 s2, -1
+; TAHITI-NEXT: v_mov_b32_e32 v8, 0
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s0, s6
+; TAHITI-NEXT: s_mov_b32 s1, s7
+; TAHITI-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_or_b32_e32 v9, v5, v1
+; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; TAHITI-NEXT: s_cbranch_vccz .LBB10_7
+; TAHITI-NEXT: ; %bb.1:
+; TAHITI-NEXT: v_ashrrev_i32_e32 v9, 31, v1
+; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v0, v9
+; TAHITI-NEXT: v_addc_u32_e32 v1, vcc, v1, v9, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v8, v8, v9
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v9
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v9, v8
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v10, v1
+; TAHITI-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
+; TAHITI-NEXT: v_subb_u32_e32 v12, vcc, 0, v1, vcc
+; TAHITI-NEXT: v_madmk_f32 v9, v10, 0x4f800000, v9
+; TAHITI-NEXT: v_rcp_f32_e32 v9, v9
+; TAHITI-NEXT: v_mul_f32_e32 v9, 0x5f7ffffc, v9
+; TAHITI-NEXT: v_mul_f32_e32 v10, 0x2f800000, v9
+; TAHITI-NEXT: v_trunc_f32_e32 v10, v10
+; TAHITI-NEXT: v_madmk_f32 v9, v10, 0xcf800000, v9
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v10, v10
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v9, v9
+; TAHITI-NEXT: v_mul_lo_u32 v14, v11, v10
+; TAHITI-NEXT: v_mul_hi_u32 v13, v11, v9
+; TAHITI-NEXT: v_mul_lo_u32 v15, v12, v9
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v14, v13
+; TAHITI-NEXT: v_mul_lo_u32 v14, v11, v9
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v15
+; TAHITI-NEXT: v_mul_lo_u32 v15, v9, v13
+; TAHITI-NEXT: v_mul_hi_u32 v16, v9, v14
+; TAHITI-NEXT: v_mul_hi_u32 v17, v9, v13
+; TAHITI-NEXT: v_mul_hi_u32 v18, v10, v13
+; TAHITI-NEXT: v_mul_lo_u32 v13, v10, v13
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v16, v15
+; TAHITI-NEXT: v_addc_u32_e32 v16, vcc, 0, v17, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v17, v10, v14
+; TAHITI-NEXT: v_mul_hi_u32 v14, v10, v14
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v15, v17
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, v16, v14, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v15, vcc, 0, v18, vcc
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v14, v13
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v15, vcc
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v9, v13
+; TAHITI-NEXT: v_addc_u32_e32 v10, vcc, v10, v14, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v13, v11, v10
+; TAHITI-NEXT: v_mul_hi_u32 v14, v11, v9
+; TAHITI-NEXT: v_mul_lo_u32 v12, v12, v9
+; TAHITI-NEXT: v_mul_lo_u32 v11, v11, v9
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v14
+; TAHITI-NEXT: v_add_i32_e32 v12, vcc, v13, v12
+; TAHITI-NEXT: v_mul_lo_u32 v15, v9, v12
+; TAHITI-NEXT: v_mul_hi_u32 v16, v9, v11
+; TAHITI-NEXT: v_mul_hi_u32 v17, v9, v12
+; TAHITI-NEXT: v_mul_hi_u32 v14, v10, v11
+; TAHITI-NEXT: v_mul_lo_u32 v11, v10, v11
+; TAHITI-NEXT: v_mul_hi_u32 v13, v10, v12
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v16, v15
+; TAHITI-NEXT: v_addc_u32_e32 v16, vcc, 0, v17, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v12, v10, v12
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v15, v11
+; TAHITI-NEXT: v_addc_u32_e32 v11, vcc, v16, v14, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v12
+; TAHITI-NEXT: v_addc_u32_e32 v12, vcc, 0, v13, vcc
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v9, v11
+; TAHITI-NEXT: v_addc_u32_e32 v10, vcc, v10, v12, vcc
+; TAHITI-NEXT: v_ashrrev_i32_e32 v11, 31, v5
+; TAHITI-NEXT: v_add_i32_e32 v12, vcc, v4, v11
+; TAHITI-NEXT: v_xor_b32_e32 v12, v12, v11
+; TAHITI-NEXT: v_mul_lo_u32 v13, v12, v10
+; TAHITI-NEXT: v_mul_hi_u32 v14, v12, v9
+; TAHITI-NEXT: v_mul_hi_u32 v15, v12, v10
+; TAHITI-NEXT: v_addc_u32_e32 v5, vcc, v5, v11, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v5, v5, v11
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v14, v13
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v15, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v15, v5, v9
+; TAHITI-NEXT: v_mul_hi_u32 v9, v5, v9
+; TAHITI-NEXT: v_mul_hi_u32 v16, v5, v10
+; TAHITI-NEXT: v_mul_lo_u32 v10, v5, v10
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v15
+; TAHITI-NEXT: v_addc_u32_e32 v9, vcc, v14, v9, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v13, vcc, 0, v16, vcc
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v9, v10
+; TAHITI-NEXT: v_addc_u32_e32 v10, vcc, 0, v13, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v10, v8, v10
+; TAHITI-NEXT: v_mul_hi_u32 v13, v8, v9
+; TAHITI-NEXT: v_mul_lo_u32 v14, v1, v9
+; TAHITI-NEXT: v_mul_lo_u32 v9, v8, v9
+; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v10, v13
+; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v14, v10
+; TAHITI-NEXT: v_sub_i32_e32 v13, vcc, v5, v10
+; TAHITI-NEXT: v_sub_i32_e32 v9, vcc, v12, v9
+; TAHITI-NEXT: v_subb_u32_e64 v12, s[0:1], v13, v1, vcc
+; TAHITI-NEXT: v_sub_i32_e64 v13, s[0:1], v9, v8
+; TAHITI-NEXT: v_subbrev_u32_e64 v14, s[2:3], 0, v12, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v14, v1
+; TAHITI-NEXT: v_cndmask_b32_e64 v15, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v13, v8
+; TAHITI-NEXT: v_subb_u32_e32 v5, vcc, v5, v10, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_eq_u32_e64 s[2:3], v14, v1
+; TAHITI-NEXT: v_subb_u32_e64 v12, s[0:1], v12, v1, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v5, v1
+; TAHITI-NEXT: v_cndmask_b32_e64 v15, v15, v16, s[2:3]
+; TAHITI-NEXT: v_sub_i32_e64 v16, s[0:1], v13, v8
+; TAHITI-NEXT: v_cndmask_b32_e64 v10, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v9, v8
+; TAHITI-NEXT: v_subbrev_u32_e64 v12, s[0:1], 0, v12, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
+; TAHITI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v15
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v10, v8, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v13, v13, v16, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v12, v14, v12, s[0:1]
+; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v5, v12, vcc
+; TAHITI-NEXT: v_cndmask_b32_e32 v5, v9, v13, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v5, v5, v11
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v11
+; TAHITI-NEXT: v_sub_i32_e32 v8, vcc, v5, v11
+; TAHITI-NEXT: v_subb_u32_e32 v9, vcc, v1, v11, vcc
+; TAHITI-NEXT: s_cbranch_execnz .LBB10_3
+; TAHITI-NEXT: .LBB10_2:
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v1, v0
+; TAHITI-NEXT: v_sub_i32_e32 v5, vcc, 0, v0
+; TAHITI-NEXT: v_mov_b32_e32 v9, 0
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TAHITI-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TAHITI-NEXT: v_mul_lo_u32 v5, v5, v1
+; TAHITI-NEXT: v_mul_hi_u32 v5, v1, v5
+; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v1, v5
+; TAHITI-NEXT: v_mul_hi_u32 v1, v4, v1
+; TAHITI-NEXT: v_mul_lo_u32 v1, v1, v0
+; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v4, v1
+; TAHITI-NEXT: v_sub_i32_e32 v4, vcc, v1, v0
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; TAHITI-NEXT: v_sub_i32_e32 v4, vcc, v1, v0
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; TAHITI-NEXT: v_cndmask_b32_e32 v8, v1, v4, vcc
+; TAHITI-NEXT: .LBB10_3:
+; TAHITI-NEXT: v_or_b32_e32 v1, v7, v3
+; TAHITI-NEXT: v_mov_b32_e32 v0, 0
+; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; TAHITI-NEXT: s_cbranch_vccz .LBB10_8
+; TAHITI-NEXT: ; %bb.4:
+; TAHITI-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v0
+; TAHITI-NEXT: v_xor_b32_e32 v0, v3, v0
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v3, v1
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v4, v0
+; TAHITI-NEXT: v_sub_i32_e32 v5, vcc, 0, v1
+; TAHITI-NEXT: v_subb_u32_e32 v10, vcc, 0, v0, vcc
+; TAHITI-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3
+; TAHITI-NEXT: v_rcp_f32_e32 v3, v3
+; TAHITI-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3
+; TAHITI-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3
+; TAHITI-NEXT: v_trunc_f32_e32 v4, v4
+; TAHITI-NEXT: v_madmk_f32 v3, v4, 0xcf800000, v3
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v4, v4
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v3, v3
+; TAHITI-NEXT: v_mul_lo_u32 v12, v5, v4
+; TAHITI-NEXT: v_mul_hi_u32 v11, v5, v3
+; TAHITI-NEXT: v_mul_lo_u32 v13, v10, v3
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v12, v11
+; TAHITI-NEXT: v_mul_lo_u32 v12, v5, v3
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v13
+; TAHITI-NEXT: v_mul_lo_u32 v13, v3, v11
+; TAHITI-NEXT: v_mul_hi_u32 v14, v3, v12
+; TAHITI-NEXT: v_mul_hi_u32 v15, v3, v11
+; TAHITI-NEXT: v_mul_hi_u32 v16, v4, v11
+; TAHITI-NEXT: v_mul_lo_u32 v11, v4, v11
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v14, v13
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v15, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v15, v4, v12
+; TAHITI-NEXT: v_mul_hi_u32 v12, v4, v12
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v15
+; TAHITI-NEXT: v_addc_u32_e32 v12, vcc, v14, v12, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v13, vcc, 0, v16, vcc
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v12, v11
+; TAHITI-NEXT: v_addc_u32_e32 v12, vcc, 0, v13, vcc
+; TAHITI-NEXT: v_add_i32_e32 v3, vcc, v3, v11
+; TAHITI-NEXT: v_addc_u32_e32 v4, vcc, v4, v12, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v11, v5, v4
+; TAHITI-NEXT: v_mul_hi_u32 v12, v5, v3
+; TAHITI-NEXT: v_mul_lo_u32 v10, v10, v3
+; TAHITI-NEXT: v_mul_lo_u32 v5, v5, v3
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v12
+; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v11, v10
+; TAHITI-NEXT: v_mul_lo_u32 v13, v3, v10
+; TAHITI-NEXT: v_mul_hi_u32 v14, v3, v5
+; TAHITI-NEXT: v_mul_hi_u32 v15, v3, v10
+; TAHITI-NEXT: v_mul_hi_u32 v12, v4, v5
+; TAHITI-NEXT: v_mul_lo_u32 v5, v4, v5
+; TAHITI-NEXT: v_mul_hi_u32 v11, v4, v10
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v14, v13
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v15, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v10, v4, v10
+; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v13, v5
+; TAHITI-NEXT: v_addc_u32_e32 v5, vcc, v14, v12, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v5, v10
+; TAHITI-NEXT: v_addc_u32_e32 v10, vcc, 0, v11, vcc
+; TAHITI-NEXT: v_add_i32_e32 v3, vcc, v3, v5
+; TAHITI-NEXT: v_addc_u32_e32 v4, vcc, v4, v10, vcc
+; TAHITI-NEXT: v_ashrrev_i32_e32 v5, 31, v7
+; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v6, v5
+; TAHITI-NEXT: v_xor_b32_e32 v10, v10, v5
+; TAHITI-NEXT: v_mul_lo_u32 v11, v10, v4
+; TAHITI-NEXT: v_mul_hi_u32 v12, v10, v3
+; TAHITI-NEXT: v_mul_hi_u32 v13, v10, v4
+; TAHITI-NEXT: v_addc_u32_e32 v7, vcc, v7, v5, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v7, v7, v5
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v12, v11
+; TAHITI-NEXT: v_addc_u32_e32 v12, vcc, 0, v13, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v13, v7, v3
+; TAHITI-NEXT: v_mul_hi_u32 v3, v7, v3
+; TAHITI-NEXT: v_mul_hi_u32 v14, v7, v4
+; TAHITI-NEXT: v_mul_lo_u32 v4, v7, v4
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v13
+; TAHITI-NEXT: v_addc_u32_e32 v3, vcc, v12, v3, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v11, vcc, 0, v14, vcc
+; TAHITI-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; TAHITI-NEXT: v_addc_u32_e32 v4, vcc, 0, v11, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v4, v1, v4
+; TAHITI-NEXT: v_mul_hi_u32 v11, v1, v3
+; TAHITI-NEXT: v_mul_lo_u32 v12, v0, v3
+; TAHITI-NEXT: v_mul_lo_u32 v3, v1, v3
+; TAHITI-NEXT: v_add_i32_e32 v4, vcc, v4, v11
+; TAHITI-NEXT: v_add_i32_e32 v4, vcc, v12, v4
+; TAHITI-NEXT: v_sub_i32_e32 v11, vcc, v7, v4
+; TAHITI-NEXT: v_sub_i32_e32 v3, vcc, v10, v3
+; TAHITI-NEXT: v_subb_u32_e64 v10, s[0:1], v11, v0, vcc
+; TAHITI-NEXT: v_sub_i32_e64 v11, s[0:1], v3, v1
+; TAHITI-NEXT: v_subbrev_u32_e64 v12, s[2:3], 0, v10, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v12, v0
+; TAHITI-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v11, v1
+; TAHITI-NEXT: v_subb_u32_e32 v4, vcc, v7, v4, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_eq_u32_e64 s[2:3], v12, v0
+; TAHITI-NEXT: v_subb_u32_e64 v10, s[0:1], v10, v0, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v4, v0
+; TAHITI-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[2:3]
+; TAHITI-NEXT: v_sub_i32_e64 v14, s[0:1], v11, v1
+; TAHITI-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v3, v1
+; TAHITI-NEXT: v_subbrev_u32_e64 v10, s[0:1], 0, v10, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_eq_u32_e32 vcc, v4, v0
+; TAHITI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v13
+; TAHITI-NEXT: v_cndmask_b32_e32 v0, v7, v1, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v11, v11, v14, s[0:1]
+; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; TAHITI-NEXT: v_cndmask_b32_e64 v10, v12, v10, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v3, v11, vcc
+; TAHITI-NEXT: v_cndmask_b32_e32 v0, v4, v10, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v5
+; TAHITI-NEXT: v_xor_b32_e32 v0, v0, v5
+; TAHITI-NEXT: v_sub_i32_e32 v10, vcc, v1, v5
+; TAHITI-NEXT: v_subb_u32_e32 v11, vcc, v0, v5, vcc
+; TAHITI-NEXT: s_cbranch_execnz .LBB10_6
+; TAHITI-NEXT: .LBB10_5:
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, v2
+; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, 0, v2
+; TAHITI-NEXT: v_mov_b32_e32 v11, 0
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_lo_u32 v1, v1, v0
+; TAHITI-NEXT: v_mul_hi_u32 v1, v0, v1
+; TAHITI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; TAHITI-NEXT: v_mul_hi_u32 v0, v6, v0
+; TAHITI-NEXT: v_mul_lo_u32 v0, v0, v2
+; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v6, v0
+; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TAHITI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TAHITI-NEXT: v_cndmask_b32_e32 v10, v0, v1, vcc
+; TAHITI-NEXT: .LBB10_6:
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: buffer_store_dwordx4 v[8:11], off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+; TAHITI-NEXT: .LBB10_7:
+; TAHITI-NEXT: ; implicit-def: $vgpr8_vgpr9
+; TAHITI-NEXT: s_branch .LBB10_2
+; TAHITI-NEXT: .LBB10_8:
+; TAHITI-NEXT: s_branch .LBB10_5
+;
+; TONGA-LABEL: srem_v2i64:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; TONGA-NEXT: v_mov_b32_e32 v8, 0
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: s_add_u32 s0, s6, 16
+; TONGA-NEXT: v_mov_b32_e32 v4, s6
+; TONGA-NEXT: s_addc_u32 s1, s7, 0
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: v_mov_b32_e32 v5, s7
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_or_b32_e32 v9, v5, v1
+; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; TONGA-NEXT: s_cbranch_vccz .LBB10_7
+; TONGA-NEXT: ; %bb.1:
+; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v1
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v0, v8
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, v1, v8, vcc
+; TONGA-NEXT: v_xor_b32_e32 v14, v9, v8
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v8
+; TONGA-NEXT: v_cvt_f32_u32_e32 v8, v14
+; TONGA-NEXT: v_cvt_f32_u32_e32 v9, v1
+; TONGA-NEXT: v_sub_u32_e32 v15, vcc, 0, v14
+; TONGA-NEXT: v_subb_u32_e32 v16, vcc, 0, v1, vcc
+; TONGA-NEXT: v_madmk_f32 v8, v9, 0x4f800000, v8
+; TONGA-NEXT: v_rcp_f32_e32 v8, v8
+; TONGA-NEXT: v_mul_f32_e32 v8, 0x5f7ffffc, v8
+; TONGA-NEXT: v_mul_f32_e32 v9, 0x2f800000, v8
+; TONGA-NEXT: v_trunc_f32_e32 v9, v9
+; TONGA-NEXT: v_madmk_f32 v8, v9, 0xcf800000, v8
+; TONGA-NEXT: v_cvt_u32_f32_e32 v12, v9
+; TONGA-NEXT: v_cvt_u32_f32_e32 v13, v8
+; TONGA-NEXT: v_mul_lo_u32 v10, v15, v12
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v15, v13, 0
+; TONGA-NEXT: v_mul_lo_u32 v11, v16, v13
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v9, v10
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, v9, v11
+; TONGA-NEXT: v_mad_u64_u32 v[9:10], s[0:1], v13, v11, 0
+; TONGA-NEXT: v_mul_hi_u32 v17, v13, v8
+; TONGA-NEXT: v_add_u32_e32 v17, vcc, v17, v9
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v12, v8, 0
+; TONGA-NEXT: v_addc_u32_e32 v18, vcc, 0, v10, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v12, v11, 0
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v17, v8
+; TONGA-NEXT: v_addc_u32_e32 v8, vcc, v18, v9, vcc
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v11, vcc
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v8, v10
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT: v_add_u32_e32 v17, vcc, v13, v8
+; TONGA-NEXT: v_addc_u32_e32 v18, vcc, v12, v9, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v15, v17, 0
+; TONGA-NEXT: v_mul_lo_u32 v12, v15, v18
+; TONGA-NEXT: v_mul_lo_u32 v13, v16, v17
+; TONGA-NEXT: v_mul_hi_u32 v15, v17, v8
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v18, v8, 0
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v12, v9
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v9, v13
+; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v17, v9, 0
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v18, v9, 0
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v15, v12
+; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v12, v10
+; TONGA-NEXT: v_addc_u32_e32 v10, vcc, v13, v11, vcc
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v10, v8
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v17, v8
+; TONGA-NEXT: v_addc_u32_e32 v11, vcc, v18, v9, vcc
+; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v5
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v4, v12
+; TONGA-NEXT: v_xor_b32_e32 v13, v8, v12
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v13, v11, 0
+; TONGA-NEXT: v_mul_hi_u32 v15, v13, v10
+; TONGA-NEXT: v_addc_u32_e32 v5, vcc, v5, v12, vcc
+; TONGA-NEXT: v_xor_b32_e32 v5, v5, v12
+; TONGA-NEXT: v_add_u32_e32 v15, vcc, v15, v8
+; TONGA-NEXT: v_addc_u32_e32 v16, vcc, 0, v9, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v5, v10, 0
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v5, v11, 0
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v15, v8
+; TONGA-NEXT: v_addc_u32_e32 v8, vcc, v16, v9, vcc
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v11, vcc
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v8, v10
+; TONGA-NEXT: v_addc_u32_e32 v8, vcc, 0, v9, vcc
+; TONGA-NEXT: v_mul_lo_u32 v11, v14, v8
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v14, v10, 0
+; TONGA-NEXT: v_mul_lo_u32 v10, v1, v10
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v11, v9
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v10, v9
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v5, v9
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v13, v8
+; TONGA-NEXT: v_subb_u32_e64 v10, s[0:1], v10, v1, vcc
+; TONGA-NEXT: v_sub_u32_e64 v11, s[0:1], v8, v14
+; TONGA-NEXT: v_subbrev_u32_e64 v13, s[2:3], 0, v10, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v13, v1
+; TONGA-NEXT: v_cndmask_b32_e64 v15, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v11, v14
+; TONGA-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v13, v1
+; TONGA-NEXT: v_subb_u32_e64 v10, s[0:1], v10, v1, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v15, v15, v16, s[2:3]
+; TONGA-NEXT: v_sub_u32_e64 v16, s[0:1], v11, v14
+; TONGA-NEXT: v_subb_u32_e32 v5, vcc, v5, v9, vcc
+; TONGA-NEXT: v_subbrev_u32_e64 v10, s[0:1], 0, v10, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v5, v1
+; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v15
+; TONGA-NEXT: v_cndmask_b32_e64 v9, 0, -1, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v14
+; TONGA-NEXT: v_cndmask_b32_e64 v10, v13, v10, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v13, 0, -1, vcc
+; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v9, v13, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v11, v11, v16, s[0:1]
+; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v5, v10, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v5, v8, v11, vcc
+; TONGA-NEXT: v_xor_b32_e32 v5, v5, v12
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v12
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v5, v12
+; TONGA-NEXT: v_subb_u32_e32 v9, vcc, v1, v12, vcc
+; TONGA-NEXT: s_cbranch_execnz .LBB10_3
+; TONGA-NEXT: .LBB10_2:
+; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v0
+; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0
+; TONGA-NEXT: v_mov_b32_e32 v9, 0
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_lo_u32 v5, v5, v1
+; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v5
+; TONGA-NEXT: v_mul_hi_u32 v1, v4, v1
+; TONGA-NEXT: v_mul_lo_u32 v1, v1, v0
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v4, v1
+; TONGA-NEXT: v_subrev_u32_e32 v4, vcc, v0, v1
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; TONGA-NEXT: v_sub_u32_e32 v4, vcc, v1, v0
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; TONGA-NEXT: v_cndmask_b32_e32 v8, v1, v4, vcc
+; TONGA-NEXT: .LBB10_3:
+; TONGA-NEXT: v_or_b32_e32 v1, v7, v3
+; TONGA-NEXT: v_mov_b32_e32 v0, 0
+; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; TONGA-NEXT: s_cbranch_vccz .LBB10_8
+; TONGA-NEXT: ; %bb.4:
+; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v0
+; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc
+; TONGA-NEXT: v_xor_b32_e32 v5, v1, v0
+; TONGA-NEXT: v_xor_b32_e32 v12, v3, v0
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v5
+; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v12
+; TONGA-NEXT: v_sub_u32_e32 v13, vcc, 0, v5
+; TONGA-NEXT: v_subb_u32_e32 v14, vcc, 0, v12, vcc
+; TONGA-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
+; TONGA-NEXT: v_rcp_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; TONGA-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
+; TONGA-NEXT: v_trunc_f32_e32 v1, v1
+; TONGA-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v10, v1
+; TONGA-NEXT: v_cvt_u32_f32_e32 v11, v0
+; TONGA-NEXT: v_mul_lo_u32 v3, v13, v10
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v13, v11, 0
+; TONGA-NEXT: v_mul_lo_u32 v4, v14, v11
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; TONGA-NEXT: v_add_u32_e32 v15, vcc, v1, v4
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v11, v15, 0
+; TONGA-NEXT: v_mul_hi_u32 v1, v11, v0
+; TONGA-NEXT: v_add_u32_e32 v16, vcc, v1, v3
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v10, v0, 0
+; TONGA-NEXT: v_addc_u32_e32 v17, vcc, 0, v4, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v10, v15, 0
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v16, v0
+; TONGA-NEXT: v_addc_u32_e32 v0, vcc, v17, v1, vcc
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v4, vcc
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v3
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT: v_add_u32_e32 v15, vcc, v11, v0
+; TONGA-NEXT: v_addc_u32_e32 v16, vcc, v10, v1, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v13, v15, 0
+; TONGA-NEXT: v_mul_lo_u32 v10, v13, v16
+; TONGA-NEXT: v_mul_lo_u32 v11, v14, v15
+; TONGA-NEXT: v_mul_hi_u32 v13, v15, v0
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v16, v0, 0
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v10, v1
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v11
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v15, v1, 0
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v16, v1, 0
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v13, v10
+; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v10, v3
+; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v11, v4, vcc
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v3, v0
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v15, v0
+; TONGA-NEXT: v_addc_u32_e32 v4, vcc, v16, v1, vcc
+; TONGA-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v6, v11
+; TONGA-NEXT: v_xor_b32_e32 v10, v0, v11
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v10, v4, 0
+; TONGA-NEXT: v_mul_hi_u32 v13, v10, v3
+; TONGA-NEXT: v_addc_u32_e32 v7, vcc, v7, v11, vcc
+; TONGA-NEXT: v_xor_b32_e32 v7, v7, v11
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v0
+; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v1, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v7, v3, 0
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v7, v4, 0
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v13, v0
+; TONGA-NEXT: v_addc_u32_e32 v0, vcc, v14, v1, vcc
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v4, vcc
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v0, v3
+; TONGA-NEXT: v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; TONGA-NEXT: v_mul_lo_u32 v4, v5, v0
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v5, v3, 0
+; TONGA-NEXT: v_mul_lo_u32 v3, v12, v3
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v4, v1
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v3, v1
+; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v7, v1
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v10, v0
+; TONGA-NEXT: v_subb_u32_e64 v3, s[0:1], v3, v12, vcc
+; TONGA-NEXT: v_sub_u32_e64 v4, s[0:1], v0, v5
+; TONGA-NEXT: v_subbrev_u32_e64 v10, s[2:3], 0, v3, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v10, v12
+; TONGA-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v4, v5
+; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v7, v1, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v10, v12
+; TONGA-NEXT: v_subb_u32_e64 v3, s[0:1], v3, v12, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v12
+; TONGA-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[2:3]
+; TONGA-NEXT: v_sub_u32_e64 v14, s[0:1], v4, v5
+; TONGA-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
+; TONGA-NEXT: v_subbrev_u32_e64 v3, s[0:1], 0, v3, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
+; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v1, v12
+; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v13
+; TONGA-NEXT: v_cndmask_b32_e32 v5, v7, v5, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[0:1]
+; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
+; TONGA-NEXT: v_cndmask_b32_e64 v3, v10, v3, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; TONGA-NEXT: v_xor_b32_e32 v0, v0, v11
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v11
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v0, v11
+; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v1, v11, vcc
+; TONGA-NEXT: s_cbranch_execnz .LBB10_6
+; TONGA-NEXT: .LBB10_5:
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v2
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, 0, v2
+; TONGA-NEXT: v_mov_b32_e32 v11, 0
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_lo_u32 v1, v1, v0
+; TONGA-NEXT: v_mul_hi_u32 v1, v0, v1
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT: v_mul_hi_u32 v0, v6, v0
+; TONGA-NEXT: v_mul_lo_u32 v0, v0, v2
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v6, v0
+; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e32 v10, v0, v1, vcc
+; TONGA-NEXT: .LBB10_6:
+; TONGA-NEXT: v_mov_b32_e32 v0, s4
+; TONGA-NEXT: v_mov_b32_e32 v1, s5
+; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; TONGA-NEXT: s_endpgm
+; TONGA-NEXT: .LBB10_7:
+; TONGA-NEXT: ; implicit-def: $vgpr8_vgpr9
+; TONGA-NEXT: s_branch .LBB10_2
+; TONGA-NEXT: .LBB10_8:
+; TONGA-NEXT: s_branch .LBB10_5
+;
+; EG-LABEL: srem_v2i64:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @18, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @14
+; EG-NEXT: ALU 112, @19, KC0[], KC1[]
+; EG-NEXT: ALU 111, @132, KC0[], KC1[]
+; EG-NEXT: ALU 112, @244, KC0[], KC1[]
+; EG-NEXT: ALU 111, @357, KC0[], KC1[]
+; EG-NEXT: ALU 112, @469, KC0[], KC1[]
+; EG-NEXT: ALU 112, @582, KC0[], KC1[]
+; EG-NEXT: ALU 111, @695, KC0[], KC1[]
+; EG-NEXT: ALU 112, @807, KC0[], KC1[]
+; EG-NEXT: ALU 112, @920, KC0[], KC1[]
+; EG-NEXT: ALU 47, @1033, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T5.XYZW, T0.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: Fetch clause starting at 14:
+; EG-NEXT: VTX_READ_128 T4.XYZW, T0.X, 16, #1
+; EG-NEXT: VTX_READ_128 T1.XYZW, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 18:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 19:
+; EG-NEXT: ASHR * T5.W, T4.Y, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T0.W, T4.X, PV.W,
+; EG-NEXT: ASHR * T6.W, T4.W, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: XOR_INT * T2.W, PV.W, T5.W,
+; EG-NEXT: SUB_INT T0.Z, 0.0, PV.W,
+; EG-NEXT: ADD_INT T0.W, T4.Z, T6.W,
+; EG-NEXT: RECIP_UINT * T0.X, PV.W,
+; EG-NEXT: XOR_INT T3.W, PV.W, T6.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.Z, PS,
+; EG-NEXT: SUB_INT T0.Z, 0.0, PV.W,
+; EG-NEXT: ASHR T0.W, T1.W, literal.x,
+; EG-NEXT: RECIP_UINT * T2.X, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T2.Y, T1.W, PV.W,
+; EG-NEXT: ADDC_UINT T2.Z, T1.Z, PV.W,
+; EG-NEXT: ASHR T1.W, T1.Y, literal.x,
+; EG-NEXT: MULLO_INT * T0.Z, PV.Z, PS,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T1.Y, T1.Y, PV.W,
+; EG-NEXT: ADDC_UINT T3.Z, T1.X, PV.W,
+; EG-NEXT: ADD_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: MULHI * T0.Z, T2.X, PS,
+; EG-NEXT: ADD_INT T2.Y, T2.X, PS,
+; EG-NEXT: XOR_INT T0.Z, PV.W, T0.W,
+; EG-NEXT: ADD_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: MULHI * T0.Y, T0.X, T0.Y,
+; EG-NEXT: ADD_INT T2.Z, T0.X, PS,
+; EG-NEXT: XOR_INT T7.W, PV.W, T1.W,
+; EG-NEXT: MULHI * T0.X, PV.Z, PV.Y,
+; EG-NEXT: MULHI * T0.Y, PV.W, PV.Z,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T2.W,
+; EG-NEXT: SUB_INT T8.W, T7.W, PS,
+; EG-NEXT: MULLO_INT * T0.X, T0.X, T3.W,
+; EG-NEXT: SETGE_UINT T2.X, PV.W, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T0.Y, PV.W, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T2.Z, T0.Z, PS,
+; EG-NEXT: ADD_INT T9.W, T4.Y, T5.W,
+; EG-NEXT: ADDC_UINT * T10.W, T4.X, T5.W,
+; EG-NEXT: ADD_INT T1.Y, PV.W, PS,
+; EG-NEXT: SETGE_UINT T3.Z, PV.Z, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T9.W, PV.Z, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T8.W, PV.X, T8.W, PV.Y,
+; EG-NEXT: SETGE_UINT T0.X, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T0.Y, PS, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Z, PV.Z, T2.Z, PV.W,
+; EG-NEXT: ADD_INT T4.W, T4.W, T6.W,
+; EG-NEXT: ADDC_UINT * T9.W, T4.Z, T6.W,
+; EG-NEXT: ADD_INT T2.X, PV.W, PS,
+; EG-NEXT: SETGE_UINT T2.Y, PV.Z, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T3.Z, PV.Z, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T8.W, PV.X, T8.W, PV.Y, BS:VEC_102/SCL_221
+; EG-NEXT: XOR_INT * T4.W, T1.Y, T5.W,
+; EG-NEXT: CNDE_INT T0.X, PS, PV.W, T7.W,
+; EG-NEXT: ADD_INT T1.Y, T1.Z, T0.W,
+; EG-NEXT: CNDE_INT T1.Z, PV.Y, T2.Z, PV.Z,
+; EG-NEXT: XOR_INT * T5.W, PV.X, T6.W, BS:VEC_102/SCL_221
+; EG-NEXT: ADD_INT * T6.W, T1.X, T1.W,
+; EG-NEXT: XOR_INT T0.Y, PV.W, T1.W,
+; EG-NEXT: CNDE_INT T0.Z, T5.W, T1.Z, T0.Z,
+; EG-NEXT: XOR_INT T6.W, T1.Y, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHR * T7.W, T0.X, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T1.Y, PS, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.Z, PV.W, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T8.W, T0.X, PV.Y, literal.x,
+; EG-NEXT: LSHR * T9.W, PV.Z, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T0.X, T7.W, T4.W,
+; EG-NEXT: SETGE_UINT * T2.Y, PV.W, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T0.Z, T9.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T10.W, T9.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT * T11.W, T1.Z, T3.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.Z, PV.W, PS,
+; EG-NEXT: SUB_INT T0.Z, T1.Z, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT T10.W, T1.Y, T0.X, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T8.W, T2.W,
+; EG-NEXT: SUB_INT * T0.X, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT * T1.Y, T8.W, T2.W,
+; EG-NEXT: SUB_INT T2.Z, T9.W, T5.W,
+; EG-NEXT: SUBB_UINT * T12.W, T1.Z, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T8.W, T10.W, T8.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T2.Z, T12.W, BS:VEC_201
+; EG-NEXT: SUB_INT T11.W, T0.X, T1.Y,
+; EG-NEXT: CNDE_INT * T12.W, T3.Y, T1.Z, T0.Z,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T9.W, T2.Z,
+; EG-NEXT: OR_INT * T9.W, T1.X, T2.Y,
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T12.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, T0.Z, T8.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, T0.X, T1.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: ALU clause starting at 132:
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 29(4.063766e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 29(4.063766e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 28(3.923636e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 28(3.923636e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 27(3.783506e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 27(3.783506e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 26(3.643376e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 26(3.643376e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT * T0.X, PV.W, T4.W,
+; EG-NEXT: ALU clause starting at 244:
+; EG-NEXT: SETGE_UINT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SETGE_UINT T0.Z, T8.W, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T10.W, T1.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, T1.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, T0.X, T1.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 25(3.503246e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 25(3.503246e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 22(3.082857e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL * T0.X, PS, 1,
+; EG-NEXT: ALU clause starting at 357:
+; EG-NEXT: BFE_UINT * T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: 22(3.082857e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, T11.W,
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, T2.Z,
+; EG-NEXT: OR_INT * T9.W, T1.X, T2.Y,
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, T0.X, T1.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 21(2.942727e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 21(2.942727e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 20(2.802597e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 20(2.802597e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 19(2.662467e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 19(2.662467e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: ALU clause starting at 469:
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 18(2.522337e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 18(2.522337e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 17(2.382207e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 17(2.382207e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 15(2.101948e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 15(2.101948e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT * T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 582:
+; EG-NEXT: BIT_ALIGN_INT T7.W, T0.Z, T9.W, literal.x,
+; EG-NEXT: OR_INT * T8.W, T0.X, T1.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, T1.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, T1.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 14(1.961818e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 14(1.961818e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 13(1.821688e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 13(1.821688e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 12(1.681558e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 12(1.681558e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT * T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: 11(1.541428e-44), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 695:
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X,
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, T1.X, T2.Y,
+; EG-NEXT: 11(1.541428e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 10(1.401298e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 10(1.401298e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: ALU clause starting at 807:
+; EG-NEXT: CNDE_INT * T3.Y, T10.W, T11.W, T0.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, T0.X, T1.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 6(8.407791e-45), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 6(8.407791e-45), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 5(7.006492e-45), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 5(7.006492e-45), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 920:
+; EG-NEXT: OR_INT * T9.W, T1.X, T2.Y,
+; EG-NEXT: SETGE_UINT T2.Y, PV.W, T2.W,
+; EG-NEXT: BIT_ALIGN_INT * T1.Z, T7.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T7.W, T0.Z, T10.W, literal.x,
+; EG-NEXT: OR_INT * T8.W, T0.X, T1.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, T1.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, T1.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T9.W, T10.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, literal.x, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T0.Z, T9.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T10.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T10.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T3.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT * T11.W, T9.W, T2.W,
+; EG-NEXT: SUBB_UINT * T0.X, T9.W, T2.W,
+; EG-NEXT: SUB_INT * T1.Y, T7.W, T4.W,
+; EG-NEXT: SUBB_UINT T2.Z, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T12.W, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, T10.W, T9.W, T11.W,
+; EG-NEXT: LSHL T1.X, PV.W, 1,
+; EG-NEXT: BFE_UINT T2.Y, T0.Y, 1, 1,
+; EG-NEXT: SUB_INT T2.Z, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T0.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: LSHL T0.X, PS, 1,
+; EG-NEXT: BFE_UINT T1.Y, T6.W, 1, 1,
+; EG-NEXT: CNDE_INT T0.Z, T10.W, T7.W, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, T3.Y, T1.Z, PV.Z,
+; EG-NEXT: OR_INT * T10.W, PV.X, PV.Y,
+; EG-NEXT: SETGE_UINT T2.Y, PS, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T9.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T8.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T0.X, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PS, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T5.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T5.W,
+; EG-NEXT: CNDE_INT T1.X, PV.W, PS, PV.Z,
+; EG-NEXT: CNDE_INT T1.Y, PV.X, PV.Y, T2.Y,
+; EG-NEXT: SUB_INT T0.Z, T10.W, T2.W,
+; EG-NEXT: SUBB_UINT * T9.W, T10.W, T2.W,
+; EG-NEXT: SUB_INT * T11.W, T7.W, T4.W,
+; EG-NEXT: SUB_INT T0.X, T8.W, T3.W,
+; EG-NEXT: SUBB_UINT T2.Y, T8.W, T3.W,
+; EG-NEXT: SUB_INT * T2.Z, T1.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T9.W, T11.W, T9.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T1.Y, T10.W, T0.Z,
+; EG-NEXT: ALU clause starting at 1033:
+; EG-NEXT: LSHL T2.X, T10.W, 1,
+; EG-NEXT: AND_INT T0.Y, T0.Y, 1, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T0.Z, T1.Y, T7.W, T9.W,
+; EG-NEXT: SUB_INT * T7.W, T2.Z, T2.Y, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T8.W, T1.X, T8.W, T0.X,
+; EG-NEXT: LSHL T0.X, PV.W, 1,
+; EG-NEXT: AND_INT T1.Y, T6.W, 1,
+; EG-NEXT: CNDE_INT T1.Z, T1.X, T1.Z, T7.W,
+; EG-NEXT: BIT_ALIGN_INT T6.W, T0.Z, T10.W, literal.x,
+; EG-NEXT: OR_INT * T7.W, T2.X, T0.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T1.X, PS, T2.W,
+; EG-NEXT: SETE_INT T0.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T8.W, PV.Z, T8.W, literal.x, BS:VEC_102/SCL_221
+; EG-NEXT: OR_INT * T9.W, PV.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T0.X, PS, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T1.Y, PV.W, T5.W,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T5.W,
+; EG-NEXT: SUBB_UINT T10.W, PS, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T5.W, PV.W, T5.W,
+; EG-NEXT: SUBB_UINT * T2.X, T7.W, T2.W,
+; EG-NEXT: SUB_INT * T2.Y, T6.W, T4.W,
+; EG-NEXT: SUB_INT * T2.Z, T9.W, T3.W,
+; EG-NEXT: SUB_INT T3.W, T5.W, T10.W,
+; EG-NEXT: CNDE_INT * T4.W, T1.Y, T1.Z, T0.X,
+; EG-NEXT: CNDE_INT T0.X, PS, T8.W, PV.W,
+; EG-NEXT: CNDE_INT * T1.Y, PS, T9.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T1.Z, T7.W, T2.W,
+; EG-NEXT: SUB_INT T2.W, T2.Y, T2.X,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T0.Z, T1.X,
+; EG-NEXT: CNDE_INT T0.Y, PS, T6.W, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T0.Z, PS, T7.W, PV.Z, BS:VEC_102/SCL_221
+; EG-NEXT: XOR_INT T2.W, T1.Y, T0.W,
+; EG-NEXT: XOR_INT * T3.W, T0.X, T0.W,
+; EG-NEXT: SUB_INT T1.Y, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUBB_UINT T1.Z, PV.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: XOR_INT T3.W, PV.Z, T1.W,
+; EG-NEXT: XOR_INT * T4.W, PV.Y, T1.W,
+; EG-NEXT: SUB_INT T0.Z, PS, T1.W,
+; EG-NEXT: SUBB_UINT T4.W, PV.W, T1.W,
+; EG-NEXT: SUB_INT * T5.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT T5.Y, PV.Z, PV.W,
+; EG-NEXT: SUB_INT * T5.Z, T2.W, T0.W,
+; EG-NEXT: SUB_INT T5.X, T3.W, T1.W,
+; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr <2 x i64>, ptr addrspace(1) %in, i64 1
%num = load <2 x i64>, ptr addrspace(1) %in
%den = load <2 x i64>, ptr addrspace(1) %den_ptr
@@ -101,6 +4731,126 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
}
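The EG checks above unroll a textbook restoring (shift-and-subtract) division one bit per round: each round shifts the partial remainder left by one, ORs in the next numerator bit (BFE_UINT/LSHL/OR_INT), compares the result against the divisor (SETE_INT/SETGE_UINT), and conditionally subtracts it (SUB_INT/SUBB_UINT/CNDE_INT). A minimal scalar sketch of the loop being unrolled, in LLVM IR — hypothetical, for orientation only, not taken from this patch:

; Hypothetical sketch, not part of the patch: the scalar loop the EG
; checks above unroll, 64 rounds for a 64-bit unsigned remainder.
define i64 @urem_shift_subtract(i64 %num, i64 %den) {
entry:
  br label %loop
loop:                                        ; one EG round per bit, 63 down to 0
  %i = phi i64 [ 63, %entry ], [ %i.next, %loop ]
  %rem = phi i64 [ 0, %entry ], [ %rem.next, %loop ]
  %num.shr = lshr i64 %num, %i
  %bit = and i64 %num.shr, 1                 ; next numerator bit (cf. BFE_UINT)
  %rem.shl = shl i64 %rem, 1                 ; cf. LSHL
  %rem.in = or i64 %rem.shl, %bit            ; cf. OR_INT
  %ge = icmp uge i64 %rem.in, %den           ; cf. SETGE_UINT/SETE_INT
  %rem.sub = sub i64 %rem.in, %den
  %rem.next = select i1 %ge, i64 %rem.sub, i64 %rem.in ; cf. CNDE_INT
  %i.next = sub i64 %i, 1
  %done = icmp eq i64 %i, 0
  br i1 %done, label %exit, label %loop
exit:
  ret i64 %rem.next
}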
define amdgpu_kernel void @srem_v2i64_4(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_v2i64_4:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v4, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_ashrrev_i32_e32 v5, 31, v1
+; GCN-NEXT: v_lshrrev_b32_e32 v5, 30, v5
+; GCN-NEXT: v_ashrrev_i32_e32 v6, 31, v3
+; GCN-NEXT: v_add_co_u32_e32 v5, vcc, v0, v5
+; GCN-NEXT: v_lshrrev_b32_e32 v6, 30, v6
+; GCN-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; GCN-NEXT: v_add_co_u32_e32 v6, vcc, v2, v6
+; GCN-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GCN-NEXT: v_and_b32_e32 v5, -4, v5
+; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v5
+; GCN-NEXT: v_and_b32_e32 v6, -4, v6
+; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v7, vcc
+; GCN-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v6
+; GCN-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v8, vcc
+; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_v2i64_4:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: s_mov_b32 s10, s6
+; TAHITI-NEXT: s_mov_b32 s11, s7
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s2
+; TAHITI-NEXT: s_mov_b32 s9, s3
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; TAHITI-NEXT: s_mov_b32 s4, s0
+; TAHITI-NEXT: s_mov_b32 s5, s1
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; TAHITI-NEXT: v_lshrrev_b32_e32 v4, 30, v4
+; TAHITI-NEXT: v_ashrrev_i32_e32 v5, 31, v3
+; TAHITI-NEXT: v_add_i32_e32 v4, vcc, v0, v4
+; TAHITI-NEXT: v_lshrrev_b32_e32 v5, 30, v5
+; TAHITI-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v2, v5
+; TAHITI-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; TAHITI-NEXT: v_and_b32_e32 v4, -4, v4
+; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
+; TAHITI-NEXT: v_and_b32_e32 v5, -4, v5
+; TAHITI-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc
+; TAHITI-NEXT: v_sub_i32_e32 v2, vcc, v2, v5
+; TAHITI-NEXT: v_subb_u32_e32 v3, vcc, v3, v7, vcc
+; TAHITI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_v2i64_4:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: v_mov_b32_e32 v4, s0
+; TONGA-NEXT: v_mov_b32_e32 v5, s1
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_ashrrev_i32_e32 v6, 31, v1
+; TONGA-NEXT: v_lshrrev_b32_e32 v6, 30, v6
+; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v3
+; TONGA-NEXT: v_add_u32_e32 v6, vcc, v0, v6
+; TONGA-NEXT: v_lshrrev_b32_e32 v7, 30, v7
+; TONGA-NEXT: v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; TONGA-NEXT: v_add_u32_e32 v7, vcc, v2, v7
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v3, vcc
+; TONGA-NEXT: v_and_b32_e32 v6, -4, v6
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v6
+; TONGA-NEXT: v_and_b32_e32 v7, -4, v7
+; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v1, v8, vcc
+; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v7
+; TONGA-NEXT: v_subb_u32_e32 v3, vcc, v3, v9, vcc
+; TONGA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_v2i64_4:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 24, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T3.XYZW, T0.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: ASHR T0.W, T0.W, literal.x,
+; EG-NEXT: ASHR * T1.W, T0.Y, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR * T0.W, PV.W, literal.x,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T2.W, T0.Z, PV.W,
+; EG-NEXT: LSHR * T1.W, T1.W, literal.x,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T1.Z, T0.X, PS,
+; EG-NEXT: ADDC_UINT T0.W, T0.Z, T0.W,
+; EG-NEXT: AND_INT * T2.W, PV.W, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T0.Y, T0.Z, PS,
+; EG-NEXT: BFE_INT T2.Z, PV.W, 0.0, 1,
+; EG-NEXT: ADDC_UINT T0.W, T0.X, T1.W,
+; EG-NEXT: AND_INT * T1.W, PV.Z, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T1.Z, T0.X, PS,
+; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, 1,
+; EG-NEXT: SUB_INT * T3.W, PV.Z, PV.Y,
+; EG-NEXT: SUB_INT T3.Y, PV.W, PV.Z,
+; EG-NEXT: SUB_INT * T3.Z, T0.Z, T2.W,
+; EG-NEXT: SUB_INT T3.X, T0.X, T1.W,
+; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%num = load <2 x i64>, ptr addrspace(1) %in
%result = srem <2 x i64> %num, <i64 4, i64 4>
store <2 x i64> %result, ptr addrspace(1) %out
@@ -108,6 +4858,4020 @@ define amdgpu_kernel void @srem_v2i64_4(ptr addrspace(1) %out, ptr addrspace(1)
}
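All four prefixes check the same strength reduction for a signed remainder by the constant 4: no division is emitted; the dividend is instead rounded toward zero onto a multiple of four (ASHR/LSHR to build a 0-or-3 bias, ADD, AND with -4) and the result is subtracted from the original value. A single-lane LLVM IR sketch of the identity — hypothetical, not from this patch:

; Hypothetical single-lane sketch, not part of the patch.
define i64 @srem4_expansion(i64 %x) {
  %sign = ashr i64 %x, 63      ; all ones when %x is negative
  %bias = lshr i64 %sign, 62   ; 3 for negative inputs, 0 otherwise
  %biased = add i64 %x, %bias  ; bias so the masking truncates toward zero
  %mul4 = and i64 %biased, -4  ; equals 4 * (%x sdiv 4)
  %rem = sub i64 %x, %mul4     ; %x srem 4
  ret i64 %rem
}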
define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_v4i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v8, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[10:13], v8, s[6:7] offset:32
+; GCN-NEXT: global_load_dwordx4 v[14:17], v8, s[6:7]
+; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[6:7] offset:48
+; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[6:7] offset:16
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_readfirstlane_b32 s7, v11
+; GCN-NEXT: v_readfirstlane_b32 s6, v10
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_readfirstlane_b32 s9, v15
+; GCN-NEXT: v_readfirstlane_b32 s8, v14
+; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[6:7]
+; GCN-NEXT: s_mov_b32 s0, 0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_cbranch_scc0 .LBB12_13
+; GCN-NEXT: ; %bb.1:
+; GCN-NEXT: s_ashr_i32 s0, s7, 31
+; GCN-NEXT: s_add_u32 s2, s6, s0
+; GCN-NEXT: s_mov_b32 s1, s0
+; GCN-NEXT: s_addc_u32 s3, s7, s0
+; GCN-NEXT: s_xor_b64 s[12:13], s[2:3], s[0:1]
+; GCN-NEXT: v_cvt_f32_u32_e32 v8, s12
+; GCN-NEXT: v_cvt_f32_u32_e32 v9, s13
+; GCN-NEXT: s_sub_u32 s0, 0, s12
+; GCN-NEXT: s_subb_u32 s1, 0, s13
+; GCN-NEXT: v_madmk_f32 v8, v9, 0x4f800000, v8
+; GCN-NEXT: v_rcp_f32_e32 v8, v8
+; GCN-NEXT: v_mul_f32_e32 v8, 0x5f7ffffc, v8
+; GCN-NEXT: v_mul_f32_e32 v9, 0x2f800000, v8
+; GCN-NEXT: v_trunc_f32_e32 v9, v9
+; GCN-NEXT: v_madmk_f32 v8, v9, 0xcf800000, v8
+; GCN-NEXT: v_cvt_u32_f32_e32 v9, v9
+; GCN-NEXT: v_cvt_u32_f32_e32 v8, v8
+; GCN-NEXT: v_readfirstlane_b32 s2, v9
+; GCN-NEXT: v_readfirstlane_b32 s3, v8
+; GCN-NEXT: s_mul_i32 s7, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s15, s0, s3
+; GCN-NEXT: s_mul_i32 s14, s1, s3
+; GCN-NEXT: s_add_i32 s7, s15, s7
+; GCN-NEXT: s_add_i32 s7, s7, s14
+; GCN-NEXT: s_mul_i32 s16, s0, s3
+; GCN-NEXT: s_mul_hi_u32 s14, s3, s7
+; GCN-NEXT: s_mul_i32 s15, s3, s7
+; GCN-NEXT: s_mul_hi_u32 s3, s3, s16
+; GCN-NEXT: s_add_u32 s3, s3, s15
+; GCN-NEXT: s_addc_u32 s14, 0, s14
+; GCN-NEXT: s_mul_hi_u32 s17, s2, s16
+; GCN-NEXT: s_mul_i32 s16, s2, s16
+; GCN-NEXT: s_add_u32 s3, s3, s16
+; GCN-NEXT: s_mul_hi_u32 s15, s2, s7
+; GCN-NEXT: s_addc_u32 s3, s14, s17
+; GCN-NEXT: s_addc_u32 s14, s15, 0
+; GCN-NEXT: s_mul_i32 s7, s2, s7
+; GCN-NEXT: s_add_u32 s3, s3, s7
+; GCN-NEXT: s_addc_u32 s7, 0, s14
+; GCN-NEXT: v_add_co_u32_e32 v8, vcc, s3, v8
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_addc_u32 s2, s2, s7
+; GCN-NEXT: v_readfirstlane_b32 s7, v8
+; GCN-NEXT: s_mul_i32 s3, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s14, s0, s7
+; GCN-NEXT: s_add_i32 s3, s14, s3
+; GCN-NEXT: s_mul_i32 s1, s1, s7
+; GCN-NEXT: s_add_i32 s3, s3, s1
+; GCN-NEXT: s_mul_i32 s0, s0, s7
+; GCN-NEXT: s_mul_hi_u32 s14, s2, s0
+; GCN-NEXT: s_mul_i32 s15, s2, s0
+; GCN-NEXT: s_mul_i32 s17, s7, s3
+; GCN-NEXT: s_mul_hi_u32 s0, s7, s0
+; GCN-NEXT: s_mul_hi_u32 s16, s7, s3
+; GCN-NEXT: s_add_u32 s0, s0, s17
+; GCN-NEXT: s_addc_u32 s7, 0, s16
+; GCN-NEXT: s_add_u32 s0, s0, s15
+; GCN-NEXT: s_mul_hi_u32 s1, s2, s3
+; GCN-NEXT: s_addc_u32 s0, s7, s14
+; GCN-NEXT: s_addc_u32 s1, s1, 0
+; GCN-NEXT: s_mul_i32 s3, s2, s3
+; GCN-NEXT: s_add_u32 s0, s0, s3
+; GCN-NEXT: s_addc_u32 s1, 0, s1
+; GCN-NEXT: v_add_co_u32_e32 v8, vcc, s0, v8
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_addc_u32 s2, s2, s1
+; GCN-NEXT: s_ashr_i32 s14, s9, 31
+; GCN-NEXT: s_add_u32 s0, s8, s14
+; GCN-NEXT: s_mov_b32 s15, s14
+; GCN-NEXT: s_addc_u32 s1, s9, s14
+; GCN-NEXT: s_xor_b64 s[16:17], s[0:1], s[14:15]
+; GCN-NEXT: v_readfirstlane_b32 s3, v8
+; GCN-NEXT: s_mul_i32 s1, s16, s2
+; GCN-NEXT: s_mul_hi_u32 s7, s16, s3
+; GCN-NEXT: s_mul_hi_u32 s0, s16, s2
+; GCN-NEXT: s_add_u32 s1, s7, s1
+; GCN-NEXT: s_addc_u32 s0, 0, s0
+; GCN-NEXT: s_mul_hi_u32 s9, s17, s3
+; GCN-NEXT: s_mul_i32 s3, s17, s3
+; GCN-NEXT: s_add_u32 s1, s1, s3
+; GCN-NEXT: s_mul_hi_u32 s7, s17, s2
+; GCN-NEXT: s_addc_u32 s0, s0, s9
+; GCN-NEXT: s_addc_u32 s1, s7, 0
+; GCN-NEXT: s_mul_i32 s2, s17, s2
+; GCN-NEXT: s_add_u32 s0, s0, s2
+; GCN-NEXT: s_addc_u32 s1, 0, s1
+; GCN-NEXT: s_mul_i32 s1, s12, s1
+; GCN-NEXT: s_mul_hi_u32 s2, s12, s0
+; GCN-NEXT: s_add_i32 s1, s2, s1
+; GCN-NEXT: s_mul_i32 s2, s13, s0
+; GCN-NEXT: s_mul_i32 s0, s12, s0
+; GCN-NEXT: s_add_i32 s7, s1, s2
+; GCN-NEXT: v_mov_b32_e32 v8, s0
+; GCN-NEXT: s_sub_i32 s1, s17, s7
+; GCN-NEXT: v_sub_co_u32_e32 v8, vcc, s16, v8
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: s_subb_u32 s9, s1, s13
+; GCN-NEXT: v_subrev_co_u32_e64 v9, s[0:1], s12, v8
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_subb_u32 s15, s9, 0
+; GCN-NEXT: s_cmp_ge_u32 s15, s13
+; GCN-NEXT: s_cselect_b32 s16, -1, 0
+; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v9
+; GCN-NEXT: s_cmp_eq_u32 s15, s13
+; GCN-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[2:3]
+; GCN-NEXT: v_mov_b32_e32 v11, s16
+; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: v_cndmask_b32_e64 v10, v11, v10, s[2:3]
+; GCN-NEXT: s_subb_u32 s2, s9, s13
+; GCN-NEXT: v_subrev_co_u32_e64 v11, s[0:1], s12, v9
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_subb_u32 s2, s2, 0
+; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v10
+; GCN-NEXT: v_cndmask_b32_e64 v9, v9, v11, s[0:1]
+; GCN-NEXT: v_mov_b32_e32 v10, s15
+; GCN-NEXT: v_mov_b32_e32 v11, s2
+; GCN-NEXT: s_cmp_lg_u64 vcc, 0
+; GCN-NEXT: v_cndmask_b32_e64 v10, v10, v11, s[0:1]
+; GCN-NEXT: s_subb_u32 s0, s17, s7
+; GCN-NEXT: s_cmp_ge_u32 s0, s13
+; GCN-NEXT: s_cselect_b32 s1, -1, 0
+; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v8
+; GCN-NEXT: s_cmp_eq_u32 s0, s13
+; GCN-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc
+; GCN-NEXT: v_mov_b32_e32 v14, s1
+; GCN-NEXT: s_cselect_b64 vcc, -1, 0
+; GCN-NEXT: v_cndmask_b32_e32 v11, v14, v11, vcc
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GCN-NEXT: v_mov_b32_e32 v14, s0
+; GCN-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v10, v14, v10, vcc
+; GCN-NEXT: v_xor_b32_e32 v8, s14, v8
+; GCN-NEXT: v_xor_b32_e32 v9, s14, v10
+; GCN-NEXT: v_mov_b32_e32 v10, s14
+; GCN-NEXT: v_subrev_co_u32_e32 v8, vcc, s14, v8
+; GCN-NEXT: v_subb_co_u32_e32 v9, vcc, v9, v10, vcc
+; GCN-NEXT: s_cbranch_execnz .LBB12_3
+; GCN-NEXT: .LBB12_2:
+; GCN-NEXT: v_cvt_f32_u32_e32 v8, s6
+; GCN-NEXT: s_sub_i32 s0, 0, s6
+; GCN-NEXT: s_mov_b32 s1, 0
+; GCN-NEXT: v_rcp_iflag_f32_e32 v8, v8
+; GCN-NEXT: v_mul_f32_e32 v8, 0x4f7ffffe, v8
+; GCN-NEXT: v_cvt_u32_f32_e32 v8, v8
+; GCN-NEXT: v_readfirstlane_b32 s2, v8
+; GCN-NEXT: s_mul_i32 s0, s0, s2
+; GCN-NEXT: s_mul_hi_u32 s0, s2, s0
+; GCN-NEXT: s_add_i32 s2, s2, s0
+; GCN-NEXT: s_mul_hi_u32 s0, s8, s2
+; GCN-NEXT: s_mul_i32 s0, s0, s6
+; GCN-NEXT: s_sub_i32 s0, s8, s0
+; GCN-NEXT: s_sub_i32 s2, s0, s6
+; GCN-NEXT: s_cmp_ge_u32 s0, s6
+; GCN-NEXT: s_cselect_b32 s0, s2, s0
+; GCN-NEXT: s_sub_i32 s2, s0, s6
+; GCN-NEXT: s_cmp_ge_u32 s0, s6
+; GCN-NEXT: s_cselect_b32 s0, s2, s0
+; GCN-NEXT: v_mov_b32_e32 v9, s1
+; GCN-NEXT: v_mov_b32_e32 v8, s0
+; GCN-NEXT: .LBB12_3:
+; GCN-NEXT: v_or_b32_e32 v11, v17, v13
+; GCN-NEXT: v_mov_b32_e32 v10, 0
+; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-NEXT: s_cbranch_vccz .LBB12_14
+; GCN-NEXT: ; %bb.4:
+; GCN-NEXT: v_ashrrev_i32_e32 v10, 31, v13
+; GCN-NEXT: v_add_co_u32_e32 v11, vcc, v12, v10
+; GCN-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v10, vcc
+; GCN-NEXT: v_xor_b32_e32 v11, v11, v10
+; GCN-NEXT: v_xor_b32_e32 v10, v13, v10
+; GCN-NEXT: v_cvt_f32_u32_e32 v13, v11
+; GCN-NEXT: v_cvt_f32_u32_e32 v14, v10
+; GCN-NEXT: v_sub_co_u32_e32 v15, vcc, 0, v11
+; GCN-NEXT: v_subb_co_u32_e32 v18, vcc, 0, v10, vcc
+; GCN-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13
+; GCN-NEXT: v_rcp_f32_e32 v13, v13
+; GCN-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13
+; GCN-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13
+; GCN-NEXT: v_trunc_f32_e32 v14, v14
+; GCN-NEXT: v_madmk_f32 v13, v14, 0xcf800000, v13
+; GCN-NEXT: v_cvt_u32_f32_e32 v14, v14
+; GCN-NEXT: v_cvt_u32_f32_e32 v13, v13
+; GCN-NEXT: v_mul_lo_u32 v20, v15, v14
+; GCN-NEXT: v_mul_hi_u32 v19, v15, v13
+; GCN-NEXT: v_mul_lo_u32 v21, v18, v13
+; GCN-NEXT: v_mul_lo_u32 v22, v15, v13
+; GCN-NEXT: v_add_u32_e32 v19, v19, v20
+; GCN-NEXT: v_add_u32_e32 v19, v19, v21
+; GCN-NEXT: v_mul_lo_u32 v20, v13, v19
+; GCN-NEXT: v_mul_hi_u32 v21, v13, v22
+; GCN-NEXT: v_mul_hi_u32 v23, v13, v19
+; GCN-NEXT: v_mul_hi_u32 v24, v14, v19
+; GCN-NEXT: v_mul_lo_u32 v19, v14, v19
+; GCN-NEXT: v_add_co_u32_e32 v20, vcc, v21, v20
+; GCN-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v23, vcc
+; GCN-NEXT: v_mul_lo_u32 v23, v14, v22
+; GCN-NEXT: v_mul_hi_u32 v22, v14, v22
+; GCN-NEXT: v_add_co_u32_e32 v20, vcc, v20, v23
+; GCN-NEXT: v_addc_co_u32_e32 v20, vcc, v21, v22, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v24, vcc
+; GCN-NEXT: v_add_co_u32_e32 v19, vcc, v20, v19
+; GCN-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v21, vcc
+; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v19
+; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v20, vcc
+; GCN-NEXT: v_mul_lo_u32 v19, v15, v14
+; GCN-NEXT: v_mul_hi_u32 v20, v15, v13
+; GCN-NEXT: v_mul_lo_u32 v18, v18, v13
+; GCN-NEXT: v_mul_lo_u32 v15, v15, v13
+; GCN-NEXT: v_add_u32_e32 v19, v20, v19
+; GCN-NEXT: v_add_u32_e32 v18, v19, v18
+; GCN-NEXT: v_mul_lo_u32 v21, v13, v18
+; GCN-NEXT: v_mul_hi_u32 v22, v13, v15
+; GCN-NEXT: v_mul_hi_u32 v23, v13, v18
+; GCN-NEXT: v_mul_hi_u32 v20, v14, v15
+; GCN-NEXT: v_mul_lo_u32 v15, v14, v15
+; GCN-NEXT: v_mul_hi_u32 v19, v14, v18
+; GCN-NEXT: v_add_co_u32_e32 v21, vcc, v22, v21
+; GCN-NEXT: v_addc_co_u32_e32 v22, vcc, 0, v23, vcc
+; GCN-NEXT: v_mul_lo_u32 v18, v14, v18
+; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v21, v15
+; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, v22, v20, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
+; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v15, v18
+; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v19, vcc
+; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v15
+; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v18, vcc
+; GCN-NEXT: v_ashrrev_i32_e32 v15, 31, v17
+; GCN-NEXT: v_add_co_u32_e32 v18, vcc, v16, v15
+; GCN-NEXT: v_xor_b32_e32 v18, v18, v15
+; GCN-NEXT: v_mul_lo_u32 v19, v18, v14
+; GCN-NEXT: v_mul_hi_u32 v20, v18, v13
+; GCN-NEXT: v_mul_hi_u32 v21, v18, v14
+; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, v17, v15, vcc
+; GCN-NEXT: v_xor_b32_e32 v17, v17, v15
+; GCN-NEXT: v_add_co_u32_e32 v19, vcc, v20, v19
+; GCN-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v21, vcc
+; GCN-NEXT: v_mul_lo_u32 v21, v17, v13
+; GCN-NEXT: v_mul_hi_u32 v13, v17, v13
+; GCN-NEXT: v_mul_hi_u32 v22, v17, v14
+; GCN-NEXT: v_mul_lo_u32 v14, v17, v14
+; GCN-NEXT: v_add_co_u32_e32 v19, vcc, v19, v21
+; GCN-NEXT: v_addc_co_u32_e32 v13, vcc, v20, v13, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v22, vcc
+; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v14
+; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, 0, v19, vcc
+; GCN-NEXT: v_mul_lo_u32 v14, v11, v14
+; GCN-NEXT: v_mul_hi_u32 v19, v11, v13
+; GCN-NEXT: v_mul_lo_u32 v20, v10, v13
+; GCN-NEXT: v_mul_lo_u32 v13, v11, v13
+; GCN-NEXT: v_add_u32_e32 v14, v19, v14
+; GCN-NEXT: v_add_u32_e32 v14, v14, v20
+; GCN-NEXT: v_sub_u32_e32 v19, v17, v14
+; GCN-NEXT: v_sub_co_u32_e32 v13, vcc, v18, v13
+; GCN-NEXT: v_subb_co_u32_e64 v18, s[0:1], v19, v10, vcc
+; GCN-NEXT: v_sub_co_u32_e64 v19, s[0:1], v13, v11
+; GCN-NEXT: v_subbrev_co_u32_e64 v20, s[2:3], 0, v18, s[0:1]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v20, v10
+; GCN-NEXT: v_cndmask_b32_e64 v21, 0, -1, s[2:3]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v19, v11
+; GCN-NEXT: v_subb_co_u32_e32 v14, vcc, v17, v14, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v22, 0, -1, s[2:3]
+; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], v20, v10
+; GCN-NEXT: v_subb_co_u32_e64 v18, s[0:1], v18, v10, s[0:1]
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v14, v10
+; GCN-NEXT: v_cndmask_b32_e64 v21, v21, v22, s[2:3]
+; GCN-NEXT: v_sub_co_u32_e64 v22, s[0:1], v19, v11
+; GCN-NEXT: v_cndmask_b32_e64 v17, 0, -1, vcc
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v13, v11
+; GCN-NEXT: v_subbrev_co_u32_e64 v18, s[0:1], 0, v18, s[0:1]
+; GCN-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v14, v10
+; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v21
+; GCN-NEXT: v_cndmask_b32_e32 v10, v17, v11, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v19, v19, v22, s[0:1]
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GCN-NEXT: v_cndmask_b32_e64 v18, v20, v18, s[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v11, v13, v19, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v10, v14, v18, vcc
+; GCN-NEXT: v_xor_b32_e32 v11, v11, v15
+; GCN-NEXT: v_xor_b32_e32 v13, v10, v15
+; GCN-NEXT: v_sub_co_u32_e32 v10, vcc, v11, v15
+; GCN-NEXT: v_subb_co_u32_e32 v11, vcc, v13, v15, vcc
+; GCN-NEXT: s_cbranch_execnz .LBB12_6
+; GCN-NEXT: .LBB12_5:
+; GCN-NEXT: v_cvt_f32_u32_e32 v10, v12
+; GCN-NEXT: v_sub_u32_e32 v11, 0, v12
+; GCN-NEXT: v_rcp_iflag_f32_e32 v10, v10
+; GCN-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10
+; GCN-NEXT: v_cvt_u32_f32_e32 v10, v10
+; GCN-NEXT: v_mul_lo_u32 v11, v11, v10
+; GCN-NEXT: v_mul_hi_u32 v11, v10, v11
+; GCN-NEXT: v_add_u32_e32 v10, v10, v11
+; GCN-NEXT: v_mul_hi_u32 v10, v16, v10
+; GCN-NEXT: v_mul_lo_u32 v10, v10, v12
+; GCN-NEXT: v_sub_u32_e32 v10, v16, v10
+; GCN-NEXT: v_sub_u32_e32 v11, v10, v12
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
+; GCN-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; GCN-NEXT: v_sub_u32_e32 v11, v10, v12
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
+; GCN-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; GCN-NEXT: v_mov_b32_e32 v11, 0
+; GCN-NEXT: .LBB12_6:
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v13, v5, v1
+; GCN-NEXT: v_mov_b32_e32 v12, 0
+; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
+; GCN-NEXT: s_cbranch_vccz .LBB12_15
+; GCN-NEXT: ; %bb.7:
+; GCN-NEXT: v_ashrrev_i32_e32 v13, 31, v1
+; GCN-NEXT: v_add_co_u32_e32 v12, vcc, v0, v13
+; GCN-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-NEXT: v_xor_b32_e32 v12, v12, v13
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v13
+; GCN-NEXT: v_cvt_f32_u32_e32 v13, v12
+; GCN-NEXT: v_cvt_f32_u32_e32 v14, v1
+; GCN-NEXT: v_sub_co_u32_e32 v15, vcc, 0, v12
+; GCN-NEXT: v_subb_co_u32_e32 v16, vcc, 0, v1, vcc
+; GCN-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13
+; GCN-NEXT: v_rcp_f32_e32 v13, v13
+; GCN-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13
+; GCN-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13
+; GCN-NEXT: v_trunc_f32_e32 v14, v14
+; GCN-NEXT: v_madmk_f32 v13, v14, 0xcf800000, v13
+; GCN-NEXT: v_cvt_u32_f32_e32 v14, v14
+; GCN-NEXT: v_cvt_u32_f32_e32 v13, v13
+; GCN-NEXT: v_mul_lo_u32 v18, v15, v14
+; GCN-NEXT: v_mul_hi_u32 v17, v15, v13
+; GCN-NEXT: v_mul_lo_u32 v19, v16, v13
+; GCN-NEXT: v_mul_lo_u32 v20, v15, v13
+; GCN-NEXT: v_add_u32_e32 v17, v17, v18
+; GCN-NEXT: v_add_u32_e32 v17, v17, v19
+; GCN-NEXT: v_mul_lo_u32 v18, v13, v17
+; GCN-NEXT: v_mul_hi_u32 v19, v13, v20
+; GCN-NEXT: v_mul_hi_u32 v21, v13, v17
+; GCN-NEXT: v_mul_hi_u32 v22, v14, v17
+; GCN-NEXT: v_mul_lo_u32 v17, v14, v17
+; GCN-NEXT: v_add_co_u32_e32 v18, vcc, v19, v18
+; GCN-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v21, vcc
+; GCN-NEXT: v_mul_lo_u32 v21, v14, v20
+; GCN-NEXT: v_mul_hi_u32 v20, v14, v20
+; GCN-NEXT: v_add_co_u32_e32 v18, vcc, v18, v21
+; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, v19, v20, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v22, vcc
+; GCN-NEXT: v_add_co_u32_e32 v17, vcc, v18, v17
+; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v19, vcc
+; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v17
+; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v18, vcc
+; GCN-NEXT: v_mul_lo_u32 v17, v15, v14
+; GCN-NEXT: v_mul_hi_u32 v18, v15, v13
+; GCN-NEXT: v_mul_lo_u32 v16, v16, v13
+; GCN-NEXT: v_mul_lo_u32 v15, v15, v13
+; GCN-NEXT: v_add_u32_e32 v17, v18, v17
+; GCN-NEXT: v_add_u32_e32 v16, v17, v16
+; GCN-NEXT: v_mul_lo_u32 v19, v13, v16
+; GCN-NEXT: v_mul_hi_u32 v20, v13, v15
+; GCN-NEXT: v_mul_hi_u32 v21, v13, v16
+; GCN-NEXT: v_mul_hi_u32 v18, v14, v15
+; GCN-NEXT: v_mul_lo_u32 v15, v14, v15
+; GCN-NEXT: v_mul_hi_u32 v17, v14, v16
+; GCN-NEXT: v_add_co_u32_e32 v19, vcc, v20, v19
+; GCN-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v21, vcc
+; GCN-NEXT: v_mul_lo_u32 v16, v14, v16
+; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v19, v15
+; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, v20, v18, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v15, v16
+; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v17, vcc
+; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v15
+; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v16, vcc
+; GCN-NEXT: v_ashrrev_i32_e32 v15, 31, v5
+; GCN-NEXT: v_add_co_u32_e32 v16, vcc, v4, v15
+; GCN-NEXT: v_xor_b32_e32 v16, v16, v15
+; GCN-NEXT: v_mul_lo_u32 v17, v16, v14
+; GCN-NEXT: v_mul_hi_u32 v18, v16, v13
+; GCN-NEXT: v_mul_hi_u32 v19, v16, v14
+; GCN-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v15, vcc
+; GCN-NEXT: v_xor_b32_e32 v5, v5, v15
+; GCN-NEXT: v_add_co_u32_e32 v17, vcc, v18, v17
+; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v19, vcc
+; GCN-NEXT: v_mul_lo_u32 v19, v5, v13
+; GCN-NEXT: v_mul_hi_u32 v13, v5, v13
+; GCN-NEXT: v_mul_hi_u32 v20, v5, v14
+; GCN-NEXT: v_mul_lo_u32 v14, v5, v14
+; GCN-NEXT: v_add_co_u32_e32 v17, vcc, v17, v19
+; GCN-NEXT: v_addc_co_u32_e32 v13, vcc, v18, v13, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v20, vcc
+; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v14
+; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, 0, v17, vcc
+; GCN-NEXT: v_mul_lo_u32 v14, v12, v14
+; GCN-NEXT: v_mul_hi_u32 v17, v12, v13
+; GCN-NEXT: v_mul_lo_u32 v18, v1, v13
+; GCN-NEXT: v_mul_lo_u32 v13, v12, v13
+; GCN-NEXT: v_add_u32_e32 v14, v17, v14
+; GCN-NEXT: v_add_u32_e32 v14, v14, v18
+; GCN-NEXT: v_sub_u32_e32 v17, v5, v14
+; GCN-NEXT: v_sub_co_u32_e32 v13, vcc, v16, v13
+; GCN-NEXT: v_subb_co_u32_e64 v16, s[0:1], v17, v1, vcc
+; GCN-NEXT: v_sub_co_u32_e64 v17, s[0:1], v13, v12
+; GCN-NEXT: v_subbrev_co_u32_e64 v18, s[2:3], 0, v16, s[0:1]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v18, v1
+; GCN-NEXT: v_cndmask_b32_e64 v19, 0, -1, s[2:3]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v17, v12
+; GCN-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v14, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v20, 0, -1, s[2:3]
+; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], v18, v1
+; GCN-NEXT: v_subb_co_u32_e64 v16, s[0:1], v16, v1, s[0:1]
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v5, v1
+; GCN-NEXT: v_cndmask_b32_e64 v19, v19, v20, s[2:3]
+; GCN-NEXT: v_sub_co_u32_e64 v20, s[0:1], v17, v12
+; GCN-NEXT: v_cndmask_b32_e64 v14, 0, -1, vcc
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v13, v12
+; GCN-NEXT: v_subbrev_co_u32_e64 v16, s[0:1], 0, v16, s[0:1]
+; GCN-NEXT: v_cndmask_b32_e64 v12, 0, -1, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
+; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v19
+; GCN-NEXT: v_cndmask_b32_e32 v1, v14, v12, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v17, v17, v20, s[0:1]
+; GCN-NEXT: v_cndmask_b32_e64 v16, v18, v16, s[0:1]
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e32 v1, v5, v16, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
+; GCN-NEXT: v_xor_b32_e32 v5, v5, v15
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v15
+; GCN-NEXT: v_sub_co_u32_e32 v12, vcc, v5, v15
+; GCN-NEXT: v_subb_co_u32_e32 v13, vcc, v1, v15, vcc
+; GCN-NEXT: s_cbranch_execnz .LBB12_9
+; GCN-NEXT: .LBB12_8:
+; GCN-NEXT: v_cvt_f32_u32_e32 v1, v0
+; GCN-NEXT: v_sub_u32_e32 v5, 0, v0
+; GCN-NEXT: v_mov_b32_e32 v13, 0
+; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT: v_mul_lo_u32 v5, v5, v1
+; GCN-NEXT: v_mul_hi_u32 v5, v1, v5
+; GCN-NEXT: v_add_u32_e32 v1, v1, v5
+; GCN-NEXT: v_mul_hi_u32 v1, v4, v1
+; GCN-NEXT: v_mul_lo_u32 v1, v1, v0
+; GCN-NEXT: v_sub_u32_e32 v1, v4, v1
+; GCN-NEXT: v_sub_u32_e32 v4, v1, v0
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GCN-NEXT: v_sub_u32_e32 v4, v1, v0
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc
+; GCN-NEXT: .LBB12_9:
+; GCN-NEXT: v_or_b32_e32 v1, v7, v3
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: s_cbranch_vccz .LBB12_16
+; GCN-NEXT: ; %bb.10:
+; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GCN-NEXT: v_add_co_u32_e32 v1, vcc, v2, v0
+; GCN-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v0, vcc
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v0
+; GCN-NEXT: v_xor_b32_e32 v0, v3, v0
+; GCN-NEXT: v_cvt_f32_u32_e32 v3, v1
+; GCN-NEXT: v_cvt_f32_u32_e32 v4, v0
+; GCN-NEXT: v_sub_co_u32_e32 v5, vcc, 0, v1
+; GCN-NEXT: v_subb_co_u32_e32 v14, vcc, 0, v0, vcc
+; GCN-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3
+; GCN-NEXT: v_rcp_f32_e32 v3, v3
+; GCN-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3
+; GCN-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3
+; GCN-NEXT: v_trunc_f32_e32 v4, v4
+; GCN-NEXT: v_madmk_f32 v3, v4, 0xcf800000, v3
+; GCN-NEXT: v_cvt_u32_f32_e32 v4, v4
+; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
+; GCN-NEXT: v_mul_lo_u32 v16, v5, v4
+; GCN-NEXT: v_mul_hi_u32 v15, v5, v3
+; GCN-NEXT: v_mul_lo_u32 v17, v14, v3
+; GCN-NEXT: v_mul_lo_u32 v18, v5, v3
+; GCN-NEXT: v_add_u32_e32 v15, v15, v16
+; GCN-NEXT: v_add_u32_e32 v15, v15, v17
+; GCN-NEXT: v_mul_lo_u32 v16, v3, v15
+; GCN-NEXT: v_mul_hi_u32 v17, v3, v18
+; GCN-NEXT: v_mul_hi_u32 v19, v3, v15
+; GCN-NEXT: v_mul_hi_u32 v20, v4, v15
+; GCN-NEXT: v_mul_lo_u32 v15, v4, v15
+; GCN-NEXT: v_add_co_u32_e32 v16, vcc, v17, v16
+; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v19, vcc
+; GCN-NEXT: v_mul_lo_u32 v19, v4, v18
+; GCN-NEXT: v_mul_hi_u32 v18, v4, v18
+; GCN-NEXT: v_add_co_u32_e32 v16, vcc, v16, v19
+; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, v17, v18, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v20, vcc
+; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v16, v15
+; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v17, vcc
+; GCN-NEXT: v_add_co_u32_e32 v3, vcc, v3, v15
+; GCN-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v16, vcc
+; GCN-NEXT: v_mul_lo_u32 v15, v5, v4
+; GCN-NEXT: v_mul_hi_u32 v16, v5, v3
+; GCN-NEXT: v_mul_lo_u32 v14, v14, v3
+; GCN-NEXT: v_mul_lo_u32 v5, v5, v3
+; GCN-NEXT: v_add_u32_e32 v15, v16, v15
+; GCN-NEXT: v_add_u32_e32 v14, v15, v14
+; GCN-NEXT: v_mul_lo_u32 v17, v3, v14
+; GCN-NEXT: v_mul_hi_u32 v18, v3, v5
+; GCN-NEXT: v_mul_hi_u32 v19, v3, v14
+; GCN-NEXT: v_mul_hi_u32 v16, v4, v5
+; GCN-NEXT: v_mul_lo_u32 v5, v4, v5
+; GCN-NEXT: v_mul_hi_u32 v15, v4, v14
+; GCN-NEXT: v_add_co_u32_e32 v17, vcc, v18, v17
+; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v19, vcc
+; GCN-NEXT: v_mul_lo_u32 v14, v4, v14
+; GCN-NEXT: v_add_co_u32_e32 v5, vcc, v17, v5
+; GCN-NEXT: v_addc_co_u32_e32 v5, vcc, v18, v16, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-NEXT: v_add_co_u32_e32 v5, vcc, v5, v14
+; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, 0, v15, vcc
+; GCN-NEXT: v_add_co_u32_e32 v3, vcc, v3, v5
+; GCN-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v14, vcc
+; GCN-NEXT: v_ashrrev_i32_e32 v5, 31, v7
+; GCN-NEXT: v_add_co_u32_e32 v14, vcc, v6, v5
+; GCN-NEXT: v_xor_b32_e32 v14, v14, v5
+; GCN-NEXT: v_mul_lo_u32 v15, v14, v4
+; GCN-NEXT: v_mul_hi_u32 v16, v14, v3
+; GCN-NEXT: v_mul_hi_u32 v17, v14, v4
+; GCN-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v5, vcc
+; GCN-NEXT: v_xor_b32_e32 v7, v7, v5
+; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v16, v15
+; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v17, vcc
+; GCN-NEXT: v_mul_lo_u32 v17, v7, v3
+; GCN-NEXT: v_mul_hi_u32 v3, v7, v3
+; GCN-NEXT: v_mul_hi_u32 v18, v7, v4
+; GCN-NEXT: v_mul_lo_u32 v4, v7, v4
+; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v15, v17
+; GCN-NEXT: v_addc_co_u32_e32 v3, vcc, v16, v3, vcc
+; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v18, vcc
+; GCN-NEXT: v_add_co_u32_e32 v3, vcc, v3, v4
+; GCN-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v15, vcc
+; GCN-NEXT: v_mul_lo_u32 v4, v1, v4
+; GCN-NEXT: v_mul_hi_u32 v15, v1, v3
+; GCN-NEXT: v_mul_lo_u32 v16, v0, v3
+; GCN-NEXT: v_mul_lo_u32 v3, v1, v3
+; GCN-NEXT: v_add_u32_e32 v4, v15, v4
+; GCN-NEXT: v_add_u32_e32 v4, v4, v16
+; GCN-NEXT: v_sub_u32_e32 v15, v7, v4
+; GCN-NEXT: v_sub_co_u32_e32 v3, vcc, v14, v3
+; GCN-NEXT: v_subb_co_u32_e64 v14, s[0:1], v15, v0, vcc
+; GCN-NEXT: v_sub_co_u32_e64 v15, s[0:1], v3, v1
+; GCN-NEXT: v_subbrev_co_u32_e64 v16, s[2:3], 0, v14, s[0:1]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v16, v0
+; GCN-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[2:3]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v15, v1
+; GCN-NEXT: v_subb_co_u32_e32 v4, vcc, v7, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v18, 0, -1, s[2:3]
+; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], v16, v0
+; GCN-NEXT: v_subb_co_u32_e64 v14, s[0:1], v14, v0, s[0:1]
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v4, v0
+; GCN-NEXT: v_cndmask_b32_e64 v17, v17, v18, s[2:3]
+; GCN-NEXT: v_sub_co_u32_e64 v18, s[0:1], v15, v1
+; GCN-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v3, v1
+; GCN-NEXT: v_subbrev_co_u32_e64 v14, s[0:1], 0, v14, s[0:1]
+; GCN-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v4, v0
+; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v17
+; GCN-NEXT: v_cndmask_b32_e32 v0, v7, v1, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v15, v15, v18, s[0:1]
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v14, v16, v14, s[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v15, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v14, vcc
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v5
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v5
+; GCN-NEXT: v_sub_co_u32_e32 v14, vcc, v1, v5
+; GCN-NEXT: v_subb_co_u32_e32 v15, vcc, v0, v5, vcc
+; GCN-NEXT: s_cbranch_execnz .LBB12_12
+; GCN-NEXT: .LBB12_11:
+; GCN-NEXT: v_cvt_f32_u32_e32 v0, v2
+; GCN-NEXT: v_sub_u32_e32 v1, 0, v2
+; GCN-NEXT: v_mov_b32_e32 v15, 0
+; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: v_mul_lo_u32 v1, v1, v0
+; GCN-NEXT: v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT: v_add_u32_e32 v0, v0, v1
+; GCN-NEXT: v_mul_hi_u32 v0, v6, v0
+; GCN-NEXT: v_mul_lo_u32 v0, v0, v2
+; GCN-NEXT: v_sub_u32_e32 v0, v6, v0
+; GCN-NEXT: v_sub_u32_e32 v1, v0, v2
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT: v_sub_u32_e32 v1, v0, v2
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; GCN-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc
+; GCN-NEXT: .LBB12_12:
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: global_store_dwordx4 v0, v[12:15], s[4:5] offset:16
+; GCN-NEXT: global_store_dwordx4 v0, v[8:11], s[4:5]
+; GCN-NEXT: s_endpgm
+; GCN-NEXT: .LBB12_13:
+; GCN-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GCN-NEXT: s_branch .LBB12_2
+; GCN-NEXT: .LBB12_14:
+; GCN-NEXT: s_branch .LBB12_5
+; GCN-NEXT: .LBB12_15:
+; GCN-NEXT: ; implicit-def: $vgpr12_vgpr13
+; GCN-NEXT: s_branch .LBB12_8
+; GCN-NEXT: .LBB12_16:
+; GCN-NEXT: s_branch .LBB12_11
+;
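For the non-constant divisors above, each 64-bit lane of the GCN expansion branches to a cheaper 32-bit path (.LBB12_2/5/8/11) when both high halves are zero; otherwise it takes absolute values, computes an unsigned remainder from a v_rcp_f32 reciprocal estimate refined in integer arithmetic (the 0x4f800000, 0x5f7ffffc, 0x2f800000, and 0xcf800000 constants) followed by two conditional correction steps, and finally restores the sign; for srem the remainder takes the dividend's sign. A hypothetical LLVM IR sketch of that sign-handling wrapper, not taken from this patch:

; Hypothetical sketch, not part of the patch; urem stands in for the
; rcp-based unsigned expansion shown in the checks above.
define i64 @srem_via_urem(i64 %num, i64 %den) {
  %num.sign = ashr i64 %num, 63
  %den.sign = ashr i64 %den, 63
  %num.flip = xor i64 %num, %num.sign
  %num.abs = sub i64 %num.flip, %num.sign   ; |num|
  %den.flip = xor i64 %den, %den.sign
  %den.abs = sub i64 %den.flip, %den.sign   ; |den|
  %rem.abs = urem i64 %num.abs, %den.abs
  %rem.flip = xor i64 %rem.abs, %num.sign
  %rem = sub i64 %rem.flip, %num.sign       ; remainder takes the dividend's sign
  ret i64 %rem
}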
+; TAHITI-LABEL: srem_v4i64:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s3, 0xf000
+; TAHITI-NEXT: s_mov_b32 s2, -1
+; TAHITI-NEXT: v_mov_b32_e32 v8, 0
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s0, s6
+; TAHITI-NEXT: s_mov_b32 s1, s7
+; TAHITI-NEXT: buffer_load_dwordx4 v[10:13], off, s[0:3], 0 offset:32
+; TAHITI-NEXT: buffer_load_dwordx4 v[14:17], off, s[0:3], 0
+; TAHITI-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; TAHITI-NEXT: s_waitcnt vmcnt(2)
+; TAHITI-NEXT: v_or_b32_e32 v9, v15, v11
+; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_13
+; TAHITI-NEXT: ; %bb.1:
+; TAHITI-NEXT: v_ashrrev_i32_e32 v8, 31, v11
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v10, v8
+; TAHITI-NEXT: v_addc_u32_e32 v11, vcc, v11, v8, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v9, v9, v8
+; TAHITI-NEXT: v_xor_b32_e32 v8, v11, v8
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v11, v9
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v18, v8
+; TAHITI-NEXT: v_sub_i32_e32 v19, vcc, 0, v9
+; TAHITI-NEXT: v_subb_u32_e32 v20, vcc, 0, v8, vcc
+; TAHITI-NEXT: v_madmk_f32 v11, v18, 0x4f800000, v11
+; TAHITI-NEXT: v_rcp_f32_e32 v11, v11
+; TAHITI-NEXT: v_mul_f32_e32 v11, 0x5f7ffffc, v11
+; TAHITI-NEXT: v_mul_f32_e32 v18, 0x2f800000, v11
+; TAHITI-NEXT: v_trunc_f32_e32 v18, v18
+; TAHITI-NEXT: v_madmk_f32 v11, v18, 0xcf800000, v11
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v18, v18
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v11, v11
+; TAHITI-NEXT: v_mul_lo_u32 v22, v19, v18
+; TAHITI-NEXT: v_mul_hi_u32 v21, v19, v11
+; TAHITI-NEXT: v_mul_lo_u32 v23, v20, v11
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v22, v21
+; TAHITI-NEXT: v_mul_lo_u32 v22, v19, v11
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v21, v23
+; TAHITI-NEXT: v_mul_lo_u32 v23, v11, v21
+; TAHITI-NEXT: v_mul_hi_u32 v24, v11, v22
+; TAHITI-NEXT: v_mul_hi_u32 v25, v11, v21
+; TAHITI-NEXT: v_mul_hi_u32 v26, v18, v21
+; TAHITI-NEXT: v_mul_lo_u32 v21, v18, v21
+; TAHITI-NEXT: v_add_i32_e32 v23, vcc, v24, v23
+; TAHITI-NEXT: v_addc_u32_e32 v24, vcc, 0, v25, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v25, v18, v22
+; TAHITI-NEXT: v_mul_hi_u32 v22, v18, v22
+; TAHITI-NEXT: v_add_i32_e32 v23, vcc, v23, v25
+; TAHITI-NEXT: v_addc_u32_e32 v22, vcc, v24, v22, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v23, vcc, 0, v26, vcc
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v22, v21
+; TAHITI-NEXT: v_addc_u32_e32 v22, vcc, 0, v23, vcc
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v21
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, v18, v22, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v21, v19, v18
+; TAHITI-NEXT: v_mul_hi_u32 v22, v19, v11
+; TAHITI-NEXT: v_mul_lo_u32 v20, v20, v11
+; TAHITI-NEXT: v_mul_lo_u32 v19, v19, v11
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v21, v22
+; TAHITI-NEXT: v_add_i32_e32 v20, vcc, v21, v20
+; TAHITI-NEXT: v_mul_lo_u32 v23, v11, v20
+; TAHITI-NEXT: v_mul_hi_u32 v24, v11, v19
+; TAHITI-NEXT: v_mul_hi_u32 v25, v11, v20
+; TAHITI-NEXT: v_mul_hi_u32 v22, v18, v19
+; TAHITI-NEXT: v_mul_lo_u32 v19, v18, v19
+; TAHITI-NEXT: v_mul_hi_u32 v21, v18, v20
+; TAHITI-NEXT: v_add_i32_e32 v23, vcc, v24, v23
+; TAHITI-NEXT: v_addc_u32_e32 v24, vcc, 0, v25, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v20, v18, v20
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v23, v19
+; TAHITI-NEXT: v_addc_u32_e32 v19, vcc, v24, v22, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v19, v20
+; TAHITI-NEXT: v_addc_u32_e32 v20, vcc, 0, v21, vcc
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v19
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, v18, v20, vcc
+; TAHITI-NEXT: v_ashrrev_i32_e32 v19, 31, v15
+; TAHITI-NEXT: v_add_i32_e32 v20, vcc, v14, v19
+; TAHITI-NEXT: v_xor_b32_e32 v20, v20, v19
+; TAHITI-NEXT: v_mul_lo_u32 v21, v20, v18
+; TAHITI-NEXT: v_mul_hi_u32 v22, v20, v11
+; TAHITI-NEXT: v_mul_hi_u32 v23, v20, v18
+; TAHITI-NEXT: v_addc_u32_e32 v15, vcc, v15, v19, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v15, v15, v19
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v22, v21
+; TAHITI-NEXT: v_addc_u32_e32 v22, vcc, 0, v23, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v23, v15, v11
+; TAHITI-NEXT: v_mul_hi_u32 v11, v15, v11
+; TAHITI-NEXT: v_mul_hi_u32 v24, v15, v18
+; TAHITI-NEXT: v_mul_lo_u32 v18, v15, v18
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v21, v23
+; TAHITI-NEXT: v_addc_u32_e32 v11, vcc, v22, v11, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v21, vcc, 0, v24, vcc
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v18
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, 0, v21, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v18, v9, v18
+; TAHITI-NEXT: v_mul_hi_u32 v21, v9, v11
+; TAHITI-NEXT: v_mul_lo_u32 v22, v8, v11
+; TAHITI-NEXT: v_mul_lo_u32 v11, v9, v11
+; TAHITI-NEXT: v_add_i32_e32 v18, vcc, v18, v21
+; TAHITI-NEXT: v_add_i32_e32 v18, vcc, v22, v18
+; TAHITI-NEXT: v_sub_i32_e32 v21, vcc, v15, v18
+; TAHITI-NEXT: v_sub_i32_e32 v11, vcc, v20, v11
+; TAHITI-NEXT: v_subb_u32_e64 v20, s[0:1], v21, v8, vcc
+; TAHITI-NEXT: v_sub_i32_e64 v21, s[0:1], v11, v9
+; TAHITI-NEXT: v_subbrev_u32_e64 v22, s[2:3], 0, v20, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v22, v8
+; TAHITI-NEXT: v_cndmask_b32_e64 v23, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v21, v9
+; TAHITI-NEXT: v_subb_u32_e32 v15, vcc, v15, v18, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v24, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_eq_u32_e64 s[2:3], v22, v8
+; TAHITI-NEXT: v_subb_u32_e64 v20, s[0:1], v20, v8, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v15, v8
+; TAHITI-NEXT: v_cndmask_b32_e64 v23, v23, v24, s[2:3]
+; TAHITI-NEXT: v_sub_i32_e64 v24, s[0:1], v21, v9
+; TAHITI-NEXT: v_cndmask_b32_e64 v18, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v11, v9
+; TAHITI-NEXT: v_subbrev_u32_e64 v20, s[0:1], 0, v20, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v9, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_eq_u32_e32 vcc, v15, v8
+; TAHITI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v23
+; TAHITI-NEXT: v_cndmask_b32_e32 v8, v18, v9, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v21, v21, v24, s[0:1]
+; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; TAHITI-NEXT: v_cndmask_b32_e64 v20, v22, v20, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e32 v9, v11, v21, vcc
+; TAHITI-NEXT: v_cndmask_b32_e32 v8, v15, v20, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v9, v9, v19
+; TAHITI-NEXT: v_xor_b32_e32 v11, v8, v19
+; TAHITI-NEXT: v_sub_i32_e32 v8, vcc, v9, v19
+; TAHITI-NEXT: v_subb_u32_e32 v9, vcc, v11, v19, vcc
+; TAHITI-NEXT: s_cbranch_execnz .LBB12_3
+; TAHITI-NEXT: .LBB12_2:
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v8, v10
+; TAHITI-NEXT: v_sub_i32_e32 v9, vcc, 0, v10
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v8, v8
+; TAHITI-NEXT: v_mul_f32_e32 v8, 0x4f7ffffe, v8
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v8, v8
+; TAHITI-NEXT: v_mul_lo_u32 v9, v9, v8
+; TAHITI-NEXT: v_mul_hi_u32 v9, v8, v9
+; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v8, v9
+; TAHITI-NEXT: v_mul_hi_u32 v8, v14, v8
+; TAHITI-NEXT: v_mul_lo_u32 v8, v8, v10
+; TAHITI-NEXT: v_sub_i32_e32 v8, vcc, v14, v8
+; TAHITI-NEXT: v_sub_i32_e32 v9, vcc, v8, v10
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v8, v10
+; TAHITI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; TAHITI-NEXT: v_sub_i32_e32 v9, vcc, v8, v10
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v8, v10
+; TAHITI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; TAHITI-NEXT: v_mov_b32_e32 v9, 0
+; TAHITI-NEXT: .LBB12_3:
+; TAHITI-NEXT: v_or_b32_e32 v11, v17, v13
+; TAHITI-NEXT: v_mov_b32_e32 v10, 0
+; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_14
+; TAHITI-NEXT: ; %bb.4:
+; TAHITI-NEXT: v_ashrrev_i32_e32 v10, 31, v13
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v12, v10
+; TAHITI-NEXT: v_addc_u32_e32 v13, vcc, v13, v10, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v11, v11, v10
+; TAHITI-NEXT: v_xor_b32_e32 v10, v13, v10
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v13, v11
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v14, v10
+; TAHITI-NEXT: v_sub_i32_e32 v15, vcc, 0, v11
+; TAHITI-NEXT: v_subb_u32_e32 v18, vcc, 0, v10, vcc
+; TAHITI-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13
+; TAHITI-NEXT: v_rcp_f32_e32 v13, v13
+; TAHITI-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13
+; TAHITI-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13
+; TAHITI-NEXT: v_trunc_f32_e32 v14, v14
+; TAHITI-NEXT: v_madmk_f32 v13, v14, 0xcf800000, v13
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v14, v14
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v13, v13
+; TAHITI-NEXT: v_mul_lo_u32 v20, v15, v14
+; TAHITI-NEXT: v_mul_hi_u32 v19, v15, v13
+; TAHITI-NEXT: v_mul_lo_u32 v21, v18, v13
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v20, v19
+; TAHITI-NEXT: v_mul_lo_u32 v20, v15, v13
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v19, v21
+; TAHITI-NEXT: v_mul_lo_u32 v21, v13, v19
+; TAHITI-NEXT: v_mul_hi_u32 v22, v13, v20
+; TAHITI-NEXT: v_mul_hi_u32 v23, v13, v19
+; TAHITI-NEXT: v_mul_hi_u32 v24, v14, v19
+; TAHITI-NEXT: v_mul_lo_u32 v19, v14, v19
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v22, v21
+; TAHITI-NEXT: v_addc_u32_e32 v22, vcc, 0, v23, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v23, v14, v20
+; TAHITI-NEXT: v_mul_hi_u32 v20, v14, v20
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v21, v23
+; TAHITI-NEXT: v_addc_u32_e32 v20, vcc, v22, v20, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v21, vcc, 0, v24, vcc
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v20, v19
+; TAHITI-NEXT: v_addc_u32_e32 v20, vcc, 0, v21, vcc
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v19
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, v14, v20, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v19, v15, v14
+; TAHITI-NEXT: v_mul_hi_u32 v20, v15, v13
+; TAHITI-NEXT: v_mul_lo_u32 v18, v18, v13
+; TAHITI-NEXT: v_mul_lo_u32 v15, v15, v13
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v19, v20
+; TAHITI-NEXT: v_add_i32_e32 v18, vcc, v19, v18
+; TAHITI-NEXT: v_mul_lo_u32 v21, v13, v18
+; TAHITI-NEXT: v_mul_hi_u32 v22, v13, v15
+; TAHITI-NEXT: v_mul_hi_u32 v23, v13, v18
+; TAHITI-NEXT: v_mul_hi_u32 v20, v14, v15
+; TAHITI-NEXT: v_mul_lo_u32 v15, v14, v15
+; TAHITI-NEXT: v_mul_hi_u32 v19, v14, v18
+; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v22, v21
+; TAHITI-NEXT: v_addc_u32_e32 v22, vcc, 0, v23, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v18, v14, v18
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v21, v15
+; TAHITI-NEXT: v_addc_u32_e32 v15, vcc, v22, v20, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v15, v18
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, 0, v19, vcc
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v15
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, v14, v18, vcc
+; TAHITI-NEXT: v_ashrrev_i32_e32 v15, 31, v17
+; TAHITI-NEXT: v_add_i32_e32 v18, vcc, v16, v15
+; TAHITI-NEXT: v_xor_b32_e32 v18, v18, v15
+; TAHITI-NEXT: v_mul_lo_u32 v19, v18, v14
+; TAHITI-NEXT: v_mul_hi_u32 v20, v18, v13
+; TAHITI-NEXT: v_mul_hi_u32 v21, v18, v14
+; TAHITI-NEXT: v_addc_u32_e32 v17, vcc, v17, v15, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v17, v17, v15
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v20, v19
+; TAHITI-NEXT: v_addc_u32_e32 v20, vcc, 0, v21, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v21, v17, v13
+; TAHITI-NEXT: v_mul_hi_u32 v13, v17, v13
+; TAHITI-NEXT: v_mul_hi_u32 v22, v17, v14
+; TAHITI-NEXT: v_mul_lo_u32 v14, v17, v14
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v19, v21
+; TAHITI-NEXT: v_addc_u32_e32 v13, vcc, v20, v13, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v19, vcc, 0, v22, vcc
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v14
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v19, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v14, v11, v14
+; TAHITI-NEXT: v_mul_hi_u32 v19, v11, v13
+; TAHITI-NEXT: v_mul_lo_u32 v20, v10, v13
+; TAHITI-NEXT: v_mul_lo_u32 v13, v11, v13
+; TAHITI-NEXT: v_add_i32_e32 v14, vcc, v14, v19
+; TAHITI-NEXT: v_add_i32_e32 v14, vcc, v20, v14
+; TAHITI-NEXT: v_sub_i32_e32 v19, vcc, v17, v14
+; TAHITI-NEXT: v_sub_i32_e32 v13, vcc, v18, v13
+; TAHITI-NEXT: v_subb_u32_e64 v18, s[0:1], v19, v10, vcc
+; TAHITI-NEXT: v_sub_i32_e64 v19, s[0:1], v13, v11
+; TAHITI-NEXT: v_subbrev_u32_e64 v20, s[2:3], 0, v18, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v20, v10
+; TAHITI-NEXT: v_cndmask_b32_e64 v21, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v19, v11
+; TAHITI-NEXT: v_subb_u32_e32 v14, vcc, v17, v14, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v22, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_eq_u32_e64 s[2:3], v20, v10
+; TAHITI-NEXT: v_subb_u32_e64 v18, s[0:1], v18, v10, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v14, v10
+; TAHITI-NEXT: v_cndmask_b32_e64 v21, v21, v22, s[2:3]
+; TAHITI-NEXT: v_sub_i32_e64 v22, s[0:1], v19, v11
+; TAHITI-NEXT: v_cndmask_b32_e64 v17, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v13, v11
+; TAHITI-NEXT: v_subbrev_u32_e64 v18, s[0:1], 0, v18, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_eq_u32_e32 vcc, v14, v10
+; TAHITI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v21
+; TAHITI-NEXT: v_cndmask_b32_e32 v10, v17, v11, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v19, v19, v22, s[0:1]
+; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; TAHITI-NEXT: v_cndmask_b32_e64 v18, v20, v18, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e32 v11, v13, v19, vcc
+; TAHITI-NEXT: v_cndmask_b32_e32 v10, v14, v18, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v11, v11, v15
+; TAHITI-NEXT: v_xor_b32_e32 v13, v10, v15
+; TAHITI-NEXT: v_sub_i32_e32 v10, vcc, v11, v15
+; TAHITI-NEXT: v_subb_u32_e32 v11, vcc, v13, v15, vcc
+; TAHITI-NEXT: s_cbranch_execnz .LBB12_6
+; TAHITI-NEXT: .LBB12_5:
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v10, v12
+; TAHITI-NEXT: v_sub_i32_e32 v11, vcc, 0, v12
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v10, v10
+; TAHITI-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v10, v10
+; TAHITI-NEXT: v_mul_lo_u32 v11, v11, v10
+; TAHITI-NEXT: v_mul_hi_u32 v11, v10, v11
+; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v10, v11
+; TAHITI-NEXT: v_mul_hi_u32 v10, v16, v10
+; TAHITI-NEXT: v_mul_lo_u32 v10, v10, v12
+; TAHITI-NEXT: v_sub_i32_e32 v10, vcc, v16, v10
+; TAHITI-NEXT: v_subrev_i32_e32 v11, vcc, v12, v10
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
+; TAHITI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; TAHITI-NEXT: v_subrev_i32_e32 v11, vcc, v12, v10
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
+; TAHITI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; TAHITI-NEXT: v_mov_b32_e32 v11, 0
+; TAHITI-NEXT: .LBB12_6:
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_or_b32_e32 v13, v5, v1
+; TAHITI-NEXT: v_mov_b32_e32 v12, 0
+; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_15
+; TAHITI-NEXT: ; %bb.7:
+; TAHITI-NEXT: v_ashrrev_i32_e32 v13, 31, v1
+; TAHITI-NEXT: v_add_i32_e32 v12, vcc, v0, v13
+; TAHITI-NEXT: v_addc_u32_e32 v1, vcc, v1, v13, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v12, v12, v13
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v13
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v13, v12
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v14, v1
+; TAHITI-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
+; TAHITI-NEXT: v_subb_u32_e32 v16, vcc, 0, v1, vcc
+; TAHITI-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13
+; TAHITI-NEXT: v_rcp_f32_e32 v13, v13
+; TAHITI-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13
+; TAHITI-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13
+; TAHITI-NEXT: v_trunc_f32_e32 v14, v14
+; TAHITI-NEXT: v_madmk_f32 v13, v14, 0xcf800000, v13
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v14, v14
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v13, v13
+; TAHITI-NEXT: v_mul_lo_u32 v18, v15, v14
+; TAHITI-NEXT: v_mul_hi_u32 v17, v15, v13
+; TAHITI-NEXT: v_mul_lo_u32 v19, v16, v13
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v18, v17
+; TAHITI-NEXT: v_mul_lo_u32 v18, v15, v13
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v17, v19
+; TAHITI-NEXT: v_mul_lo_u32 v19, v13, v17
+; TAHITI-NEXT: v_mul_hi_u32 v20, v13, v18
+; TAHITI-NEXT: v_mul_hi_u32 v21, v13, v17
+; TAHITI-NEXT: v_mul_hi_u32 v22, v14, v17
+; TAHITI-NEXT: v_mul_lo_u32 v17, v14, v17
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v20, v19
+; TAHITI-NEXT: v_addc_u32_e32 v20, vcc, 0, v21, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v21, v14, v18
+; TAHITI-NEXT: v_mul_hi_u32 v18, v14, v18
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v19, v21
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, v20, v18, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v19, vcc, 0, v22, vcc
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v18, v17
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, 0, v19, vcc
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v17
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, v14, v18, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v17, v15, v14
+; TAHITI-NEXT: v_mul_hi_u32 v18, v15, v13
+; TAHITI-NEXT: v_mul_lo_u32 v16, v16, v13
+; TAHITI-NEXT: v_mul_lo_u32 v15, v15, v13
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v17, v18
+; TAHITI-NEXT: v_add_i32_e32 v16, vcc, v17, v16
+; TAHITI-NEXT: v_mul_lo_u32 v19, v13, v16
+; TAHITI-NEXT: v_mul_hi_u32 v20, v13, v15
+; TAHITI-NEXT: v_mul_hi_u32 v21, v13, v16
+; TAHITI-NEXT: v_mul_hi_u32 v18, v14, v15
+; TAHITI-NEXT: v_mul_lo_u32 v15, v14, v15
+; TAHITI-NEXT: v_mul_hi_u32 v17, v14, v16
+; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v20, v19
+; TAHITI-NEXT: v_addc_u32_e32 v20, vcc, 0, v21, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v16, v14, v16
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v19, v15
+; TAHITI-NEXT: v_addc_u32_e32 v15, vcc, v20, v18, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v15, v16
+; TAHITI-NEXT: v_addc_u32_e32 v16, vcc, 0, v17, vcc
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v15
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, v14, v16, vcc
+; TAHITI-NEXT: v_ashrrev_i32_e32 v15, 31, v5
+; TAHITI-NEXT: v_add_i32_e32 v16, vcc, v4, v15
+; TAHITI-NEXT: v_xor_b32_e32 v16, v16, v15
+; TAHITI-NEXT: v_mul_lo_u32 v17, v16, v14
+; TAHITI-NEXT: v_mul_hi_u32 v18, v16, v13
+; TAHITI-NEXT: v_mul_hi_u32 v19, v16, v14
+; TAHITI-NEXT: v_addc_u32_e32 v5, vcc, v5, v15, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v5, v5, v15
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v18, v17
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, 0, v19, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v19, v5, v13
+; TAHITI-NEXT: v_mul_hi_u32 v13, v5, v13
+; TAHITI-NEXT: v_mul_hi_u32 v20, v5, v14
+; TAHITI-NEXT: v_mul_lo_u32 v14, v5, v14
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v17, v19
+; TAHITI-NEXT: v_addc_u32_e32 v13, vcc, v18, v13, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v17, vcc, 0, v20, vcc
+; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v14
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v17, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v14, v12, v14
+; TAHITI-NEXT: v_mul_hi_u32 v17, v12, v13
+; TAHITI-NEXT: v_mul_lo_u32 v18, v1, v13
+; TAHITI-NEXT: v_mul_lo_u32 v13, v12, v13
+; TAHITI-NEXT: v_add_i32_e32 v14, vcc, v14, v17
+; TAHITI-NEXT: v_add_i32_e32 v14, vcc, v18, v14
+; TAHITI-NEXT: v_sub_i32_e32 v17, vcc, v5, v14
+; TAHITI-NEXT: v_sub_i32_e32 v13, vcc, v16, v13
+; TAHITI-NEXT: v_subb_u32_e64 v16, s[0:1], v17, v1, vcc
+; TAHITI-NEXT: v_sub_i32_e64 v17, s[0:1], v13, v12
+; TAHITI-NEXT: v_subbrev_u32_e64 v18, s[2:3], 0, v16, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v18, v1
+; TAHITI-NEXT: v_cndmask_b32_e64 v19, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v17, v12
+; TAHITI-NEXT: v_subb_u32_e32 v5, vcc, v5, v14, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v20, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_eq_u32_e64 s[2:3], v18, v1
+; TAHITI-NEXT: v_subb_u32_e64 v16, s[0:1], v16, v1, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v5, v1
+; TAHITI-NEXT: v_cndmask_b32_e64 v19, v19, v20, s[2:3]
+; TAHITI-NEXT: v_sub_i32_e64 v20, s[0:1], v17, v12
+; TAHITI-NEXT: v_cndmask_b32_e64 v14, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v13, v12
+; TAHITI-NEXT: v_subbrev_u32_e64 v16, s[0:1], 0, v16, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v12, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
+; TAHITI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v19
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v14, v12, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v17, v17, v20, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v16, v18, v16, s[0:1]
+; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v5, v16, vcc
+; TAHITI-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v5, v5, v15
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v15
+; TAHITI-NEXT: v_sub_i32_e32 v12, vcc, v5, v15
+; TAHITI-NEXT: v_subb_u32_e32 v13, vcc, v1, v15, vcc
+; TAHITI-NEXT: s_cbranch_execnz .LBB12_9
+; TAHITI-NEXT: .LBB12_8:
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v1, v0
+; TAHITI-NEXT: v_sub_i32_e32 v5, vcc, 0, v0
+; TAHITI-NEXT: v_mov_b32_e32 v13, 0
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TAHITI-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TAHITI-NEXT: v_mul_lo_u32 v5, v5, v1
+; TAHITI-NEXT: v_mul_hi_u32 v5, v1, v5
+; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v1, v5
+; TAHITI-NEXT: v_mul_hi_u32 v1, v4, v1
+; TAHITI-NEXT: v_mul_lo_u32 v1, v1, v0
+; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v4, v1
+; TAHITI-NEXT: v_subrev_i32_e32 v4, vcc, v0, v1
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; TAHITI-NEXT: v_subrev_i32_e32 v4, vcc, v0, v1
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; TAHITI-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc
+; TAHITI-NEXT: .LBB12_9:
+; TAHITI-NEXT: v_or_b32_e32 v1, v7, v3
+; TAHITI-NEXT: v_mov_b32_e32 v0, 0
+; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_16
+; TAHITI-NEXT: ; %bb.10:
+; TAHITI-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v0
+; TAHITI-NEXT: v_xor_b32_e32 v0, v3, v0
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v3, v1
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v4, v0
+; TAHITI-NEXT: v_sub_i32_e32 v5, vcc, 0, v1
+; TAHITI-NEXT: v_subb_u32_e32 v14, vcc, 0, v0, vcc
+; TAHITI-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3
+; TAHITI-NEXT: v_rcp_f32_e32 v3, v3
+; TAHITI-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3
+; TAHITI-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3
+; TAHITI-NEXT: v_trunc_f32_e32 v4, v4
+; TAHITI-NEXT: v_madmk_f32 v3, v4, 0xcf800000, v3
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v4, v4
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v3, v3
+; TAHITI-NEXT: v_mul_lo_u32 v16, v5, v4
+; TAHITI-NEXT: v_mul_hi_u32 v15, v5, v3
+; TAHITI-NEXT: v_mul_lo_u32 v17, v14, v3
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v16, v15
+; TAHITI-NEXT: v_mul_lo_u32 v16, v5, v3
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v15, v17
+; TAHITI-NEXT: v_mul_lo_u32 v17, v3, v15
+; TAHITI-NEXT: v_mul_hi_u32 v18, v3, v16
+; TAHITI-NEXT: v_mul_hi_u32 v19, v3, v15
+; TAHITI-NEXT: v_mul_hi_u32 v20, v4, v15
+; TAHITI-NEXT: v_mul_lo_u32 v15, v4, v15
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v18, v17
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, 0, v19, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v19, v4, v16
+; TAHITI-NEXT: v_mul_hi_u32 v16, v4, v16
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v17, v19
+; TAHITI-NEXT: v_addc_u32_e32 v16, vcc, v18, v16, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v17, vcc, 0, v20, vcc
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v16, v15
+; TAHITI-NEXT: v_addc_u32_e32 v16, vcc, 0, v17, vcc
+; TAHITI-NEXT: v_add_i32_e32 v3, vcc, v3, v15
+; TAHITI-NEXT: v_addc_u32_e32 v4, vcc, v4, v16, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v15, v5, v4
+; TAHITI-NEXT: v_mul_hi_u32 v16, v5, v3
+; TAHITI-NEXT: v_mul_lo_u32 v14, v14, v3
+; TAHITI-NEXT: v_mul_lo_u32 v5, v5, v3
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v15, v16
+; TAHITI-NEXT: v_add_i32_e32 v14, vcc, v15, v14
+; TAHITI-NEXT: v_mul_lo_u32 v17, v3, v14
+; TAHITI-NEXT: v_mul_hi_u32 v18, v3, v5
+; TAHITI-NEXT: v_mul_hi_u32 v19, v3, v14
+; TAHITI-NEXT: v_mul_hi_u32 v16, v4, v5
+; TAHITI-NEXT: v_mul_lo_u32 v5, v4, v5
+; TAHITI-NEXT: v_mul_hi_u32 v15, v4, v14
+; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v18, v17
+; TAHITI-NEXT: v_addc_u32_e32 v18, vcc, 0, v19, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v14, v4, v14
+; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v17, v5
+; TAHITI-NEXT: v_addc_u32_e32 v5, vcc, v18, v16, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v5, v14
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v15, vcc
+; TAHITI-NEXT: v_add_i32_e32 v3, vcc, v3, v5
+; TAHITI-NEXT: v_addc_u32_e32 v4, vcc, v4, v14, vcc
+; TAHITI-NEXT: v_ashrrev_i32_e32 v5, 31, v7
+; TAHITI-NEXT: v_add_i32_e32 v14, vcc, v6, v5
+; TAHITI-NEXT: v_xor_b32_e32 v14, v14, v5
+; TAHITI-NEXT: v_mul_lo_u32 v15, v14, v4
+; TAHITI-NEXT: v_mul_hi_u32 v16, v14, v3
+; TAHITI-NEXT: v_mul_hi_u32 v17, v14, v4
+; TAHITI-NEXT: v_addc_u32_e32 v7, vcc, v7, v5, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v7, v7, v5
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v16, v15
+; TAHITI-NEXT: v_addc_u32_e32 v16, vcc, 0, v17, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v17, v7, v3
+; TAHITI-NEXT: v_mul_hi_u32 v3, v7, v3
+; TAHITI-NEXT: v_mul_hi_u32 v18, v7, v4
+; TAHITI-NEXT: v_mul_lo_u32 v4, v7, v4
+; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v15, v17
+; TAHITI-NEXT: v_addc_u32_e32 v3, vcc, v16, v3, vcc
+; TAHITI-NEXT: v_addc_u32_e32 v15, vcc, 0, v18, vcc
+; TAHITI-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; TAHITI-NEXT: v_addc_u32_e32 v4, vcc, 0, v15, vcc
+; TAHITI-NEXT: v_mul_lo_u32 v4, v1, v4
+; TAHITI-NEXT: v_mul_hi_u32 v15, v1, v3
+; TAHITI-NEXT: v_mul_lo_u32 v16, v0, v3
+; TAHITI-NEXT: v_mul_lo_u32 v3, v1, v3
+; TAHITI-NEXT: v_add_i32_e32 v4, vcc, v4, v15
+; TAHITI-NEXT: v_add_i32_e32 v4, vcc, v16, v4
+; TAHITI-NEXT: v_sub_i32_e32 v15, vcc, v7, v4
+; TAHITI-NEXT: v_sub_i32_e32 v3, vcc, v14, v3
+; TAHITI-NEXT: v_subb_u32_e64 v14, s[0:1], v15, v0, vcc
+; TAHITI-NEXT: v_sub_i32_e64 v15, s[0:1], v3, v1
+; TAHITI-NEXT: v_subbrev_u32_e64 v16, s[2:3], 0, v14, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v16, v0
+; TAHITI-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_ge_u32_e64 s[2:3], v15, v1
+; TAHITI-NEXT: v_subb_u32_e32 v4, vcc, v7, v4, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v18, 0, -1, s[2:3]
+; TAHITI-NEXT: v_cmp_eq_u32_e64 s[2:3], v16, v0
+; TAHITI-NEXT: v_subb_u32_e64 v14, s[0:1], v14, v0, s[0:1]
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v4, v0
+; TAHITI-NEXT: v_cndmask_b32_e64 v17, v17, v18, s[2:3]
+; TAHITI-NEXT: v_sub_i32_e64 v18, s[0:1], v15, v1
+; TAHITI-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v3, v1
+; TAHITI-NEXT: v_subbrev_u32_e64 v14, s[0:1], 0, v14, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc
+; TAHITI-NEXT: v_cmp_eq_u32_e32 vcc, v4, v0
+; TAHITI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v17
+; TAHITI-NEXT: v_cndmask_b32_e32 v0, v7, v1, vcc
+; TAHITI-NEXT: v_cndmask_b32_e64 v15, v15, v18, s[0:1]
+; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; TAHITI-NEXT: v_cndmask_b32_e64 v14, v16, v14, s[0:1]
+; TAHITI-NEXT: v_cndmask_b32_e32 v1, v3, v15, vcc
+; TAHITI-NEXT: v_cndmask_b32_e32 v0, v4, v14, vcc
+; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v5
+; TAHITI-NEXT: v_xor_b32_e32 v0, v0, v5
+; TAHITI-NEXT: v_sub_i32_e32 v14, vcc, v1, v5
+; TAHITI-NEXT: v_subb_u32_e32 v15, vcc, v0, v5, vcc
+; TAHITI-NEXT: s_cbranch_execnz .LBB12_12
+; TAHITI-NEXT: .LBB12_11:
+; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, v2
+; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, 0, v2
+; TAHITI-NEXT: v_mov_b32_e32 v15, 0
+; TAHITI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TAHITI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TAHITI-NEXT: v_mul_lo_u32 v1, v1, v0
+; TAHITI-NEXT: v_mul_hi_u32 v1, v0, v1
+; TAHITI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; TAHITI-NEXT: v_mul_hi_u32 v0, v6, v0
+; TAHITI-NEXT: v_mul_lo_u32 v0, v0, v2
+; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v6, v0
+; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TAHITI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TAHITI-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc
+; TAHITI-NEXT: .LBB12_12:
+; TAHITI-NEXT: s_mov_b32 s7, 0xf000
+; TAHITI-NEXT: s_mov_b32 s6, -1
+; TAHITI-NEXT: buffer_store_dwordx4 v[12:15], off, s[4:7], 0 offset:16
+; TAHITI-NEXT: buffer_store_dwordx4 v[8:11], off, s[4:7], 0
+; TAHITI-NEXT: s_endpgm
+; TAHITI-NEXT: .LBB12_13:
+; TAHITI-NEXT: ; implicit-def: $vgpr8_vgpr9
+; TAHITI-NEXT: s_branch .LBB12_2
+; TAHITI-NEXT: .LBB12_14:
+; TAHITI-NEXT: s_branch .LBB12_5
+; TAHITI-NEXT: .LBB12_15:
+; TAHITI-NEXT: ; implicit-def: $vgpr12_vgpr13
+; TAHITI-NEXT: s_branch .LBB12_8
+; TAHITI-NEXT: .LBB12_16:
+; TAHITI-NEXT: s_branch .LBB12_11
+;
+; TONGA-LABEL: srem_v4i64:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; TONGA-NEXT: v_mov_b32_e32 v8, 0
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: s_add_u32 s0, s6, 48
+; TONGA-NEXT: s_addc_u32 s1, s7, 0
+; TONGA-NEXT: s_add_u32 s2, s6, 32
+; TONGA-NEXT: v_mov_b32_e32 v0, s6
+; TONGA-NEXT: s_addc_u32 s3, s7, 0
+; TONGA-NEXT: v_mov_b32_e32 v2, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s7
+; TONGA-NEXT: v_mov_b32_e32 v3, s3
+; TONGA-NEXT: flat_load_dwordx4 v[10:13], v[2:3]
+; TONGA-NEXT: flat_load_dwordx4 v[14:17], v[0:1]
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: s_add_u32 s0, s6, 16
+; TONGA-NEXT: s_addc_u32 s1, s7, 0
+; TONGA-NEXT: v_mov_b32_e32 v5, s1
+; TONGA-NEXT: v_mov_b32_e32 v4, s0
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; TONGA-NEXT: s_waitcnt vmcnt(2)
+; TONGA-NEXT: v_or_b32_e32 v9, v15, v11
+; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; TONGA-NEXT: s_cbranch_vccz .LBB12_13
+; TONGA-NEXT: ; %bb.1:
+; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v11
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v10, v8
+; TONGA-NEXT: v_addc_u32_e32 v11, vcc, v11, v8, vcc
+; TONGA-NEXT: v_xor_b32_e32 v22, v9, v8
+; TONGA-NEXT: v_xor_b32_e32 v11, v11, v8
+; TONGA-NEXT: v_cvt_f32_u32_e32 v8, v22
+; TONGA-NEXT: v_cvt_f32_u32_e32 v9, v11
+; TONGA-NEXT: v_sub_u32_e32 v23, vcc, 0, v22
+; TONGA-NEXT: v_subb_u32_e32 v24, vcc, 0, v11, vcc
+; TONGA-NEXT: v_madmk_f32 v8, v9, 0x4f800000, v8
+; TONGA-NEXT: v_rcp_f32_e32 v8, v8
+; TONGA-NEXT: v_mul_f32_e32 v8, 0x5f7ffffc, v8
+; TONGA-NEXT: v_mul_f32_e32 v9, 0x2f800000, v8
+; TONGA-NEXT: v_trunc_f32_e32 v9, v9
+; TONGA-NEXT: v_madmk_f32 v8, v9, 0xcf800000, v8
+; TONGA-NEXT: v_cvt_u32_f32_e32 v20, v9
+; TONGA-NEXT: v_cvt_u32_f32_e32 v21, v8
+; TONGA-NEXT: v_mul_lo_u32 v18, v23, v20
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v23, v21, 0
+; TONGA-NEXT: v_mul_lo_u32 v19, v24, v21
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v9, v18
+; TONGA-NEXT: v_add_u32_e32 v25, vcc, v9, v19
+; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v21, v25, 0
+; TONGA-NEXT: v_mul_hi_u32 v9, v21, v8
+; TONGA-NEXT: v_add_u32_e32 v26, vcc, v9, v18
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v20, v8, 0
+; TONGA-NEXT: v_addc_u32_e32 v27, vcc, 0, v19, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v20, v25, 0
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v26, v8
+; TONGA-NEXT: v_addc_u32_e32 v8, vcc, v27, v9, vcc
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v19, vcc
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v8, v18
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT: v_add_u32_e32 v25, vcc, v21, v8
+; TONGA-NEXT: v_addc_u32_e32 v26, vcc, v20, v9, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v23, v25, 0
+; TONGA-NEXT: v_mul_lo_u32 v20, v23, v26
+; TONGA-NEXT: v_mul_lo_u32 v21, v24, v25
+; TONGA-NEXT: v_mul_hi_u32 v23, v25, v8
+; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v26, v8, 0
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v20, v9
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v9, v21
+; TONGA-NEXT: v_mad_u64_u32 v[20:21], s[0:1], v25, v9, 0
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v26, v9, 0
+; TONGA-NEXT: v_add_u32_e32 v20, vcc, v23, v20
+; TONGA-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
+; TONGA-NEXT: v_add_u32_e32 v18, vcc, v20, v18
+; TONGA-NEXT: v_addc_u32_e32 v18, vcc, v21, v19, vcc
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v18, v8
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT: v_add_u32_e32 v18, vcc, v25, v8
+; TONGA-NEXT: v_addc_u32_e32 v19, vcc, v26, v9, vcc
+; TONGA-NEXT: v_ashrrev_i32_e32 v20, 31, v15
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v14, v20
+; TONGA-NEXT: v_xor_b32_e32 v21, v8, v20
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v21, v19, 0
+; TONGA-NEXT: v_mul_hi_u32 v23, v21, v18
+; TONGA-NEXT: v_addc_u32_e32 v15, vcc, v15, v20, vcc
+; TONGA-NEXT: v_xor_b32_e32 v15, v15, v20
+; TONGA-NEXT: v_add_u32_e32 v23, vcc, v23, v8
+; TONGA-NEXT: v_addc_u32_e32 v24, vcc, 0, v9, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v15, v18, 0
+; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v15, v19, 0
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v23, v8
+; TONGA-NEXT: v_addc_u32_e32 v8, vcc, v24, v9, vcc
+; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v19, vcc
+; TONGA-NEXT: v_add_u32_e32 v18, vcc, v8, v18
+; TONGA-NEXT: v_addc_u32_e32 v8, vcc, 0, v9, vcc
+; TONGA-NEXT: v_mul_lo_u32 v19, v22, v8
+; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v22, v18, 0
+; TONGA-NEXT: v_mul_lo_u32 v18, v11, v18
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v19, v9
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, v18, v9
+; TONGA-NEXT: v_sub_u32_e32 v18, vcc, v15, v9
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v21, v8
+; TONGA-NEXT: v_subb_u32_e64 v18, s[0:1], v18, v11, vcc
+; TONGA-NEXT: v_sub_u32_e64 v19, s[0:1], v8, v22
+; TONGA-NEXT: v_subbrev_u32_e64 v21, s[2:3], 0, v18, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v21, v11
+; TONGA-NEXT: v_cndmask_b32_e64 v23, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v19, v22
+; TONGA-NEXT: v_cndmask_b32_e64 v24, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v21, v11
+; TONGA-NEXT: v_subb_u32_e64 v18, s[0:1], v18, v11, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v23, v23, v24, s[2:3]
+; TONGA-NEXT: v_sub_u32_e64 v24, s[0:1], v19, v22
+; TONGA-NEXT: v_subb_u32_e32 v9, vcc, v15, v9, vcc
+; TONGA-NEXT: v_subbrev_u32_e64 v18, s[0:1], 0, v18, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v9, v11
+; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v23
+; TONGA-NEXT: v_cndmask_b32_e64 v15, 0, -1, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v22
+; TONGA-NEXT: v_cndmask_b32_e64 v18, v21, v18, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v21, 0, -1, vcc
+; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v9, v11
+; TONGA-NEXT: v_cndmask_b32_e32 v11, v15, v21, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v19, v19, v24, s[0:1]
+; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; TONGA-NEXT: v_cndmask_b32_e32 v8, v8, v19, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v9, v9, v18, vcc
+; TONGA-NEXT: v_xor_b32_e32 v8, v8, v20
+; TONGA-NEXT: v_xor_b32_e32 v9, v9, v20
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v8, v20
+; TONGA-NEXT: v_subb_u32_e32 v9, vcc, v9, v20, vcc
+; TONGA-NEXT: s_cbranch_execnz .LBB12_3
+; TONGA-NEXT: .LBB12_2:
+; TONGA-NEXT: v_cvt_f32_u32_e32 v8, v10
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v10
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v8, v8
+; TONGA-NEXT: v_mul_f32_e32 v8, 0x4f7ffffe, v8
+; TONGA-NEXT: v_cvt_u32_f32_e32 v8, v8
+; TONGA-NEXT: v_mul_lo_u32 v9, v9, v8
+; TONGA-NEXT: v_mul_hi_u32 v9, v8, v9
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, v8, v9
+; TONGA-NEXT: v_mul_hi_u32 v8, v14, v8
+; TONGA-NEXT: v_mul_lo_u32 v8, v8, v10
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v14, v8
+; TONGA-NEXT: v_subrev_u32_e32 v9, vcc, v10, v8
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v10
+; TONGA-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v8, v10
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v10
+; TONGA-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; TONGA-NEXT: v_mov_b32_e32 v9, 0
+; TONGA-NEXT: .LBB12_3:
+; TONGA-NEXT: v_or_b32_e32 v11, v17, v13
+; TONGA-NEXT: v_mov_b32_e32 v10, 0
+; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; TONGA-NEXT: s_cbranch_vccz .LBB12_14
+; TONGA-NEXT: ; %bb.4:
+; TONGA-NEXT: v_ashrrev_i32_e32 v10, 31, v13
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, v12, v10
+; TONGA-NEXT: v_addc_u32_e32 v13, vcc, v13, v10, vcc
+; TONGA-NEXT: v_xor_b32_e32 v15, v11, v10
+; TONGA-NEXT: v_xor_b32_e32 v20, v13, v10
+; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v15
+; TONGA-NEXT: v_cvt_f32_u32_e32 v11, v20
+; TONGA-NEXT: v_sub_u32_e32 v21, vcc, 0, v15
+; TONGA-NEXT: v_subb_u32_e32 v22, vcc, 0, v20, vcc
+; TONGA-NEXT: v_madmk_f32 v10, v11, 0x4f800000, v10
+; TONGA-NEXT: v_rcp_f32_e32 v10, v10
+; TONGA-NEXT: v_mul_f32_e32 v10, 0x5f7ffffc, v10
+; TONGA-NEXT: v_mul_f32_e32 v11, 0x2f800000, v10
+; TONGA-NEXT: v_trunc_f32_e32 v11, v11
+; TONGA-NEXT: v_madmk_f32 v10, v11, 0xcf800000, v10
+; TONGA-NEXT: v_cvt_u32_f32_e32 v18, v11
+; TONGA-NEXT: v_cvt_u32_f32_e32 v19, v10
+; TONGA-NEXT: v_mul_lo_u32 v13, v21, v18
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v21, v19, 0
+; TONGA-NEXT: v_mul_lo_u32 v14, v22, v19
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v13
+; TONGA-NEXT: v_add_u32_e32 v23, vcc, v11, v14
+; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v19, v23, 0
+; TONGA-NEXT: v_mul_hi_u32 v11, v19, v10
+; TONGA-NEXT: v_add_u32_e32 v24, vcc, v11, v13
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v18, v10, 0
+; TONGA-NEXT: v_addc_u32_e32 v25, vcc, 0, v14, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v18, v23, 0
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v24, v10
+; TONGA-NEXT: v_addc_u32_e32 v10, vcc, v25, v11, vcc
+; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v14, vcc
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v10, v13
+; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; TONGA-NEXT: v_add_u32_e32 v23, vcc, v19, v10
+; TONGA-NEXT: v_addc_u32_e32 v24, vcc, v18, v11, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v21, v23, 0
+; TONGA-NEXT: v_mul_lo_u32 v18, v21, v24
+; TONGA-NEXT: v_mul_lo_u32 v19, v22, v23
+; TONGA-NEXT: v_mul_hi_u32 v21, v23, v10
+; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v24, v10, 0
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, v18, v11
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v19
+; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v23, v11, 0
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v24, v11, 0
+; TONGA-NEXT: v_add_u32_e32 v18, vcc, v21, v18
+; TONGA-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v18, v13
+; TONGA-NEXT: v_addc_u32_e32 v13, vcc, v19, v14, vcc
+; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v13, v10
+; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v23, v10
+; TONGA-NEXT: v_addc_u32_e32 v14, vcc, v24, v11, vcc
+; TONGA-NEXT: v_ashrrev_i32_e32 v18, 31, v17
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v16, v18
+; TONGA-NEXT: v_xor_b32_e32 v19, v10, v18
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v19, v14, 0
+; TONGA-NEXT: v_mul_hi_u32 v21, v19, v13
+; TONGA-NEXT: v_addc_u32_e32 v17, vcc, v17, v18, vcc
+; TONGA-NEXT: v_xor_b32_e32 v17, v17, v18
+; TONGA-NEXT: v_add_u32_e32 v21, vcc, v21, v10
+; TONGA-NEXT: v_addc_u32_e32 v22, vcc, 0, v11, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v17, v13, 0
+; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v17, v14, 0
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v21, v10
+; TONGA-NEXT: v_addc_u32_e32 v10, vcc, v22, v11, vcc
+; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v14, vcc
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v10, v13
+; TONGA-NEXT: v_addc_u32_e32 v10, vcc, 0, v11, vcc
+; TONGA-NEXT: v_mul_lo_u32 v14, v15, v10
+; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v15, v13, 0
+; TONGA-NEXT: v_mul_lo_u32 v13, v20, v13
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, v14, v11
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, v13, v11
+; TONGA-NEXT: v_sub_u32_e32 v13, vcc, v17, v11
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v19, v10
+; TONGA-NEXT: v_subb_u32_e64 v13, s[0:1], v13, v20, vcc
+; TONGA-NEXT: v_sub_u32_e64 v14, s[0:1], v10, v15
+; TONGA-NEXT: v_subbrev_u32_e64 v19, s[2:3], 0, v13, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v19, v20
+; TONGA-NEXT: v_cndmask_b32_e64 v21, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v14, v15
+; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v17, v11, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v22, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v19, v20
+; TONGA-NEXT: v_subb_u32_e64 v13, s[0:1], v13, v20, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v11, v20
+; TONGA-NEXT: v_cndmask_b32_e64 v21, v21, v22, s[2:3]
+; TONGA-NEXT: v_sub_u32_e64 v22, s[0:1], v14, v15
+; TONGA-NEXT: v_cndmask_b32_e64 v17, 0, -1, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v10, v15
+; TONGA-NEXT: v_subbrev_u32_e64 v13, s[0:1], 0, v13, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v15, 0, -1, vcc
+; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v11, v20
+; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v21
+; TONGA-NEXT: v_cndmask_b32_e32 v15, v17, v15, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v14, v14, v22, s[0:1]
+; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; TONGA-NEXT: v_cndmask_b32_e64 v13, v19, v13, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v11, v11, v13, vcc
+; TONGA-NEXT: v_xor_b32_e32 v10, v10, v18
+; TONGA-NEXT: v_xor_b32_e32 v11, v11, v18
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v10, v18
+; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v11, v18, vcc
+; TONGA-NEXT: s_cbranch_execnz .LBB12_6
+; TONGA-NEXT: .LBB12_5:
+; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v12
+; TONGA-NEXT: v_sub_u32_e32 v11, vcc, 0, v12
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v10, v10
+; TONGA-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10
+; TONGA-NEXT: v_cvt_u32_f32_e32 v10, v10
+; TONGA-NEXT: v_mul_lo_u32 v11, v11, v10
+; TONGA-NEXT: v_mul_hi_u32 v11, v10, v11
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v10, v11
+; TONGA-NEXT: v_mul_hi_u32 v10, v16, v10
+; TONGA-NEXT: v_mul_lo_u32 v10, v10, v12
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v16, v10
+; TONGA-NEXT: v_subrev_u32_e32 v11, vcc, v12, v10
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
+; TONGA-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; TONGA-NEXT: v_subrev_u32_e32 v11, vcc, v12, v10
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
+; TONGA-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; TONGA-NEXT: v_mov_b32_e32 v11, 0
+; TONGA-NEXT: .LBB12_6:
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_or_b32_e32 v13, v5, v1
+; TONGA-NEXT: v_mov_b32_e32 v12, 0
+; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
+; TONGA-NEXT: s_cbranch_vccz .LBB12_15
+; TONGA-NEXT: ; %bb.7:
+; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v1
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v0, v12
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, v1, v12, vcc
+; TONGA-NEXT: v_xor_b32_e32 v18, v13, v12
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v12
+; TONGA-NEXT: v_cvt_f32_u32_e32 v12, v18
+; TONGA-NEXT: v_cvt_f32_u32_e32 v13, v1
+; TONGA-NEXT: v_sub_u32_e32 v19, vcc, 0, v18
+; TONGA-NEXT: v_subb_u32_e32 v20, vcc, 0, v1, vcc
+; TONGA-NEXT: v_madmk_f32 v12, v13, 0x4f800000, v12
+; TONGA-NEXT: v_rcp_f32_e32 v12, v12
+; TONGA-NEXT: v_mul_f32_e32 v12, 0x5f7ffffc, v12
+; TONGA-NEXT: v_mul_f32_e32 v13, 0x2f800000, v12
+; TONGA-NEXT: v_trunc_f32_e32 v13, v13
+; TONGA-NEXT: v_madmk_f32 v12, v13, 0xcf800000, v12
+; TONGA-NEXT: v_cvt_u32_f32_e32 v16, v13
+; TONGA-NEXT: v_cvt_u32_f32_e32 v17, v12
+; TONGA-NEXT: v_mul_lo_u32 v14, v19, v16
+; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v19, v17, 0
+; TONGA-NEXT: v_mul_lo_u32 v15, v20, v17
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v14
+; TONGA-NEXT: v_add_u32_e32 v15, vcc, v13, v15
+; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v17, v15, 0
+; TONGA-NEXT: v_mul_hi_u32 v21, v17, v12
+; TONGA-NEXT: v_add_u32_e32 v21, vcc, v21, v13
+; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v16, v12, 0
+; TONGA-NEXT: v_addc_u32_e32 v22, vcc, 0, v14, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v16, v15, 0
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v21, v12
+; TONGA-NEXT: v_addc_u32_e32 v12, vcc, v22, v13, vcc
+; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v15, vcc
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v12, v14
+; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; TONGA-NEXT: v_add_u32_e32 v21, vcc, v17, v12
+; TONGA-NEXT: v_addc_u32_e32 v22, vcc, v16, v13, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v19, v21, 0
+; TONGA-NEXT: v_mul_lo_u32 v16, v19, v22
+; TONGA-NEXT: v_mul_lo_u32 v17, v20, v21
+; TONGA-NEXT: v_mul_hi_u32 v19, v21, v12
+; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v22, v12, 0
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v16, v13
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v17
+; TONGA-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v21, v13, 0
+; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v22, v13, 0
+; TONGA-NEXT: v_add_u32_e32 v16, vcc, v19, v16
+; TONGA-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; TONGA-NEXT: v_add_u32_e32 v14, vcc, v16, v14
+; TONGA-NEXT: v_addc_u32_e32 v14, vcc, v17, v15, vcc
+; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v14, v12
+; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; TONGA-NEXT: v_add_u32_e32 v14, vcc, v21, v12
+; TONGA-NEXT: v_addc_u32_e32 v15, vcc, v22, v13, vcc
+; TONGA-NEXT: v_ashrrev_i32_e32 v16, 31, v5
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v4, v16
+; TONGA-NEXT: v_xor_b32_e32 v17, v12, v16
+; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v17, v15, 0
+; TONGA-NEXT: v_mul_hi_u32 v19, v17, v14
+; TONGA-NEXT: v_addc_u32_e32 v5, vcc, v5, v16, vcc
+; TONGA-NEXT: v_xor_b32_e32 v5, v5, v16
+; TONGA-NEXT: v_add_u32_e32 v19, vcc, v19, v12
+; TONGA-NEXT: v_addc_u32_e32 v20, vcc, 0, v13, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v5, v14, 0
+; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v5, v15, 0
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v19, v12
+; TONGA-NEXT: v_addc_u32_e32 v12, vcc, v20, v13, vcc
+; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v15, vcc
+; TONGA-NEXT: v_add_u32_e32 v14, vcc, v12, v14
+; TONGA-NEXT: v_addc_u32_e32 v12, vcc, 0, v13, vcc
+; TONGA-NEXT: v_mul_lo_u32 v15, v18, v12
+; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v18, v14, 0
+; TONGA-NEXT: v_mul_lo_u32 v14, v1, v14
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v15, v13
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v14, v13
+; TONGA-NEXT: v_sub_u32_e32 v14, vcc, v5, v13
+; TONGA-NEXT: v_sub_u32_e32 v12, vcc, v17, v12
+; TONGA-NEXT: v_subb_u32_e64 v14, s[0:1], v14, v1, vcc
+; TONGA-NEXT: v_sub_u32_e64 v15, s[0:1], v12, v18
+; TONGA-NEXT: v_subbrev_u32_e64 v17, s[2:3], 0, v14, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v17, v1
+; TONGA-NEXT: v_cndmask_b32_e64 v19, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v15, v18
+; TONGA-NEXT: v_cndmask_b32_e64 v20, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v17, v1
+; TONGA-NEXT: v_subb_u32_e64 v14, s[0:1], v14, v1, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v19, v19, v20, s[2:3]
+; TONGA-NEXT: v_sub_u32_e64 v20, s[0:1], v15, v18
+; TONGA-NEXT: v_subb_u32_e32 v5, vcc, v5, v13, vcc
+; TONGA-NEXT: v_subbrev_u32_e64 v14, s[0:1], 0, v14, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v5, v1
+; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v19
+; TONGA-NEXT: v_cndmask_b32_e64 v13, 0, -1, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v12, v18
+; TONGA-NEXT: v_cndmask_b32_e64 v14, v17, v14, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v17, 0, -1, vcc
+; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v13, v17, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v15, v15, v20, s[0:1]
+; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v5, v14, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v5, v12, v15, vcc
+; TONGA-NEXT: v_xor_b32_e32 v5, v5, v16
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v16
+; TONGA-NEXT: v_sub_u32_e32 v12, vcc, v5, v16
+; TONGA-NEXT: v_subb_u32_e32 v13, vcc, v1, v16, vcc
+; TONGA-NEXT: s_cbranch_execnz .LBB12_9
+; TONGA-NEXT: .LBB12_8:
+; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v0
+; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0
+; TONGA-NEXT: v_mov_b32_e32 v13, 0
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_lo_u32 v5, v5, v1
+; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v5
+; TONGA-NEXT: v_mul_hi_u32 v1, v4, v1
+; TONGA-NEXT: v_mul_lo_u32 v1, v1, v0
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v4, v1
+; TONGA-NEXT: v_subrev_u32_e32 v4, vcc, v0, v1
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; TONGA-NEXT: v_subrev_u32_e32 v4, vcc, v0, v1
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
+; TONGA-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc
+; TONGA-NEXT: .LBB12_9:
+; TONGA-NEXT: v_or_b32_e32 v1, v7, v3
+; TONGA-NEXT: v_mov_b32_e32 v0, 0
+; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; TONGA-NEXT: s_cbranch_vccz .LBB12_16
+; TONGA-NEXT: ; %bb.10:
+; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v0
+; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc
+; TONGA-NEXT: v_xor_b32_e32 v5, v1, v0
+; TONGA-NEXT: v_xor_b32_e32 v16, v3, v0
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v5
+; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v16
+; TONGA-NEXT: v_sub_u32_e32 v17, vcc, 0, v5
+; TONGA-NEXT: v_subb_u32_e32 v18, vcc, 0, v16, vcc
+; TONGA-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
+; TONGA-NEXT: v_rcp_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; TONGA-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
+; TONGA-NEXT: v_trunc_f32_e32 v1, v1
+; TONGA-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v14, v1
+; TONGA-NEXT: v_cvt_u32_f32_e32 v15, v0
+; TONGA-NEXT: v_mul_lo_u32 v3, v17, v14
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v17, v15, 0
+; TONGA-NEXT: v_mul_lo_u32 v4, v18, v15
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; TONGA-NEXT: v_add_u32_e32 v19, vcc, v1, v4
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v15, v19, 0
+; TONGA-NEXT: v_mul_hi_u32 v1, v15, v0
+; TONGA-NEXT: v_add_u32_e32 v20, vcc, v1, v3
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v14, v0, 0
+; TONGA-NEXT: v_addc_u32_e32 v21, vcc, 0, v4, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v14, v19, 0
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v20, v0
+; TONGA-NEXT: v_addc_u32_e32 v0, vcc, v21, v1, vcc
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v4, vcc
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v3
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT: v_add_u32_e32 v19, vcc, v15, v0
+; TONGA-NEXT: v_addc_u32_e32 v20, vcc, v14, v1, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v17, v19, 0
+; TONGA-NEXT: v_mul_lo_u32 v14, v17, v20
+; TONGA-NEXT: v_mul_lo_u32 v15, v18, v19
+; TONGA-NEXT: v_mul_hi_u32 v17, v19, v0
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v20, v0, 0
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v14, v1
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v15
+; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v19, v1, 0
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v20, v1, 0
+; TONGA-NEXT: v_add_u32_e32 v14, vcc, v17, v14
+; TONGA-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v14, v3
+; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v15, v4, vcc
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v3, v0
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v19, v0
+; TONGA-NEXT: v_addc_u32_e32 v4, vcc, v20, v1, vcc
+; TONGA-NEXT: v_ashrrev_i32_e32 v15, 31, v7
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v6, v15
+; TONGA-NEXT: v_xor_b32_e32 v14, v0, v15
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v14, v4, 0
+; TONGA-NEXT: v_mul_hi_u32 v17, v14, v3
+; TONGA-NEXT: v_addc_u32_e32 v7, vcc, v7, v15, vcc
+; TONGA-NEXT: v_xor_b32_e32 v7, v7, v15
+; TONGA-NEXT: v_add_u32_e32 v17, vcc, v17, v0
+; TONGA-NEXT: v_addc_u32_e32 v18, vcc, 0, v1, vcc
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v7, v3, 0
+; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v7, v4, 0
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v17, v0
+; TONGA-NEXT: v_addc_u32_e32 v0, vcc, v18, v1, vcc
+; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v4, vcc
+; TONGA-NEXT: v_add_u32_e32 v3, vcc, v0, v3
+; TONGA-NEXT: v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; TONGA-NEXT: v_mul_lo_u32 v4, v5, v0
+; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v5, v3, 0
+; TONGA-NEXT: v_mul_lo_u32 v3, v16, v3
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v4, v1
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, v3, v1
+; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v7, v1
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v14, v0
+; TONGA-NEXT: v_subb_u32_e64 v3, s[0:1], v3, v16, vcc
+; TONGA-NEXT: v_sub_u32_e64 v4, s[0:1], v0, v5
+; TONGA-NEXT: v_subbrev_u32_e64 v14, s[2:3], 0, v3, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v14, v16
+; TONGA-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v4, v5
+; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v7, v1, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v18, 0, -1, s[2:3]
+; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v14, v16
+; TONGA-NEXT: v_subb_u32_e64 v3, s[0:1], v3, v16, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v16
+; TONGA-NEXT: v_cndmask_b32_e64 v17, v17, v18, s[2:3]
+; TONGA-NEXT: v_sub_u32_e64 v18, s[0:1], v4, v5
+; TONGA-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
+; TONGA-NEXT: v_subbrev_u32_e64 v3, s[0:1], 0, v3, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
+; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v1, v16
+; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v17
+; TONGA-NEXT: v_cndmask_b32_e32 v5, v7, v5, vcc
+; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v18, s[0:1]
+; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
+; TONGA-NEXT: v_cndmask_b32_e64 v3, v14, v3, s[0:1]
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; TONGA-NEXT: v_xor_b32_e32 v0, v0, v15
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v15
+; TONGA-NEXT: v_sub_u32_e32 v14, vcc, v0, v15
+; TONGA-NEXT: v_subb_u32_e32 v15, vcc, v1, v15, vcc
+; TONGA-NEXT: s_cbranch_execnz .LBB12_12
+; TONGA-NEXT: .LBB12_11:
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v2
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, 0, v2
+; TONGA-NEXT: v_mov_b32_e32 v15, 0
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0
+; TONGA-NEXT: v_mul_lo_u32 v1, v1, v0
+; TONGA-NEXT: v_mul_hi_u32 v1, v0, v1
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT: v_mul_hi_u32 v0, v6, v0
+; TONGA-NEXT: v_mul_lo_u32 v0, v0, v2
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v6, v0
+; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc
+; TONGA-NEXT: .LBB12_12:
+; TONGA-NEXT: v_mov_b32_e32 v0, s4
+; TONGA-NEXT: v_mov_b32_e32 v1, s5
+; TONGA-NEXT: s_add_u32 s0, s4, 16
+; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; TONGA-NEXT: s_addc_u32 s1, s5, 0
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[12:15]
+; TONGA-NEXT: s_endpgm
+; TONGA-NEXT: .LBB12_13:
+; TONGA-NEXT: ; implicit-def: $vgpr8_vgpr9
+; TONGA-NEXT: s_branch .LBB12_2
+; TONGA-NEXT: .LBB12_14:
+; TONGA-NEXT: s_branch .LBB12_5
+; TONGA-NEXT: .LBB12_15:
+; TONGA-NEXT: ; implicit-def: $vgpr12_vgpr13
+; TONGA-NEXT: s_branch .LBB12_8
+; TONGA-NEXT: .LBB12_16:
+; TONGA-NEXT: s_branch .LBB12_11
+;
+; EG-LABEL: srem_v4i64:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @34, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @26
+; EG-NEXT: ALU 114, @35, KC0[], KC1[]
+; EG-NEXT: ALU 115, @150, KC0[], KC1[]
+; EG-NEXT: ALU 115, @266, KC0[], KC1[]
+; EG-NEXT: ALU 111, @382, KC0[], KC1[]
+; EG-NEXT: TEX 1 @30
+; EG-NEXT: ALU 114, @494, KC0[], KC1[]
+; EG-NEXT: ALU 113, @609, KC0[], KC1[]
+; EG-NEXT: ALU 114, @723, KC0[], KC1[]
+; EG-NEXT: ALU 113, @838, KC0[], KC1[]
+; EG-NEXT: ALU 114, @952, KC0[], KC1[]
+; EG-NEXT: ALU 113, @1067, KC0[], KC1[]
+; EG-NEXT: ALU 114, @1181, KC0[], KC1[]
+; EG-NEXT: ALU 113, @1296, KC0[], KC1[]
+; EG-NEXT: ALU 114, @1410, KC0[], KC1[]
+; EG-NEXT: ALU 114, @1525, KC0[], KC1[]
+; EG-NEXT: ALU 114, @1640, KC0[], KC1[]
+; EG-NEXT: ALU 115, @1755, KC0[], KC1[]
+; EG-NEXT: ALU 113, @1871, KC0[], KC1[]
+; EG-NEXT: ALU 112, @1985, KC0[], KC1[]
+; EG-NEXT: ALU 99, @2098, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T4.XYZW, T1.X, 0
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T7.XYZW, T0.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 26:
+; EG-NEXT: VTX_READ_128 T1.XYZW, T2.X, 32, #1
+; EG-NEXT: VTX_READ_128 T0.XYZW, T2.X, 0, #1
+; EG-NEXT: Fetch clause starting at 30:
+; EG-NEXT: VTX_READ_128 T9.XYZW, T2.X, 16, #1
+; EG-NEXT: VTX_READ_128 T10.XYZW, T2.X, 48, #1
+; EG-NEXT: ALU clause starting at 34:
+; EG-NEXT: MOV * T2.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 35:
+; EG-NEXT: ASHR * T3.W, T1.Y, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T2.W, T1.X, PV.W,
+; EG-NEXT: XOR_INT * T7.W, PV.W, T3.W,
+; EG-NEXT: SUB_INT T2.Z, 0.0, PV.W,
+; EG-NEXT: ASHR T2.W, T0.Y, literal.x,
+; EG-NEXT: RECIP_UINT * T2.Y, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T3.Z, T0.Y, PV.W,
+; EG-NEXT: ADDC_UINT T4.W, T0.X, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.Z, PS,
+; EG-NEXT: ADD_INT T4.W, PV.Z, PV.W,
+; EG-NEXT: MULHI * T0.Y, T2.Y, PS,
+; EG-NEXT: ADD_INT T5.W, T2.Y, PS,
+; EG-NEXT: XOR_INT * T4.W, PV.W, T2.W,
+; EG-NEXT: MULHI * T0.Y, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T7.W,
+; EG-NEXT: SUB_INT * T5.W, T4.W, PS,
+; EG-NEXT: SETGE_UINT T6.W, PV.W, T7.W,
+; EG-NEXT: SUB_INT * T8.W, PV.W, T7.W,
+; EG-NEXT: CNDE_INT T2.Z, PV.W, T5.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: ADD_INT T5.W, T1.Y, T3.W,
+; EG-NEXT: ADDC_UINT * T6.W, T1.X, T3.W,
+; EG-NEXT: ADD_INT T3.Z, PV.W, PS,
+; EG-NEXT: SETGE_UINT T5.W, PV.Z, T7.W,
+; EG-NEXT: SUB_INT * T6.W, PV.Z, T7.W,
+; EG-NEXT: ADD_INT T4.Z, T0.X, T2.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T5.W, PV.W, T2.Z, PS,
+; EG-NEXT: XOR_INT * T6.W, PV.Z, T3.W,
+; EG-NEXT: CNDE_INT T3.W, PS, PV.W, T4.W,
+; EG-NEXT: XOR_INT * T8.W, PV.Z, T2.W,
+; EG-NEXT: BIT_ALIGN_INT T4.W, PV.W, PS, literal.x,
+; EG-NEXT: LSHR * T3.W, PV.W, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T2.Z, PS, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.W, PS, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T7.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.Z, PV.W, PS,
+; EG-NEXT: SUB_INT * T2.Z, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T5.W, T3.W, T6.W,
+; EG-NEXT: SUBB_UINT * T9.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT T5.W, T5.W, PV.W,
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 29(4.063766e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 28(3.923636e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 27(3.783506e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 26(3.643376e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT * T2.Z, PS, T7.W,
+; EG-NEXT: ALU clause starting at 150:
+; EG-NEXT: SETE_INT T5.W, T3.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, T3.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, T2.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 25(3.503246e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 22(3.082857e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 21(2.942727e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 20(2.802597e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 19(2.662467e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T2.Z, T4.W, T7.W,
+; EG-NEXT: ALU clause starting at 266:
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 18(2.522337e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 17(2.382207e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 15(2.101948e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 14(1.961818e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 13(1.821688e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 12(1.681558e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: ALU clause starting at 382:
+; EG-NEXT: LSHL T2.Z, T4.W, 1,
+; EG-NEXT: BFE_UINT * T9.W, T8.W, literal.x, 1, BS:VEC_120/SCL_212
+; EG-NEXT: 11(1.541428e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, T5.W,
+; EG-NEXT: BIT_ALIGN_INT T3.W, PV.W, T4.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T4.W, T2.Z, T9.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 10(1.401298e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 6(8.407791e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T9.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 5(7.006492e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T9.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: ALU clause starting at 494:
+; EG-NEXT: LSHL T2.Z, T4.W, 1,
+; EG-NEXT: BFE_UINT * T11.W, T8.W, literal.x, 1, BS:VEC_120/SCL_212
+; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, T5.W,
+; EG-NEXT: BIT_ALIGN_INT T3.W, PV.W, T4.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T4.W, T2.Z, T11.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T11.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T2.Z, PS, 1,
+; EG-NEXT: BFE_UINT T11.W, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT * T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.W, PS, T4.W, literal.x,
+; EG-NEXT: OR_INT * T4.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.Z, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.W, PV.W, T6.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T0.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Z, T4.W, T7.W,
+; EG-NEXT: SUBB_UINT * T5.W, T4.W, T7.W,
+; EG-NEXT: SUB_INT * T11.W, T3.W, T6.W,
+; EG-NEXT: SUB_INT T5.W, PV.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T4.W, T0.Y, T4.W, T2.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T2.Z, T8.W, literal.x, 1,
+; EG-NEXT: CNDE_INT T3.W, T0.Y, T3.W, PV.W,
+; EG-NEXT: ASHR * T11.W, T10.Y, literal.y,
+; EG-NEXT: 2(2.802597e-45), 31(4.344025e-44)
+; EG-NEXT: ADD_INT T3.Z, T10.X, PS,
+; EG-NEXT: BIT_ALIGN_INT T5.W, PV.W, T4.W, literal.x,
+; EG-NEXT: OR_INT * T12.W, PV.Y, PV.Z,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T0.Y, PS, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T2.Z, PV.W, T6.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T3.W, PV.W, T6.W, BS:VEC_102/SCL_221
+; EG-NEXT: XOR_INT * T4.W, PV.Z, T11.W,
+; EG-NEXT: SUB_INT T13.W, 0.0, PS,
+; EG-NEXT: CNDE_INT * T14.W, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T0.X, T12.W, T7.W,
+; EG-NEXT: SUBB_UINT * T0.Y, T12.W, T7.W,
+; EG-NEXT: SUB_INT T2.Z, T5.W, T6.W,
+; EG-NEXT: ASHR T3.W, T9.Y, literal.x,
+; EG-NEXT: RECIP_UINT * T1.X, T4.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T1.Y, T9.Y, PV.W,
+; EG-NEXT: SUB_INT T2.Z, PV.Z, T0.Y,
+; EG-NEXT: CNDE_INT T12.W, T14.W, T12.W, T0.X,
+; EG-NEXT: MULLO_INT * T0.X, T13.W, PS,
+; EG-NEXT: ADDC_UINT T2.X, T9.X, T3.W,
+; EG-NEXT: LSHL T0.Y, PV.W, 1,
+; EG-NEXT: BFE_UINT * T3.Z, T8.W, 1, 1,
+; EG-NEXT: CNDE_INT T5.W, T14.W, T5.W, T2.Z,
+; EG-NEXT: MULHI * T0.X, T1.X, T0.X,
+; EG-NEXT: ADD_INT T2.Y, T1.X, PS,
+; EG-NEXT: BIT_ALIGN_INT T4.Z, PV.W, T12.W, literal.x,
+; EG-NEXT: OR_INT T12.W, T0.Y, T3.Z,
+; EG-NEXT: ADD_INT * T5.W, T1.Y, T2.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: XOR_INT T1.X, PS, T3.W,
+; EG-NEXT: ASHR T0.Y, T10.W, literal.x,
+; EG-NEXT: SETGE_UINT * T2.Z, PV.W, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T5.W, T4.Z, T6.W,
+; EG-NEXT: SETGE_UINT * T13.W, T4.Z, T6.W,
+; EG-NEXT: CNDE_INT T1.Y, PV.W, PS, T2.Z,
+; EG-NEXT: SUB_INT T3.Z, T12.W, T7.W,
+; EG-NEXT: ADD_INT T5.W, T10.Z, T0.Y, BS:VEC_021/SCL_122
+; EG-NEXT: MULHI * T0.X, T1.X, T2.Y,
+; EG-NEXT: SUBB_UINT T2.X, T12.W, T7.W,
+; EG-NEXT: SUB_INT T2.Y, T4.Z, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: XOR_INT * T2.Z, PV.W, T0.Y,
+; EG-NEXT: ASHR T5.W, T9.W, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, T0.X, T4.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T3.X, T9.W, PV.W,
+; EG-NEXT: SUB_INT T3.Y, 0.0, T2.Z,
+; EG-NEXT: SUB_INT T5.Z, T2.Y, T2.X,
+; EG-NEXT: CNDE_INT T9.W, T1.Y, T12.W, T3.Z, BS:VEC_120/SCL_212
+; EG-NEXT: RECIP_UINT * T2.X, T2.Z,
+; EG-NEXT: ADDC_UINT T4.X, T9.Z, T5.W,
+; EG-NEXT: LSHL T2.Y, PV.W, 1,
+; EG-NEXT: AND_INT T3.Z, T8.W, 1,
+; EG-NEXT: CNDE_INT T8.W, T1.Y, T4.Z, PV.Z,
+; EG-NEXT: MULLO_INT * T1.Y, PV.Y, PS,
+; EG-NEXT: BIT_ALIGN_INT T3.Y, PV.W, T9.W, literal.x,
+; EG-NEXT: OR_INT T5.Z, PV.Y, PV.Z,
+; EG-NEXT: ADD_INT T8.W, T3.X, PV.X,
+; EG-NEXT: MULHI * T1.Y, T2.X, PS,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T2.X, T2.X, PS,
+; EG-NEXT: XOR_INT T1.Y, PV.W, T5.W,
+; EG-NEXT: SETGE_UINT T3.Z, PV.Z, T7.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, PV.Y, T6.W, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT * T9.W, T1.X, T0.X,
+; EG-NEXT: SETGE_UINT T0.X, T3.Y, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUBB_UINT T2.Y, T5.Z, T7.W, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT T4.Z, T3.Y, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.W, PS, T4.W,
+; EG-NEXT: SUB_INT * T12.W, PS, T4.W,
+; EG-NEXT: CNDE_INT T3.X, PV.W, T9.W, PS,
+; EG-NEXT: ADD_INT T4.Y, T10.Y, T11.W, BS:VEC_102/SCL_221
+; EG-NEXT: ADDC_UINT T6.Z, T10.X, T11.W, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT T6.W, PV.Z, PV.Y,
+; EG-NEXT: CNDE_INT * T8.W, T8.W, PV.X, T3.Z,
+; EG-NEXT: CNDE_INT T0.X, PS, T3.Y, PV.W,
+; EG-NEXT: ADD_INT * T2.Y, PV.Y, PV.Z,
+; EG-NEXT: ALU clause starting at 609:
+; EG-NEXT: SETGE_UINT T3.Z, T3.X, T4.W,
+; EG-NEXT: SUB_INT T6.W, T3.X, T4.W,
+; EG-NEXT: MULHI * T2.X, T1.Y, T2.X,
+; EG-NEXT: ASHR T4.X, T1.W, literal.x,
+; EG-NEXT: CNDE_INT T3.Y, PV.Z, T3.X, PV.W,
+; EG-NEXT: XOR_INT T3.Z, T2.Y, T11.W,
+; EG-NEXT: ADD_INT T6.W, T9.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: MULLO_INT * T2.X, PS, T2.Z,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: XOR_INT T3.X, PV.W, T5.W,
+; EG-NEXT: CNDE_INT T2.Y, PV.Z, PV.Y, T1.X,
+; EG-NEXT: ADD_INT T4.Z, T1.Z, PV.X,
+; EG-NEXT: SUB_INT T9.W, T1.Y, PS,
+; EG-NEXT: ASHR * T6.W, T0.W, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T1.X, T0.W, PS,
+; EG-NEXT: ADDC_UINT T3.Y, T0.Z, PS,
+; EG-NEXT: SETGE_UINT T6.Z, PV.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, PV.W, T2.Z,
+; EG-NEXT: XOR_INT * T0.W, PV.Z, T4.X,
+; EG-NEXT: SUB_INT T2.X, 0.0, PS,
+; EG-NEXT: CNDE_INT T4.Y, PV.Z, T9.W, PV.W,
+; EG-NEXT: ADD_INT T4.Z, T10.W, T0.Y,
+; EG-NEXT: ADDC_UINT T9.W, T10.Z, T0.Y,
+; EG-NEXT: RECIP_UINT * T5.X, PS,
+; EG-NEXT: ADD_INT T6.X, PV.Z, PV.W,
+; EG-NEXT: SETGE_UINT T5.Y, PV.Y, T2.Z,
+; EG-NEXT: SUB_INT T4.Z, PV.Y, T2.Z,
+; EG-NEXT: ADD_INT T9.W, T9.X, T3.W,
+; EG-NEXT: MULLO_INT * T6.Y, PV.X, PS,
+; EG-NEXT: XOR_INT T2.X, PV.W, T3.W,
+; EG-NEXT: CNDE_INT T4.Y, PV.Y, T4.Y, PV.Z,
+; EG-NEXT: XOR_INT T4.Z, PV.X, T0.Y, BS:VEC_021/SCL_122
+; EG-NEXT: ADD_INT T9.W, T1.X, T3.Y, BS:VEC_102/SCL_221
+; EG-NEXT: MULHI * T0.Y, T5.X, PS,
+; EG-NEXT: ADD_INT T1.X, T5.X, PS,
+; EG-NEXT: XOR_INT T0.Y, PV.W, T6.W,
+; EG-NEXT: CNDE_INT T6.Z, PV.Z, PV.Y, T1.Y, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T9.W, T2.Y, PV.X, literal.x,
+; EG-NEXT: LSHR * T10.W, T2.Y, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T5.X, PS, T3.Z,
+; EG-NEXT: SETGE_UINT T1.Y, PS, T3.Z,
+; EG-NEXT: SETGE_UINT T7.Z, PV.W, T4.W,
+; EG-NEXT: LSHR T11.W, PV.Z, literal.x,
+; EG-NEXT: MULHI * T1.X, PV.Y, PV.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T6.X, PV.W, T4.Z,
+; EG-NEXT: CNDE_INT T1.Y, PV.X, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT T7.Z, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T12.W, T6.Z, T3.X, literal.x,
+; EG-NEXT: MULLO_INT * T1.X, PS, T0.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T5.X, T11.W, T4.Z,
+; EG-NEXT: SETGE_UINT T2.Y, PV.W, T2.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT T6.Z, T0.Y, PS,
+; EG-NEXT: CNDE_INT T13.W, PV.Y, T9.W, PV.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T7.W, T5.Z, T7.W,
+; EG-NEXT: CNDE_INT T1.X, T8.W, T5.Z, PS,
+; EG-NEXT: LSHL T3.Y, PV.W, 1,
+; EG-NEXT: SETGE_UINT T5.Z, PV.Z, T0.W,
+; EG-NEXT: SUB_INT T7.W, PV.Z, T0.W,
+; EG-NEXT: CNDE_INT * T8.W, T6.X, PV.X, PV.Y,
+; EG-NEXT: SUB_INT T5.X, T12.W, T2.Z,
+; EG-NEXT: SUB_INT T2.Y, T11.W, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SUBB_UINT T7.Z, T12.W, T2.Z,
+; EG-NEXT: ADD_INT T1.W, T1.W, T4.X, BS:VEC_201
+; EG-NEXT: ADDC_UINT * T14.W, T1.Z, T4.X,
+; EG-NEXT: BFE_UINT T6.X, T2.X, literal.x, 1,
+; EG-NEXT: ADD_INT T4.Y, PV.W, PS,
+; EG-NEXT: SUB_INT T1.Z, PV.Y, PV.Z,
+; EG-NEXT: CNDE_INT T1.W, T8.W, T12.W, PV.X, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T7.W, T5.Z, T6.Z, T7.W,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T5.X, PS, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT T2.Y, PS, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: LSHL T5.Z, PV.W, 1,
+; EG-NEXT: BFE_UINT T12.W, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT * T8.W, T8.W, T11.W, PV.Z,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T7.X, PS, T1.W, literal.x,
+; EG-NEXT: OR_INT T5.Y, PV.Z, PV.W,
+; EG-NEXT: ADD_INT T0.Z, T0.Z, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T7.W, PV.X, T7.W, PV.Y, BS:VEC_102/SCL_221
+; EG-NEXT: XOR_INT * T1.W, T4.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, PS, PV.W, T0.Y,
+; EG-NEXT: XOR_INT T0.Y, PV.Z, T6.W,
+; EG-NEXT: SETGE_UINT T0.Z, PV.Y, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T7.W, PV.X, T4.Z,
+; EG-NEXT: SETGE_UINT * T8.W, PV.X, T4.Z,
+; EG-NEXT: SUB_INT T5.X, T10.W, T3.Z,
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T0.Z, T5.Y, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.X, PV.Y, literal.x,
+; EG-NEXT: LSHR * T8.W, PV.X, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T5.Y, T2.Z,
+; EG-NEXT: SUB_INT T4.Y, T7.X, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T1.Z, PS, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T11.W, PS, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT * T12.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT T8.X, T8.W, T1.W,
+; EG-NEXT: CNDE_INT * T6.Y, PV.Z, PV.W, PS,
+; EG-NEXT: SUB_INT T1.Z, T7.W, T0.W,
+; EG-NEXT: SUB_INT T11.W, T4.Y, T4.X,
+; EG-NEXT: CNDE_INT * T12.W, T2.Y, T5.Y, T0.Z,
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T4.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T11.W, T2.Y, T7.X, PV.W,
+; EG-NEXT: CNDE_INT * T7.W, T6.Y, T7.W, PV.Z,
+; EG-NEXT: 29(4.063766e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT * T7.X, T9.W, T4.W,
+; EG-NEXT: ALU clause starting at 723:
+; EG-NEXT: LSHL T2.Y, T7.W, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, T11.W, T12.W, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: OR_INT T9.W, T4.Y, T0.Z,
+; EG-NEXT: SUB_INT * T11.W, T8.X, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T4.Y, T6.Y, T8.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T4.Z,
+; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T8.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T5.Y, T9.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.Y, T7.W, literal.x,
+; EG-NEXT: OR_INT T7.W, T2.Y, PV.X,
+; EG-NEXT: SUB_INT * T8.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T1.Y, T10.W, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.Z, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT * T8.W, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T10.W, T8.X, T9.W, T5.Y,
+; EG-NEXT: LSHL T5.X, PV.W, 1,
+; EG-NEXT: CNDE_INT T1.Y, T5.Z, T8.W, T1.Y,
+; EG-NEXT: SUB_INT * T5.Z, T7.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T8.W, T4.X, T13.W, literal.x,
+; EG-NEXT: OR_INT * T11.W, T3.Y, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T0.W,
+; EG-NEXT: SUB_INT * T2.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T11.W, T4.W,
+; EG-NEXT: SETE_INT T12.W, T8.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T13.W, T8.W, T3.Z,
+; EG-NEXT: SUBB_UINT T6.X, T11.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T11.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T3.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T11.W, PV.Y, T11.W, PV.Z,
+; EG-NEXT: 29(4.063766e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T3.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T7.W, literal.y,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T6.X,
+; EG-NEXT: 28(3.923636e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T8.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T8.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T1.W,
+; EG-NEXT: 29(4.063766e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T6.X, T9.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T5.Z, T7.W, T0.W, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T8.W, T2.Y, T11.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T0.W,
+; EG-NEXT: SUB_INT * T1.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T9.W, T4.W,
+; EG-NEXT: SETE_INT T11.W, T8.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T12.W, T8.W, T3.Z,
+; EG-NEXT: SUBB_UINT T9.X, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T11.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T7.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T3.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T11.W, T3.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 28(3.923636e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T1.Z, T4.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T7.W, literal.x,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T11.W, PV.X, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T8.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T8.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T1.W,
+; EG-NEXT: 28(3.923636e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T9.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T8.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T8.X, T1.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T1.Z, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T11.W, PV.X, T7.W, PV.Y,
+; EG-NEXT: LSHL T6.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T1.Z, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T10.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T4.W,
+; EG-NEXT: SUB_INT T2.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, T9.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T4.Z,
+; EG-NEXT: SUBB_UINT T5.X, T10.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT * T6.Z, T10.W, T2.Z,
+; EG-NEXT: ALU clause starting at 838:
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T1.Y, T8.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T4.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T3.Y, T10.W, T6.Z,
+; EG-NEXT: 27(3.783506e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T0.Y, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T8.W, literal.y,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T5.X,
+; EG-NEXT: 27(3.783506e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T3.Z,
+; EG-NEXT: 27(3.783506e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T5.X, T7.W, T0.W,
+; EG-NEXT: CNDE_INT * T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T7.W, T2.Y, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T4.W,
+; EG-NEXT: SUB_INT T1.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T10.W, T7.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T12.W, T7.W, T4.Z,
+; EG-NEXT: SUBB_UINT T8.X, T9.W, T2.Z,
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T2.Z,
+; EG-NEXT: SUB_INT T10.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T7.W, T4.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T10.W, T3.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 26(3.643376e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T0.Z, T1.W,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T10.W, PV.X, T8.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T7.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T7.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T10.W, PV.Z, T3.Z,
+; EG-NEXT: 26(3.643376e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T8.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T7.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T5.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T9.X, T0.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T0.Z, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T10.W, PV.X, T8.W, PV.Y,
+; EG-NEXT: LSHL T5.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T0.Z, T7.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T11.W, literal.x,
+; EG-NEXT: OR_INT * T11.W, T6.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T2.Z,
+; EG-NEXT: SUB_INT T2.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T13.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T6.X, T11.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T11.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, PV.Y, PV.X,
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T1.Z, PV.W,
+; EG-NEXT: CNDE_INT * T11.W, PV.Y, T11.W, PV.Z,
+; EG-NEXT: 25(3.503246e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T2.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T7.W, literal.y,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T6.X,
+; EG-NEXT: 25(3.503246e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T4.Z,
+; EG-NEXT: 26(3.643376e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T6.X, T8.W, T4.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T0.Z, T7.W, T2.Z, BS:VEC_201
+; EG-NEXT: BIT_ALIGN_INT T8.W, T2.Y, T11.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T2.Z,
+; EG-NEXT: SUB_INT T1.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T11.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T9.X, T9.W, T0.W,
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T0.W,
+; EG-NEXT: SUB_INT * T11.W, PV.Y, PV.X,
+; EG-NEXT: ALU clause starting at 952:
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T7.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T1.W,
+; EG-NEXT: LSHL T1.Y, PV.W, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT * T11.W, T3.Y, T1.Z, T11.W,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T9.W, T2.Y, T9.W, T6.Z,
+; EG-NEXT: SUB_INT T10.X, T5.Z, T3.Z,
+; EG-NEXT: LSHL T3.Y, PV.W, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, T11.W, T7.W, literal.x,
+; EG-NEXT: OR_INT T7.W, T1.Y, T0.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T11.W, T4.X, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T8.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T4.Z,
+; EG-NEXT: 25(3.503246e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T9.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.Y, T9.W, literal.x,
+; EG-NEXT: OR_INT T8.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T8.X, T5.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.Z, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T11.W, PV.X, T7.W, PV.Y,
+; EG-NEXT: LSHL T6.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T5.Z, T8.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T10.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T0.W,
+; EG-NEXT: SUB_INT * T2.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T10.W, T4.W,
+; EG-NEXT: SETE_INT T12.W, T9.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T3.Z,
+; EG-NEXT: SUBB_UINT T5.X, T10.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T10.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T1.Y, T8.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T3.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T10.W, PV.Y, T10.W, PV.Z,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T3.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T8.W, literal.y,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T5.X,
+; EG-NEXT: 23(3.222986e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T1.W,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T5.X, T7.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T5.Z, T8.W, T0.W, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T7.W, T2.Y, T10.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T0.W,
+; EG-NEXT: SUB_INT * T1.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T9.W, T4.W,
+; EG-NEXT: SETE_INT T10.W, T7.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T12.W, T7.W, T3.Z,
+; EG-NEXT: SUBB_UINT T8.X, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T10.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T7.W, T3.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T10.W, T3.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T1.Z, T4.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T8.W, literal.x,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T10.W, PV.X, T8.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T7.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T7.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T10.W, PV.Z, T1.W,
+; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T8.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T8.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T7.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T5.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T9.X, T1.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T1.Z, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, PV.X, T8.W, PV.Y,
+; EG-NEXT: LSHL T5.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T1.Z, T7.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T11.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T11.W, T6.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 1067:
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T4.W,
+; EG-NEXT: SUB_INT T2.Y, T5.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT * T6.Z, T11.W, T2.Z, BS:VEC_210
+; EG-NEXT: SETE_INT T12.W, T9.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T4.Z,
+; EG-NEXT: SUBB_UINT T6.X, T11.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T11.W, T2.Z,
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T4.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T11.W, PV.Y, T11.W, PV.Z,
+; EG-NEXT: 22(3.082857e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T0.Y, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T7.W, literal.y,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T6.X,
+; EG-NEXT: 22(3.082857e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T3.Z,
+; EG-NEXT: 22(3.082857e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T6.X, T8.W, T0.W,
+; EG-NEXT: CNDE_INT * T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T7.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T8.W, T2.Y, T11.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T4.W,
+; EG-NEXT: SUB_INT T1.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T11.W, T8.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T12.W, T8.W, T4.Z,
+; EG-NEXT: SUBB_UINT T9.X, T9.W, T2.Z,
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T7.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T4.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T11.W, T3.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 21(2.942727e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T0.Z, T1.W,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T7.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T11.W, PV.X, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T8.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T3.Z,
+; EG-NEXT: 21(2.942727e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T9.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T8.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T8.X, T0.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T0.Z, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T11.W, PV.X, T7.W, PV.Y,
+; EG-NEXT: LSHL T6.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T10.W, literal.x,
+; EG-NEXT: OR_INT * T10.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T2.Z,
+; EG-NEXT: SUB_INT T2.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T13.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T5.X, T10.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T10.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, PV.Y, PV.X,
+; EG-NEXT: CNDE_INT * T8.W, T1.Y, T8.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T1.Z, PV.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.Y, T10.W, PV.Z,
+; EG-NEXT: 20(2.802597e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T2.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.y,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T5.X,
+; EG-NEXT: 20(2.802597e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T4.Z,
+; EG-NEXT: 21(2.942727e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T5.X, T7.W, T4.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T0.Z, T8.W, T2.Z, BS:VEC_201
+; EG-NEXT: BIT_ALIGN_INT T7.W, T2.Y, T10.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT * T4.X, T8.W, T2.Z,
+; EG-NEXT: ALU clause starting at 1181:
+; EG-NEXT: SUB_INT T1.Y, T1.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, T9.W, T0.W,
+; EG-NEXT: SETE_INT T10.W, T7.W, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, T7.W, T1.W,
+; EG-NEXT: SUBB_UINT T8.X, T9.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T10.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T7.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T10.W, T3.Y, T1.Z, PV.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 19(2.662467e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T5.Z, T3.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T10.W, PV.X, T8.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T7.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T7.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T10.W, PV.Z, T4.Z,
+; EG-NEXT: 20(2.802597e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T8.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T8.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.Y, T9.W, literal.x,
+; EG-NEXT: OR_INT T7.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T5.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T9.X, T5.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.Z, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T10.W, PV.X, T8.W, PV.Y,
+; EG-NEXT: LSHL T5.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T5.Z, T7.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T11.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T11.W, T6.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T0.W,
+; EG-NEXT: SUB_INT * T2.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T11.W, T4.W,
+; EG-NEXT: SETE_INT T12.W, T9.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T3.Z,
+; EG-NEXT: SUBB_UINT T6.X, T11.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T11.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T3.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T11.W, PV.Y, T11.W, PV.Z,
+; EG-NEXT: 19(2.662467e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T3.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T7.W, literal.y,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T6.X,
+; EG-NEXT: 18(2.522337e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T1.W,
+; EG-NEXT: 19(2.662467e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T6.X, T8.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T5.Z, T7.W, T0.W, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T8.W, T2.Y, T11.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T0.W,
+; EG-NEXT: SUB_INT * T1.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T9.W, T4.W,
+; EG-NEXT: SETE_INT T11.W, T8.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T12.W, T8.W, T3.Z,
+; EG-NEXT: SUBB_UINT T9.X, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T11.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T7.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T3.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T11.W, T3.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 18(2.522337e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T1.Z, T4.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T7.W, literal.x,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T11.W, PV.X, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T8.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T8.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T1.W,
+; EG-NEXT: 18(2.522337e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T9.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T8.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T8.X, T1.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W,
+; EG-NEXT: SETE_INT * T1.Z, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: ALU clause starting at 1296:
+; EG-NEXT: SETGE_UINT T9.W, T5.Z, T3.Z,
+; EG-NEXT: CNDE_INT * T11.W, T9.X, T7.W, T2.Y,
+; EG-NEXT: LSHL T6.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, T1.Z, PV.W, T1.Y,
+; EG-NEXT: SUB_INT T1.Z, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, T4.X, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T10.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T4.W,
+; EG-NEXT: SUB_INT T2.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, T9.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T4.Z,
+; EG-NEXT: SUBB_UINT T5.X, T10.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T10.W, T2.Z,
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T1.Y, T8.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T4.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, PV.Y, T10.W, PV.Z,
+; EG-NEXT: 17(2.382207e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T0.Y, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T8.W, literal.y,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T5.X,
+; EG-NEXT: 17(2.382207e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T3.Z,
+; EG-NEXT: 17(2.382207e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T5.X, T7.W, T0.W,
+; EG-NEXT: CNDE_INT * T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T7.W, T2.Y, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T4.W,
+; EG-NEXT: SUB_INT T1.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T10.W, T7.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T12.W, T7.W, T4.Z,
+; EG-NEXT: SUBB_UINT T8.X, T9.W, T2.Z,
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T2.Z,
+; EG-NEXT: SUB_INT T10.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T7.W, T4.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T10.W, T3.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T0.Z, T1.W,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T10.W, PV.X, T8.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T7.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T7.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T10.W, PV.Z, T3.Z,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T8.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T7.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T5.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T9.X, T0.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T0.Z, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T10.W, PV.X, T8.W, PV.Y,
+; EG-NEXT: LSHL T5.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T0.Z, T7.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T11.W, literal.x,
+; EG-NEXT: OR_INT * T11.W, T6.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T2.Z,
+; EG-NEXT: SUB_INT T2.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T13.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T6.X, T11.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T11.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, PV.Y, PV.X,
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T1.Z, PV.W,
+; EG-NEXT: CNDE_INT * T11.W, PV.Y, T11.W, PV.Z,
+; EG-NEXT: 15(2.101948e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T2.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T7.W, literal.y,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T6.X,
+; EG-NEXT: 15(2.101948e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z,
+; EG-NEXT: SETE_INT * T9.W, PV.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 1410:
+; EG-NEXT: SETGE_UINT * T12.W, T1.Z, T4.Z,
+; EG-NEXT: SUBB_UINT T6.X, T8.W, T4.W,
+; EG-NEXT: CNDE_INT * T3.Y, T9.W, PV.W, T0.Z, BS:VEC_201
+; EG-NEXT: SUB_INT T0.Z, T7.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T8.W, T2.Y, T11.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T2.Z,
+; EG-NEXT: SUB_INT T1.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T11.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T9.X, T9.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T11.W, PV.Y, PV.X,
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T7.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T11.W, T3.Y, T1.Z, PV.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 14(1.961818e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T5.Z, T3.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T7.W, literal.x,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T11.W, PV.X, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T8.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T4.Z,
+; EG-NEXT: 15(2.101948e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T9.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.Y, T9.W, literal.x,
+; EG-NEXT: OR_INT T8.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T8.X, T5.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.Z, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T11.W, PV.X, T7.W, PV.Y,
+; EG-NEXT: LSHL T6.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T5.Z, T8.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T10.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T0.W,
+; EG-NEXT: SUB_INT * T2.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T10.W, T4.W,
+; EG-NEXT: SETE_INT T12.W, T9.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T3.Z,
+; EG-NEXT: SUBB_UINT T5.X, T10.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T10.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T1.Y, T8.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T3.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T10.W, PV.Y, T10.W, PV.Z,
+; EG-NEXT: 14(1.961818e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T3.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T8.W, literal.y,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T5.X,
+; EG-NEXT: 13(1.821688e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T1.W,
+; EG-NEXT: 14(1.961818e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T5.X, T7.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T5.Z, T8.W, T0.W, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T7.W, T2.Y, T10.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T0.W,
+; EG-NEXT: SUB_INT * T1.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T9.W, T4.W,
+; EG-NEXT: SETE_INT T10.W, T7.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T12.W, T7.W, T3.Z,
+; EG-NEXT: SUBB_UINT T8.X, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T10.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T7.W, T3.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T10.W, T3.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 13(1.821688e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T1.Z, T4.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T8.W, literal.x,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T10.W, PV.X, T8.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T7.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T7.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T10.W, PV.Z, T1.W,
+; EG-NEXT: 13(1.821688e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T8.X, PV.W, PS, PV.Z,
+; EG-NEXT: ALU clause starting at 1525:
+; EG-NEXT: SUB_INT T2.Y, T8.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, T1.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T7.W, T3.Y, T4.X, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT * T9.W, T10.X, T5.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T9.X, T1.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T1.Z, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, T8.X, T8.W, PV.Y,
+; EG-NEXT: LSHL T5.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T1.Z, T7.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T11.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T11.W, T6.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T4.W,
+; EG-NEXT: SUB_INT T2.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, T9.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T4.Z,
+; EG-NEXT: SUBB_UINT T6.X, T11.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T11.W, T2.Z,
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T4.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T11.W, PV.Y, T11.W, PV.Z,
+; EG-NEXT: 12(1.681558e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T0.Y, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T7.W, literal.y,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T6.X,
+; EG-NEXT: 12(1.681558e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T3.Z,
+; EG-NEXT: 12(1.681558e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T6.X, T8.W, T0.W,
+; EG-NEXT: CNDE_INT * T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T7.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T8.W, T2.Y, T11.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T4.W,
+; EG-NEXT: SUB_INT T1.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T11.W, T8.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T12.W, T8.W, T4.Z,
+; EG-NEXT: SUBB_UINT T9.X, T9.W, T2.Z,
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T2.Z,
+; EG-NEXT: SUB_INT T11.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T7.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T4.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T11.W, T3.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 11(1.541428e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T0.Z, T1.W,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T7.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T11.W, PV.X, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T8.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T3.Z,
+; EG-NEXT: 11(1.541428e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T9.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T8.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T8.X, T0.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T0.Z, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T11.W, PV.X, T7.W, PV.Y,
+; EG-NEXT: LSHL T6.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T0.Z, T8.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T10.W, literal.x,
+; EG-NEXT: OR_INT * T10.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T2.Z,
+; EG-NEXT: SUB_INT T2.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T13.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T5.X, T10.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T10.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, PV.Y, PV.X,
+; EG-NEXT: CNDE_INT * T8.W, T1.Y, T8.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T1.Z, PV.W,
+; EG-NEXT: CNDE_INT * T10.W, PV.Y, T10.W, PV.Z,
+; EG-NEXT: 10(1.401298e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T2.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT * T1.Z, PV.W, T8.W, literal.y,
+; EG-NEXT: 10(1.401298e-44), 31(4.344025e-44)
+; EG-NEXT: ALU clause starting at 1640:
+; EG-NEXT: OR_INT T8.W, T2.Y, T0.Z,
+; EG-NEXT: SUB_INT * T12.W, T4.X, T5.X,
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, T1.Z, T4.Z, BS:VEC_210
+; EG-NEXT: SETGE_UINT * T12.W, T1.Z, T4.Z,
+; EG-NEXT: 11(1.541428e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T5.X, T7.W, T4.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T0.Z, T8.W, T2.Z, BS:VEC_201
+; EG-NEXT: BIT_ALIGN_INT T7.W, T2.Y, T10.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T2.Z,
+; EG-NEXT: SUB_INT T1.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T10.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T8.X, T9.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T10.W, PV.Y, PV.X,
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T7.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T10.W, T3.Y, T1.Z, PV.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T5.Z, T3.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T8.W, literal.x,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T10.W, PV.X, T8.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T7.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T7.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T10.W, PV.Z, T4.Z,
+; EG-NEXT: 10(1.401298e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T8.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T8.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.Y, T9.W, literal.x,
+; EG-NEXT: OR_INT T7.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T5.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T9.X, T5.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.Z, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T10.W, PV.X, T8.W, PV.Y,
+; EG-NEXT: LSHL T5.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T5.Z, T7.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T11.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T11.W, T6.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T0.W,
+; EG-NEXT: SUB_INT * T2.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T11.W, T4.W,
+; EG-NEXT: SETE_INT T12.W, T9.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T3.Z,
+; EG-NEXT: SUBB_UINT T6.X, T11.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T11.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T3.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T11.W, PV.Y, T11.W, PV.Z,
+; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T3.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T7.W, literal.y,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T6.X,
+; EG-NEXT: 8(1.121039e-44), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETE_INT T9.W, PV.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T1.W,
+; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T6.X, T8.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T5.Z, T7.W, T0.W, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T8.W, T2.Y, T11.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T0.W,
+; EG-NEXT: SUB_INT * T1.Y, T0.Z, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, T9.W, T4.W,
+; EG-NEXT: SETE_INT T11.W, T8.W, T3.Z, BS:VEC_201
+; EG-NEXT: SETGE_UINT * T12.W, T8.W, T3.Z,
+; EG-NEXT: SUBB_UINT T9.X, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T11.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T7.W, T5.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T3.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T5.Z, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T11.W, T3.Y, T0.Z, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T1.Z, T4.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T7.W, literal.x,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T11.W, PV.X, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 1755:
+; EG-NEXT: BFE_UINT T4.X, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT * T1.Y, T2.Y, T8.W, T11.W,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T5.Z, T7.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, T0.Z, T1.W,
+; EG-NEXT: SETGE_UINT * T11.W, T0.Z, T1.W,
+; EG-NEXT: CNDE_INT T9.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, T1.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T8.W, T3.Y, T4.X, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT * T9.W, T10.X, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T8.X, T1.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T1.Z, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T11.W, PV.X, T7.W, PV.Y,
+; EG-NEXT: LSHL T6.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T1.Z, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T10.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T4.W,
+; EG-NEXT: SUB_INT T2.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, T9.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T13.W, T9.W, T4.Z,
+; EG-NEXT: SUBB_UINT T5.X, T10.W, T2.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T10.W, T2.Z,
+; EG-NEXT: SUB_INT T12.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T1.Y, T8.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T4.Z,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T10.W, PV.Y, T10.W, PV.Z,
+; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T7.X, T0.Y, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T8.W, literal.y,
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T12.W, PV.X, T5.X,
+; EG-NEXT: 7(9.809089e-45), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T3.Z,
+; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T5.X, T7.W, T0.W,
+; EG-NEXT: CNDE_INT * T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T1.Z, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T7.W, T2.Y, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T8.W, T4.W,
+; EG-NEXT: SUB_INT T1.Y, T5.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T6.Z, PS, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T10.W, T7.W, T4.Z,
+; EG-NEXT: SETGE_UINT * T12.W, T7.W, T4.Z,
+; EG-NEXT: SUBB_UINT T8.X, T9.W, T2.Z,
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, T6.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T2.Z,
+; EG-NEXT: SUB_INT T10.W, T1.Y, T4.X,
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T1.Z,
+; EG-NEXT: SUB_INT T4.X, T7.W, T4.Z,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T1.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T10.W, T3.Y, T5.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 6(8.407791e-45), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T0.Z, T1.W,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T5.Z, PV.W, T8.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T8.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T10.W, PV.X, T8.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T7.W, PS,
+; EG-NEXT: SETGE_UINT T1.Z, PV.W, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T7.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T10.W, PV.Z, T3.Z,
+; EG-NEXT: 6(8.407791e-45), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T8.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T8.W, T4.W,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.Y, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T7.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T5.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T9.X, T0.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T0.Z, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T4.Z, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T10.W, PV.X, T8.W, PV.Y,
+; EG-NEXT: LSHL T5.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T0.Z, T7.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T11.W, literal.x,
+; EG-NEXT: OR_INT * T11.W, T6.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T2.Z,
+; EG-NEXT: SUB_INT T2.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T12.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T13.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T6.X, T11.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T11.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T12.W, PV.Y, PV.X,
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T9.W, T1.W,
+; EG-NEXT: LSHL T2.Y, PS, 1,
+; EG-NEXT: BFE_UINT * T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: 5(7.006492e-45), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 1871:
+; EG-NEXT: CNDE_INT T12.W, T1.Y, T1.Z, T12.W,
+; EG-NEXT: CNDE_INT * T11.W, T3.Y, T11.W, T6.Z,
+; EG-NEXT: BFE_UINT T7.X, T2.X, literal.x, 1,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T7.W, literal.y,
+; EG-NEXT: OR_INT T7.W, T2.Y, T0.Z,
+; EG-NEXT: SUB_INT * T12.W, T4.X, T6.X,
+; EG-NEXT: 5(7.006492e-45), 31(4.344025e-44)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T2.Y, T3.Y, T9.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T12.W, PV.Z, T4.Z,
+; EG-NEXT: 6(8.407791e-45), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T6.X, T8.W, T4.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T0.Z, T7.W, T2.Z, BS:VEC_201
+; EG-NEXT: BIT_ALIGN_INT T8.W, T2.Y, T11.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T1.Y, T4.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T4.X, T7.W, T2.Z,
+; EG-NEXT: SUB_INT T1.Y, T1.Z, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T6.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T11.W, PV.W, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, PV.W, T1.W,
+; EG-NEXT: SUBB_UINT T9.X, T9.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T2.Y, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T6.Z, T9.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T11.W, PV.Y, PV.X,
+; EG-NEXT: CNDE_INT * T7.W, T3.Y, T7.W, T0.Z,
+; EG-NEXT: SUB_INT T4.X, T8.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T0.Z, T3.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T11.W, T3.Y, T1.Z, PV.W,
+; EG-NEXT: CNDE_INT * T9.W, PV.Y, T9.W, PV.Z,
+; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T10.X, T5.Z, T3.Z,
+; EG-NEXT: LSHL T3.Y, PS, 1,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.W, T7.W, literal.x,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T11.W, PV.X, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T2.Y, T8.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, PV.Z, T4.Z,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T4.Z,
+; EG-NEXT: 5(7.006492e-45), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T9.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.Y, T9.W, literal.x,
+; EG-NEXT: OR_INT T8.W, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T10.X, T6.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT T4.X, T8.X, T5.Z, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.W, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T5.Z, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: SETGE_UINT T9.W, PV.Z, T1.W, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T11.W, PV.X, T7.W, PV.Y,
+; EG-NEXT: LSHL T6.X, PS, 1,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T5.Z, T8.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT T9.W, PV.X, T10.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T10.W, T5.X, T7.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T4.X, T3.X, literal.x, 1,
+; EG-NEXT: SETGE_UINT T2.Y, PS, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T6.Z, PV.W, T3.Z,
+; EG-NEXT: SETGE_UINT T12.W, PV.W, T3.Z,
+; EG-NEXT: CNDE_INT * T13.W, PV.Y, T8.W, PV.Z,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: LSHL T5.X, PS, 1,
+; EG-NEXT: CNDE_INT T2.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: SUB_INT T5.Z, T10.W, T4.W,
+; EG-NEXT: SUBB_UINT T12.W, T10.W, T4.W,
+; EG-NEXT: SUB_INT * T14.W, T9.W, T3.Z,
+; EG-NEXT: SUBB_UINT T7.X, T7.W, T2.Z,
+; EG-NEXT: SUBB_UINT * T3.Y, T8.W, T0.W, BS:VEC_120/SCL_212
+; EG-NEXT: SUB_INT T6.Z, T0.Z, T1.W,
+; EG-NEXT: SUB_INT * T7.W, T14.W, T12.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T8.W, T2.Y, T10.W, T5.Z,
+; EG-NEXT: SUB_INT T8.X, T1.Z, T4.Z,
+; EG-NEXT: LSHL T4.Y, PV.W, 1,
+; EG-NEXT: BFE_UINT T5.Z, T2.X, literal.x, 1,
+; EG-NEXT: CNDE_INT T7.W, T2.Y, T9.W, T7.W,
+; EG-NEXT: SUB_INT * T9.W, T6.Z, T3.Y,
+; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT T10.X, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT T1.Y, T1.Y, T0.Z, PS, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T0.Z, PV.W, T8.W, literal.y,
+; EG-NEXT: OR_INT T7.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT * T8.W, PV.X, T7.X,
+; EG-NEXT: 4(5.605194e-45), 31(4.344025e-44)
+; EG-NEXT: CNDE_INT T7.X, T9.X, T1.Z, PS,
+; EG-NEXT: SETGE_UINT T2.Y, PV.W, T4.W,
+; EG-NEXT: SETE_INT T1.Z, PV.Z, T3.Z, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T8.W, PV.Y, T13.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT * T9.W, T5.X, PV.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T5.X, T0.Z, T3.Z,
+; EG-NEXT: SUBB_UINT T1.Y, T7.W, T4.W,
+; EG-NEXT: SETGE_UINT * T5.Z, PS, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T10.W, T8.W, T1.W,
+; EG-NEXT: SETGE_UINT * T12.W, T8.W, T1.W,
+; EG-NEXT: SUB_INT T8.X, T0.Z, T3.Z,
+; EG-NEXT: CNDE_INT T3.Y, PV.W, PS, T5.Z,
+; EG-NEXT: SUB_INT T5.Z, T9.W, T0.W,
+; EG-NEXT: SUBB_UINT * T10.W, T9.W, T0.W,
+; EG-NEXT: SUB_INT * T12.W, T8.W, T1.W,
+; EG-NEXT: SUB_INT T9.X, PV.W, T10.W,
+; EG-NEXT: CNDE_INT * T4.Y, T3.Y, T9.W, T5.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T5.Z, T7.W, T4.W,
+; EG-NEXT: SUB_INT T9.W, T8.X, T1.Y,
+; EG-NEXT: CNDE_INT * T10.W, T1.Z, T5.X, T2.Y,
+; EG-NEXT: ALU clause starting at 1985:
+; EG-NEXT: CNDE_INT T5.X, T10.W, T0.Z, T9.W,
+; EG-NEXT: CNDE_INT T1.Y, T10.W, T7.W, T5.Z,
+; EG-NEXT: LSHL T0.Z, T4.Y, 1,
+; EG-NEXT: BFE_UINT * T7.W, T0.Y, literal.x, 1, BS:VEC_120/SCL_212
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T8.W, T3.Y, T8.W, T9.X,
+; EG-NEXT: BIT_ALIGN_INT T8.X, PV.W, T4.Y, literal.x,
+; EG-NEXT: OR_INT T2.Y, T0.Z, T7.W,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, T5.X, T1.Y, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT * T7.W, T7.X, T11.W, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: OR_INT * T8.W, T6.X, T4.X,
+; EG-NEXT: SETGE_UINT T4.X, PV.W, T2.Z,
+; EG-NEXT: SETE_INT T3.Y, T7.W, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T1.Z, T7.W, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: BFE_UINT T9.W, T2.X, literal.x, 1,
+; EG-NEXT: LSHL * T10.W, T1.Y, 1,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: OR_INT T5.X, PS, PV.W,
+; EG-NEXT: CNDE_INT T1.Y, PV.Y, PV.Z, PV.X,
+; EG-NEXT: SUB_INT T1.Z, T8.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SUBB_UINT T9.W, T8.W, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T10.W, T7.W, T4.Z,
+; EG-NEXT: SUB_INT T4.X, PS, PV.W,
+; EG-NEXT: CNDE_INT T3.Y, PV.Y, T8.W, PV.Z,
+; EG-NEXT: SETGE_UINT T1.Z, PV.X, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T8.W, T0.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T9.W, T0.Z, T3.Z,
+; EG-NEXT: CNDE_INT T6.X, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT T4.Y, T5.X, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: LSHL T1.Z, PV.Y, 1,
+; EG-NEXT: BFE_UINT T8.W, T3.X, literal.x, 1, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT * T7.W, T1.Y, T7.W, PV.X,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T4.X, PS, T3.Y, literal.x,
+; EG-NEXT: OR_INT T1.Y, PV.Z, PV.W,
+; EG-NEXT: CNDE_INT T1.Z, PV.X, T5.X, PV.Y,
+; EG-NEXT: SUBB_UINT T7.W, T2.Y, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T8.W, T8.X, T1.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T7.X, PS, PV.W,
+; EG-NEXT: LSHL T3.Y, PV.Z, 1,
+; EG-NEXT: SETGE_UINT T5.Z, PV.Y, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T7.W, PV.X, T4.Z,
+; EG-NEXT: SETGE_UINT * T8.W, PV.X, T4.Z,
+; EG-NEXT: BFE_UINT T9.X, T2.X, literal.x, 1,
+; EG-NEXT: SETGE_UINT T4.Y, T2.Y, T0.W,
+; EG-NEXT: SETE_INT T6.Z, T8.X, T1.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T7.W, PV.W, PS, PV.Z,
+; EG-NEXT: SUB_INT * T8.W, T1.Y, T2.Z,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T10.X, T1.Y, T2.Z,
+; EG-NEXT: SUB_INT T5.Y, T4.X, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SUBB_UINT * T5.Z, T5.X, T4.W, BS:VEC_120/SCL_212
+; EG-NEXT: SUB_INT T9.W, T0.Z, T3.Z,
+; EG-NEXT: CNDE_INT * T8.W, T7.W, T1.Y, T8.W,
+; EG-NEXT: SETGE_UINT T5.X, T8.X, T1.W,
+; EG-NEXT: LSHL T1.Y, PS, 1,
+; EG-NEXT: BFE_UINT T7.Z, T3.X, 1, 1, BS:VEC_201
+; EG-NEXT: SUB_INT T9.W, PV.W, T5.Z,
+; EG-NEXT: SUB_INT * T10.W, T5.Y, T10.X,
+; EG-NEXT: CNDE_INT T4.X, T7.W, T4.X, PS,
+; EG-NEXT: CNDE_INT T5.Y, T6.X, T0.Z, PV.W,
+; EG-NEXT: OR_INT T0.Z, PV.Y, PV.Z,
+; EG-NEXT: CNDE_INT T7.W, T6.Z, PV.X, T4.Y, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T9.W, T2.Y, T0.W,
+; EG-NEXT: CNDE_INT T5.X, PV.W, T2.Y, PS,
+; EG-NEXT: SETGE_UINT T1.Y, PV.Z, T2.Z,
+; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.Y, T1.Z, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T8.W, PV.X, T8.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T3.Y, T9.X,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT T4.X, PV.W, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T2.Y, PV.W, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PS, T4.W,
+; EG-NEXT: SETE_INT T10.W, PV.Z, T3.Z,
+; EG-NEXT: SETGE_UINT * T11.W, PV.Z, T3.Z,
+; EG-NEXT: CNDE_INT T6.X, PV.W, PS, PV.Z,
+; EG-NEXT: CNDE_INT T1.Y, PV.X, PV.Y, T1.Y,
+; EG-NEXT: LSHL T5.Z, T5.X, 1, BS:VEC_201
+; EG-NEXT: BFE_UINT T10.W, T0.Y, literal.x, 1,
+; EG-NEXT: CNDE_INT * T7.W, T7.W, T8.X, T7.X,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T4.X, T0.Z, T2.Z,
+; EG-NEXT: SUBB_UINT T2.Y, T0.Z, T2.Z,
+; EG-NEXT: SUB_INT T6.Z, T8.W, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T7.W, PS, T5.X, literal.x,
+; EG-NEXT: OR_INT * T10.W, PV.Z, PV.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T5.X, PS, T0.W,
+; EG-NEXT: SETE_INT T3.Y, PV.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T11.W, PV.Z, PV.Y,
+; EG-NEXT: CNDE_INT * T12.W, T1.Y, T0.Z, PV.X,
+; EG-NEXT: SUBB_UINT * T4.X, T10.W, T0.W,
+; EG-NEXT: SUB_INT T2.Y, T7.W, T1.W,
+; EG-NEXT: LSHL T0.Z, T12.W, 1, BS:VEC_201
+; EG-NEXT: AND_INT * T13.W, T3.X, 1,
+; EG-NEXT: CNDE_INT * T8.W, T1.Y, T8.W, T11.W,
+; EG-NEXT: SUB_INT T3.X, T10.W, T0.W,
+; EG-NEXT: BIT_ALIGN_INT * T1.Y, PV.W, T12.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: OR_INT T0.Z, T0.Z, T13.W,
+; EG-NEXT: SUB_INT T8.W, T2.Y, T4.X,
+; EG-NEXT: CNDE_INT * T11.W, T3.Y, T5.Z, T5.X,
+; EG-NEXT: SUB_INT T4.X, T9.W, T4.W,
+; EG-NEXT: CNDE_INT T2.Y, PS, T7.W, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T5.Z, PV.Z, T2.Z,
+; EG-NEXT: SETE_INT * T7.W, T1.Y, T4.Z, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT * T8.W, T11.W, T10.W, T3.X,
+; EG-NEXT: SETGE_UINT T3.X, T1.Y, T4.Z,
+; EG-NEXT: SUBB_UINT T3.Y, T0.Z, T2.Z, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T4.Z, T1.Y, T4.Z,
+; EG-NEXT: ALU clause starting at 2098:
+; EG-NEXT: BFE_UINT T10.W, T0.Y, 1, 1,
+; EG-NEXT: LSHL * T11.W, T8.W, 1,
+; EG-NEXT: SUBB_UINT T5.X, T9.W, T4.W,
+; EG-NEXT: SUB_INT T4.Y, T1.Z, T3.Z,
+; EG-NEXT: OR_INT T6.Z, PS, PV.W,
+; EG-NEXT: SUB_INT * T10.W, T4.Z, T3.Y, BS:VEC_201
+; EG-NEXT: CNDE_INT * T7.W, T7.W, T3.X, T5.Z,
+; EG-NEXT: CNDE_INT T3.X, PV.W, T1.Y, T10.W,
+; EG-NEXT: SETGE_UINT T1.Y, T6.Z, T0.W,
+; EG-NEXT: SUB_INT T4.Z, T4.Y, T5.X,
+; EG-NEXT: BIT_ALIGN_INT * T8.W, T2.Y, T8.W, literal.x, BS:VEC_201
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T9.W, T6.X, T9.W, T4.X,
+; EG-NEXT: SETE_INT T4.X, T8.W, T1.W,
+; EG-NEXT: SETGE_UINT T2.Y, T8.W, T1.W,
+; EG-NEXT: LSHL T5.Z, PV.W, 1,
+; EG-NEXT: BFE_UINT T10.W, T2.X, 1, 1,
+; EG-NEXT: CNDE_INT * T11.W, T6.X, T1.Z, T4.Z,
+; EG-NEXT: BIT_ALIGN_INT T5.X, PS, T9.W, literal.x, BS:VEC_021/SCL_122
+; EG-NEXT: OR_INT T3.Y, PV.Z, PV.W,
+; EG-NEXT: CNDE_INT T1.Z, PV.X, PV.Y, T1.Y,
+; EG-NEXT: SUB_INT T9.W, T6.Z, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: XOR_INT * T10.W, T3.X, T5.W,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T3.X, PS, T5.W,
+; EG-NEXT: CNDE_INT T1.Y, PV.Z, T6.Z, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT T4.Z, PV.Y, T4.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETE_INT T9.W, PV.X, T3.Z,
+; EG-NEXT: SETGE_UINT * T10.W, PV.X, T3.Z,
+; EG-NEXT: CNDE_INT T4.X, PV.W, PS, PV.Z,
+; EG-NEXT: LSHL T2.Y, PV.Y, 1,
+; EG-NEXT: AND_INT T4.Z, T0.Y, 1,
+; EG-NEXT: SUBB_UINT T9.W, T6.Z, T0.W, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT * T10.W, T8.W, T1.W,
+; EG-NEXT: SUB_INT T6.X, T3.Y, T4.W,
+; EG-NEXT: SUB_INT T0.Y, PS, PV.W,
+; EG-NEXT: SUBB_UINT T5.Z, T3.Y, T4.W,
+; EG-NEXT: SUB_INT T9.W, T5.X, T3.Z,
+; EG-NEXT: OR_INT * T10.W, PV.Y, PV.Z,
+; EG-NEXT: SUB_INT T7.X, PS, T0.W,
+; EG-NEXT: SUB_INT T2.Y, PV.W, PV.Z,
+; EG-NEXT: CNDE_INT T1.Z, T1.Z, T8.W, PV.Y, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T8.W, T4.X, T3.Y, PV.X,
+; EG-NEXT: SUB_INT * T9.W, T0.Z, T2.Z,
+; EG-NEXT: CNDE_INT T6.X, T7.W, T0.Z, PS,
+; EG-NEXT: LSHL T0.Y, PV.W, 1,
+; EG-NEXT: AND_INT T0.Z, T2.X, 1,
+; EG-NEXT: BIT_ALIGN_INT T7.W, PV.Z, T1.Y, literal.x,
+; EG-NEXT: CNDE_INT * T9.W, T4.X, T5.X, PV.Y,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T2.X, T10.W, T0.W,
+; EG-NEXT: SETE_INT T1.Y, PV.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGE_UINT * T1.Z, PV.W, T1.W, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT T8.W, T9.W, T8.W, literal.x,
+; EG-NEXT: OR_INT * T9.W, T0.Y, T0.Z,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: SETGE_UINT T4.X, PS, T4.W,
+; EG-NEXT: SETE_INT T0.Y, PV.W, T3.Z,
+; EG-NEXT: SETGE_UINT T0.Z, PV.W, T3.Z,
+; EG-NEXT: SUBB_UINT T11.W, PS, T4.W,
+; EG-NEXT: SUB_INT * T12.W, PV.W, T3.Z,
+; EG-NEXT: SUBB_UINT * T5.X, T10.W, T0.W,
+; EG-NEXT: SUB_INT * T2.Y, T7.W, T1.W,
+; EG-NEXT: SUB_INT * T2.Z, T9.W, T4.W,
+; EG-NEXT: SUB_INT T0.W, T12.W, T11.W,
+; EG-NEXT: CNDE_INT * T1.W, T0.Y, T0.Z, T4.X,
+; EG-NEXT: CNDE_INT T4.X, PS, T8.W, PV.W, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T0.Y, PS, T9.W, T2.Z, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT T0.Z, T2.Y, T5.X,
+; EG-NEXT: CNDE_INT T0.W, T1.Y, T1.Z, T2.X, BS:VEC_210
+; EG-NEXT: XOR_INT * T1.W, T6.X, T5.W,
+; EG-NEXT: SUBB_UINT T2.X, PS, T5.W,
+; EG-NEXT: CNDE_INT T1.Y, PV.W, T7.W, PV.Z, BS:VEC_021/SCL_122
+; EG-NEXT: XOR_INT T0.Z, PV.Y, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: XOR_INT * T4.W, PV.X, T3.W, BS:VEC_102/SCL_221
+; EG-NEXT: CNDE_INT * T0.W, T0.W, T10.W, T7.X,
+; EG-NEXT: XOR_INT T4.X, PV.W, T6.W,
+; EG-NEXT: SUB_INT T0.Y, T4.W, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUBB_UINT T1.Z, T0.Z, T3.W, BS:VEC_021/SCL_122
+; EG-NEXT: XOR_INT T0.W, T1.Y, T6.W,
+; EG-NEXT: SUB_INT * T7.W, T3.X, T2.X,
+; EG-NEXT: SUB_INT T2.X, PV.W, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T7.Y, PV.Y, PV.Z,
+; EG-NEXT: SUBB_UINT T1.Z, PV.X, T6.W, BS:VEC_021/SCL_122
+; EG-NEXT: XOR_INT T0.W, T1.X, T2.W,
+; EG-NEXT: XOR_INT * T4.W, T0.X, T2.W,
+; EG-NEXT: SUB_INT T0.Y, PS, T2.W,
+; EG-NEXT: SUB_INT T7.Z, T1.W, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUBB_UINT T1.W, PV.W, T2.W,
+; EG-NEXT: SUB_INT * T4.W, PV.X, PV.Z,
+; EG-NEXT: SUB_INT T7.X, T0.Z, T3.W,
+; EG-NEXT: SUB_INT T4.Y, PV.Y, PV.W,
+; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.x,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR T0.X, PV.W, literal.x,
+; EG-NEXT: SUB_INT T4.Z, T4.X, T6.W, BS:VEC_102/SCL_221
+; EG-NEXT: SUB_INT * T4.X, T0.W, T2.W,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr <4 x i64>, ptr addrspace(1) %in, i64 1
%num = load <4 x i64>, ptr addrspace(1) %in
%den = load <4 x i64>, ptr addrspace(1) %den_ptr
@@ -117,6 +8881,210 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
}
define amdgpu_kernel void @srem_v4i64_4(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GCN-LABEL: srem_v4i64_4:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v8, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[2:3]
+; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[2:3] offset:16
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_ashrrev_i32_e32 v9, 31, v1
+; GCN-NEXT: v_lshrrev_b32_e32 v9, 30, v9
+; GCN-NEXT: v_ashrrev_i32_e32 v10, 31, v3
+; GCN-NEXT: v_add_co_u32_e32 v9, vcc, v0, v9
+; GCN-NEXT: v_lshrrev_b32_e32 v10, 30, v10
+; GCN-NEXT: v_addc_co_u32_e32 v13, vcc, 0, v1, vcc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_ashrrev_i32_e32 v11, 31, v5
+; GCN-NEXT: v_add_co_u32_e32 v10, vcc, v2, v10
+; GCN-NEXT: v_lshrrev_b32_e32 v11, 30, v11
+; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, 0, v3, vcc
+; GCN-NEXT: v_ashrrev_i32_e32 v12, 31, v7
+; GCN-NEXT: v_add_co_u32_e32 v11, vcc, v4, v11
+; GCN-NEXT: v_lshrrev_b32_e32 v12, 30, v12
+; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v5, vcc
+; GCN-NEXT: v_add_co_u32_e32 v12, vcc, v6, v12
+; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v7, vcc
+; GCN-NEXT: v_and_b32_e32 v9, -4, v9
+; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v9
+; GCN-NEXT: v_and_b32_e32 v10, -4, v10
+; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v10
+; GCN-NEXT: v_and_b32_e32 v11, -4, v11
+; GCN-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v14, vcc
+; GCN-NEXT: v_sub_co_u32_e32 v4, vcc, v4, v11
+; GCN-NEXT: v_and_b32_e32 v12, -4, v12
+; GCN-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v15, vcc
+; GCN-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v12
+; GCN-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v16, vcc
+; GCN-NEXT: global_store_dwordx4 v8, v[4:7], s[0:1] offset:16
+; GCN-NEXT: global_store_dwordx4 v8, v[0:3], s[0:1]
+; GCN-NEXT: s_endpgm
+;
+; TAHITI-LABEL: srem_v4i64_4:
+; TAHITI: ; %bb.0:
+; TAHITI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; TAHITI-NEXT: s_mov_b32 s3, 0xf000
+; TAHITI-NEXT: s_mov_b32 s2, -1
+; TAHITI-NEXT: s_mov_b32 s10, s2
+; TAHITI-NEXT: s_mov_b32 s11, s3
+; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
+; TAHITI-NEXT: s_mov_b32 s8, s6
+; TAHITI-NEXT: s_mov_b32 s9, s7
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; TAHITI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; TAHITI-NEXT: s_mov_b32 s0, s4
+; TAHITI-NEXT: s_mov_b32 s1, s5
+; TAHITI-NEXT: s_waitcnt vmcnt(1)
+; TAHITI-NEXT: v_ashrrev_i32_e32 v8, 31, v1
+; TAHITI-NEXT: v_lshrrev_b32_e32 v8, 30, v8
+; TAHITI-NEXT: v_ashrrev_i32_e32 v9, 31, v3
+; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v0, v8
+; TAHITI-NEXT: v_lshrrev_b32_e32 v9, 30, v9
+; TAHITI-NEXT: v_addc_u32_e32 v12, vcc, 0, v1, vcc
+; TAHITI-NEXT: s_waitcnt vmcnt(0)
+; TAHITI-NEXT: v_ashrrev_i32_e32 v10, 31, v5
+; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v2, v9
+; TAHITI-NEXT: v_lshrrev_b32_e32 v10, 30, v10
+; TAHITI-NEXT: v_addc_u32_e32 v13, vcc, 0, v3, vcc
+; TAHITI-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v4, v10
+; TAHITI-NEXT: v_lshrrev_b32_e32 v11, 30, v11
+; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v5, vcc
+; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v6, v11
+; TAHITI-NEXT: v_addc_u32_e32 v15, vcc, 0, v7, vcc
+; TAHITI-NEXT: v_and_b32_e32 v8, -4, v8
+; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; TAHITI-NEXT: v_and_b32_e32 v9, -4, v9
+; TAHITI-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; TAHITI-NEXT: v_sub_i32_e32 v2, vcc, v2, v9
+; TAHITI-NEXT: v_and_b32_e32 v10, -4, v10
+; TAHITI-NEXT: v_subb_u32_e32 v3, vcc, v3, v13, vcc
+; TAHITI-NEXT: v_sub_i32_e32 v4, vcc, v4, v10
+; TAHITI-NEXT: v_and_b32_e32 v11, -4, v11
+; TAHITI-NEXT: v_subb_u32_e32 v5, vcc, v5, v14, vcc
+; TAHITI-NEXT: v_sub_i32_e32 v6, vcc, v6, v11
+; TAHITI-NEXT: v_subb_u32_e32 v7, vcc, v7, v15, vcc
+; TAHITI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; TAHITI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; TAHITI-NEXT: s_endpgm
+;
+; TONGA-LABEL: srem_v4i64_4:
+; TONGA: ; %bb.0:
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; TONGA-NEXT: s_waitcnt lgkmcnt(0)
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: s_add_u32 s4, s2, 16
+; TONGA-NEXT: s_addc_u32 s5, s3, 0
+; TONGA-NEXT: v_mov_b32_e32 v4, s4
+; TONGA-NEXT: v_mov_b32_e32 v5, s5
+; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; TONGA-NEXT: v_mov_b32_e32 v9, s1
+; TONGA-NEXT: v_mov_b32_e32 v8, s0
+; TONGA-NEXT: s_add_u32 s0, s0, 16
+; TONGA-NEXT: s_addc_u32 s1, s1, 0
+; TONGA-NEXT: v_mov_b32_e32 v11, s1
+; TONGA-NEXT: v_mov_b32_e32 v10, s0
+; TONGA-NEXT: s_waitcnt vmcnt(1)
+; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v1
+; TONGA-NEXT: v_lshrrev_b32_e32 v12, 30, v12
+; TONGA-NEXT: v_ashrrev_i32_e32 v13, 31, v3
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v0, v12
+; TONGA-NEXT: v_lshrrev_b32_e32 v13, 30, v13
+; TONGA-NEXT: v_addc_u32_e32 v16, vcc, 0, v1, vcc
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_ashrrev_i32_e32 v14, 31, v5
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, v2, v13
+; TONGA-NEXT: v_lshrrev_b32_e32 v14, 30, v14
+; TONGA-NEXT: v_addc_u32_e32 v17, vcc, 0, v3, vcc
+; TONGA-NEXT: v_ashrrev_i32_e32 v15, 31, v7
+; TONGA-NEXT: v_add_u32_e32 v14, vcc, v4, v14
+; TONGA-NEXT: v_lshrrev_b32_e32 v15, 30, v15
+; TONGA-NEXT: v_addc_u32_e32 v18, vcc, 0, v5, vcc
+; TONGA-NEXT: v_add_u32_e32 v15, vcc, v6, v15
+; TONGA-NEXT: v_addc_u32_e32 v19, vcc, 0, v7, vcc
+; TONGA-NEXT: v_and_b32_e32 v12, -4, v12
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v12
+; TONGA-NEXT: v_and_b32_e32 v13, -4, v13
+; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v1, v16, vcc
+; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v13
+; TONGA-NEXT: v_and_b32_e32 v14, -4, v14
+; TONGA-NEXT: v_subb_u32_e32 v3, vcc, v3, v17, vcc
+; TONGA-NEXT: v_sub_u32_e32 v4, vcc, v4, v14
+; TONGA-NEXT: v_and_b32_e32 v15, -4, v15
+; TONGA-NEXT: v_subb_u32_e32 v5, vcc, v5, v18, vcc
+; TONGA-NEXT: v_sub_u32_e32 v6, vcc, v6, v15
+; TONGA-NEXT: v_subb_u32_e32 v7, vcc, v7, v19, vcc
+; TONGA-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
+; TONGA-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
+; TONGA-NEXT: s_endpgm
+;
+; EG-LABEL: srem_v4i64_4:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 48, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T3.XYZW, T0.X, 0
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T4.XYZW, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_128 T1.XYZW, T0.X, 16, #1
+; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: ASHR * T1.W, T1.W, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: ASHR T2.Z, T0.Y, literal.x,
+; EG-NEXT: ASHR T2.W, T1.Y, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: LSHR * T1.W, PV.W, literal.y,
+; EG-NEXT: 31(4.344025e-44), 30(4.203895e-44)
+; EG-NEXT: ADD_INT T0.Y, T1.Z, PS,
+; EG-NEXT: ASHR T3.Z, T0.W, literal.x,
+; EG-NEXT: LSHR T0.W, PV.W, literal.y,
+; EG-NEXT: LSHR * T2.W, PV.Z, literal.y,
+; EG-NEXT: 31(4.344025e-44), 30(4.203895e-44)
+; EG-NEXT: ADD_INT T2.X, T0.X, PS,
+; EG-NEXT: ADD_INT T1.Y, T1.X, PV.W, BS:VEC_120/SCL_212
+; EG-NEXT: LSHR T2.Z, PV.Z, literal.x,
+; EG-NEXT: ADDC_UINT T1.W, T1.Z, T1.W,
+; EG-NEXT: AND_INT * T3.W, PV.Y, literal.y,
+; EG-NEXT: 30(4.203895e-44), -4(nan)
+; EG-NEXT: SUBB_UINT T3.X, T1.Z, PS,
+; EG-NEXT: BFE_INT T0.Y, PV.W, 0.0, 1,
+; EG-NEXT: ADD_INT T3.Z, T0.Z, PV.Z, BS:VEC_120/SCL_212
+; EG-NEXT: ADDC_UINT T0.W, T1.X, T0.W,
+; EG-NEXT: AND_INT * T1.W, PV.Y, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: ADDC_UINT T4.X, T0.Z, T2.Z,
+; EG-NEXT: SUBB_UINT T1.Y, T1.X, PS,
+; EG-NEXT: BFE_INT T2.Z, PV.W, 0.0, 1,
+; EG-NEXT: AND_INT T0.W, PV.Z, literal.x,
+; EG-NEXT: SUB_INT * T4.W, PV.Y, PV.X,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T3.X, T0.Z, PV.W,
+; EG-NEXT: SUB_INT T4.Y, PV.Z, PV.Y,
+; EG-NEXT: BFE_INT T2.Z, PV.X, 0.0, 1,
+; EG-NEXT: ADDC_UINT T2.W, T0.X, T2.W,
+; EG-NEXT: AND_INT * T5.W, T2.X, literal.x,
+; EG-NEXT: -4(nan), 0(0.000000e+00)
+; EG-NEXT: SUBB_UINT T0.Y, T0.X, PS,
+; EG-NEXT: SUB_INT T4.Z, T1.Z, T3.W,
+; EG-NEXT: BFE_INT T2.W, PV.W, 0.0, 1,
+; EG-NEXT: SUB_INT * T3.W, PV.Z, PV.X,
+; EG-NEXT: SUB_INT T4.X, T1.X, T1.W,
+; EG-NEXT: SUB_INT T3.Y, PV.W, PV.Y,
+; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.x,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR T1.X, PV.W, literal.x,
+; EG-NEXT: SUB_INT T3.Z, T0.Z, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT * T3.X, T0.X, T5.W,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%num = load <4 x i64>, ptr addrspace(1) %in
%result = srem <4 x i64> %num, <i64 4, i64 4, i64 4, i64 4>
store <4 x i64> %result, ptr addrspace(1) %out
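[Editor's note: for readers tracing the Evergreen output for srem_v4i64 earlier
in this patch, the long runs of BFE_UINT, SETGE_UINT, SUBB_UINT and CNDE_INT
clauses are an unrolled bit-serial restoring division, with XOR/SUB fixups at
the end that give the remainder the sign of the dividend. A rough C++ model of
that expansion strategy, assuming standard restoring division (an illustration
only, not the backend's code; the helper names are made up):

  #include <cstdint>

  // One quotient bit per step, 64 steps for i64: shift the next dividend
  // bit into the partial remainder, then conditionally subtract (the
  // SETGE_UINT / SUB_INT / CNDE_INT pattern in the EG clauses).
  uint64_t urem64(uint64_t num, uint64_t den) {
    uint64_t rem = 0;
    for (int i = 63; i >= 0; --i) {
      rem = (rem << 1) | ((num >> i) & 1);
      if (rem >= den)
        rem -= den;
    }
    return rem;
  }

  // Signed wrapper: the remainder takes the sign of the dividend, which
  // is what the trailing XOR_INT / SUBB_UINT / SUB_INT fixups implement.
  int64_t srem64(int64_t num, int64_t den) {
    uint64_t ua = num < 0 ? 0 - (uint64_t)num : (uint64_t)num;
    uint64_t ub = den < 0 ? 0 - (uint64_t)den : (uint64_t)den;
    uint64_t r = urem64(ua, ub);
    return num < 0 ? -(int64_t)r : (int64_t)r;
  }

The srem_v4i64_4 case just above never needs that loop: a power-of-two divisor
folds to a bias-and-mask sequence, which is what the GCN/TAHITI/TONGA checks
show (ashr/lshr building the bias, and with -4, then subtract). The same trick
in scalar form:

  // x % 4 without division: bias negative inputs by 3 so the masking
  // rounds toward zero, then subtract the rounded value.
  int64_t srem4(int64_t x) {
    uint64_t bias = (uint64_t)(x >> 63) >> 62; // 3 if x < 0, else 0
    int64_t rounded = (int64_t)(((uint64_t)x + bias) & ~(uint64_t)3);
    return x - rounded;
  }]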
>From 344228ebf45f9bd1f7626fdcd3c0fada0f0c8385 Mon Sep 17 00:00:00 2001
From: Amir Ayupov <aaupov at fb.com>
Date: Tue, 2 Jul 2024 09:18:59 -0700
Subject: [PATCH 009/246] [BOLT] Drop macro-fusion alignment (#97358)
9d0754ada5dbbc0c009bcc2f7824488419cc5530 dropped the MC support required for
optimal macro-fusion alignment in BOLT. Remove the corresponding support from
BOLT, as performance measurements on large binaries did not show a significant
improvement.
Test Plan:
Macro-fusion alignment was never upstreamed, so no upstream tests are
affected.
---
bolt/include/bolt/Core/BinaryBasicBlock.h | 9 -----
bolt/include/bolt/Core/BinaryContext.h | 4 --
bolt/include/bolt/Core/BinaryFunction.h | 4 --
bolt/include/bolt/Core/MCPlusBuilder.h | 7 ----
bolt/lib/Core/BinaryBasicBlock.cpp | 39 -------------------
bolt/lib/Core/BinaryEmitter.cpp | 37 ------------------
bolt/lib/Core/BinaryFunction.cpp | 25 ------------
bolt/lib/Passes/BinaryPasses.cpp | 20 ----------
bolt/lib/Rewrite/RewriteInstance.cpp | 22 -----------
.../Target/AArch64/AArch64MCPlusBuilder.cpp | 4 --
bolt/lib/Target/X86/X86MCPlusBuilder.cpp | 34 ----------------
11 files changed, 205 deletions(-)
diff --git a/bolt/include/bolt/Core/BinaryBasicBlock.h b/bolt/include/bolt/Core/BinaryBasicBlock.h
index a57b70714fe38..9a9d7b8735d71 100644
--- a/bolt/include/bolt/Core/BinaryBasicBlock.h
+++ b/bolt/include/bolt/Core/BinaryBasicBlock.h
@@ -842,15 +842,6 @@ class BinaryBasicBlock {
bool analyzeBranch(const MCSymbol *&TBB, const MCSymbol *&FBB,
MCInst *&CondBranch, MCInst *&UncondBranch);
- /// Return true if iterator \p I is pointing to the first instruction in
- /// a pair that could be macro-fused.
- bool isMacroOpFusionPair(const_iterator I) const;
-
- /// If the basic block has a pair of instructions suitable for macro-fusion,
- /// return iterator to the first instruction of the pair.
- /// Otherwise return end().
- const_iterator getMacroOpFusionPair() const;
-
/// Printer required for printing dominator trees.
void printAsOperand(raw_ostream &OS, bool PrintType = true) {
if (PrintType)
diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h
index 4ec3de3da1bf8..73932c4ca2fb3 100644
--- a/bolt/include/bolt/Core/BinaryContext.h
+++ b/bolt/include/bolt/Core/BinaryContext.h
@@ -698,10 +698,6 @@ class BinaryContext {
/// Binary-wide aggregated stats.
struct BinaryStats {
- /// Stats for macro-fusion.
- uint64_t MissedMacroFusionPairs{0};
- uint64_t MissedMacroFusionExecCount{0};
-
/// Stats for stale profile matching:
/// the total number of basic blocks in the profile
uint32_t NumStaleBlocks{0};
diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h
index 1048e50cd8058..7d0afee4d1b78 100644
--- a/bolt/include/bolt/Core/BinaryFunction.h
+++ b/bolt/include/bolt/Core/BinaryFunction.h
@@ -835,10 +835,6 @@ class BinaryFunction {
/// them.
void calculateLoopInfo();
- /// Calculate missed macro-fusion opportunities and update BinaryContext
- /// stats.
- void calculateMacroOpFusionStats();
-
/// Returns if BinaryDominatorTree has been constructed for this function.
bool hasDomTree() const { return BDT != nullptr; }
diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h
index 83f4d4c649fd8..ab07f07e49845 100644
--- a/bolt/include/bolt/Core/MCPlusBuilder.h
+++ b/bolt/include/bolt/Core/MCPlusBuilder.h
@@ -930,13 +930,6 @@ class MCPlusBuilder {
/// Return true if the instruction is encoded using EVEX (AVX-512).
virtual bool hasEVEXEncoding(const MCInst &Inst) const { return false; }
- /// Return true if a pair of instructions represented by \p Insts
- /// could be fused into a single uop.
- virtual bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const {
- llvm_unreachable("not implemented");
- return false;
- }
-
struct X86MemOperand {
unsigned BaseRegNum;
int64_t ScaleImm;
diff --git a/bolt/lib/Core/BinaryBasicBlock.cpp b/bolt/lib/Core/BinaryBasicBlock.cpp
index a4b9a7f558cd8..2a2192b79bb4b 100644
--- a/bolt/lib/Core/BinaryBasicBlock.cpp
+++ b/bolt/lib/Core/BinaryBasicBlock.cpp
@@ -404,45 +404,6 @@ bool BinaryBasicBlock::analyzeBranch(const MCSymbol *&TBB, const MCSymbol *&FBB,
CondBranch, UncondBranch);
}
-bool BinaryBasicBlock::isMacroOpFusionPair(const_iterator I) const {
- auto &MIB = Function->getBinaryContext().MIB;
- ArrayRef<MCInst> Insts = Instructions;
- return MIB->isMacroOpFusionPair(Insts.slice(I - begin()));
-}
-
-BinaryBasicBlock::const_iterator
-BinaryBasicBlock::getMacroOpFusionPair() const {
- if (!Function->getBinaryContext().isX86())
- return end();
-
- if (getNumNonPseudos() < 2 || succ_size() != 2)
- return end();
-
- auto RI = getLastNonPseudo();
- assert(RI != rend() && "cannot have an empty block with 2 successors");
-
- BinaryContext &BC = Function->getBinaryContext();
-
- // Skip instruction if it's an unconditional branch following
- // a conditional one.
- if (BC.MIB->isUnconditionalBranch(*RI))
- ++RI;
-
- if (!BC.MIB->isConditionalBranch(*RI))
- return end();
-
- // Start checking with instruction preceding the conditional branch.
- ++RI;
- if (RI == rend())
- return end();
-
- auto II = std::prev(RI.base()); // convert to a forward iterator
- if (isMacroOpFusionPair(II))
- return II;
-
- return end();
-}
-
MCInst *BinaryBasicBlock::getTerminatorBefore(MCInst *Pos) {
BinaryContext &BC = Function->getBinaryContext();
auto Itr = rbegin();
diff --git a/bolt/lib/Core/BinaryEmitter.cpp b/bolt/lib/Core/BinaryEmitter.cpp
index 5793963f9b80d..f6dfa249f9a9f 100644
--- a/bolt/lib/Core/BinaryEmitter.cpp
+++ b/bolt/lib/Core/BinaryEmitter.cpp
@@ -38,19 +38,6 @@ extern cl::opt<bool> PreserveBlocksAlignment;
cl::opt<bool> AlignBlocks("align-blocks", cl::desc("align basic blocks"),
cl::cat(BoltOptCategory));
-cl::opt<MacroFusionType>
-AlignMacroOpFusion("align-macro-fusion",
- cl::desc("fix instruction alignment for macro-fusion (x86 relocation mode)"),
- cl::init(MFT_HOT),
- cl::values(clEnumValN(MFT_NONE, "none",
- "do not insert alignment no-ops for macro-fusion"),
- clEnumValN(MFT_HOT, "hot",
- "only insert alignment no-ops on hot execution paths (default)"),
- clEnumValN(MFT_ALL, "all",
- "always align instructions to allow macro-fusion")),
- cl::ZeroOrMore,
- cl::cat(BoltRelocCategory));
-
static cl::list<std::string>
BreakFunctionNames("break-funcs",
cl::CommaSeparated,
@@ -453,20 +440,7 @@ void BinaryEmitter::emitFunctionBody(BinaryFunction &BF, FunctionFragment &FF,
Streamer.emitLabel(EntrySymbol);
}
- // Check if special alignment for macro-fusion is needed.
- bool MayNeedMacroFusionAlignment =
- (opts::AlignMacroOpFusion == MFT_ALL) ||
- (opts::AlignMacroOpFusion == MFT_HOT && BB->getKnownExecutionCount());
- BinaryBasicBlock::const_iterator MacroFusionPair;
- if (MayNeedMacroFusionAlignment) {
- MacroFusionPair = BB->getMacroOpFusionPair();
- if (MacroFusionPair == BB->end())
- MayNeedMacroFusionAlignment = false;
- }
-
SMLoc LastLocSeen;
- // Remember if the last instruction emitted was a prefix.
- bool LastIsPrefix = false;
for (auto I = BB->begin(), E = BB->end(); I != E; ++I) {
MCInst &Instr = *I;
@@ -479,16 +453,6 @@ void BinaryEmitter::emitFunctionBody(BinaryFunction &BF, FunctionFragment &FF,
continue;
}
- // Handle macro-fusion alignment. If we emitted a prefix as
- // the last instruction, we should've already emitted the associated
- // alignment hint, so don't emit it twice.
- if (MayNeedMacroFusionAlignment && !LastIsPrefix &&
- I == MacroFusionPair) {
- // This assumes the second instruction in the macro-op pair will get
- // assigned to its own MCRelaxableFragment. Since all JCC instructions
- // are relaxable, we should be safe.
- }
-
if (!EmitCodeOnly) {
// A symbol to be emitted before the instruction to mark its location.
MCSymbol *InstrLabel = BC.MIB->getInstLabel(Instr);
@@ -525,7 +489,6 @@ void BinaryEmitter::emitFunctionBody(BinaryFunction &BF, FunctionFragment &FF,
}
Streamer.emitInstruction(Instr, *BC.STI);
- LastIsPrefix = BC.MIB->isPrefix(Instr);
}
}
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index f8530c424b8e9..89c05e3ab005b 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -2279,8 +2279,6 @@ void BinaryFunction::postProcessCFG() {
postProcessBranches();
}
- calculateMacroOpFusionStats();
-
// The final cleanup of intermediate structures.
clearList(IgnoredBranches);
@@ -2297,29 +2295,6 @@ void BinaryFunction::postProcessCFG() {
"invalid CFG detected after post-processing");
}
-void BinaryFunction::calculateMacroOpFusionStats() {
- if (!getBinaryContext().isX86())
- return;
- for (const BinaryBasicBlock &BB : blocks()) {
- auto II = BB.getMacroOpFusionPair();
- if (II == BB.end())
- continue;
-
- // Check offset of the second instruction.
- // FIXME: arch-specific.
- const uint32_t Offset = BC.MIB->getOffsetWithDefault(*std::next(II), 0);
- if (!Offset || (getAddress() + Offset) % 64)
- continue;
-
- LLVM_DEBUG(dbgs() << "\nmissed macro-op fusion at address 0x"
- << Twine::utohexstr(getAddress() + Offset)
- << " in function " << *this << "; executed "
- << BB.getKnownExecutionCount() << " times.\n");
- ++BC.Stats.MissedMacroFusionPairs;
- BC.Stats.MissedMacroFusionExecCount += BB.getKnownExecutionCount();
- }
-}
-
void BinaryFunction::removeTagsFromProfile() {
for (BinaryBasicBlock *BB : BasicBlocks) {
if (BB->ExecutionCount == BinaryBasicBlock::COUNT_NO_PROFILE)
diff --git a/bolt/lib/Passes/BinaryPasses.cpp b/bolt/lib/Passes/BinaryPasses.cpp
index ecc2c08a30324..fa95ad7324ac1 100644
--- a/bolt/lib/Passes/BinaryPasses.cpp
+++ b/bolt/lib/Passes/BinaryPasses.cpp
@@ -44,7 +44,6 @@ namespace opts {
extern cl::OptionCategory BoltCategory;
extern cl::OptionCategory BoltOptCategory;
-extern cl::opt<bolt::MacroFusionType> AlignMacroOpFusion;
extern cl::opt<unsigned> Verbosity;
extern cl::opt<bool> EnableBAT;
extern cl::opt<unsigned> ExecutionCountThreshold;
@@ -1637,25 +1636,6 @@ Error PrintProgramStats::runOnFunctions(BinaryContext &BC) {
}
}
- // Print information on missed macro-fusion opportunities seen on input.
- if (BC.Stats.MissedMacroFusionPairs) {
- BC.outs() << format(
- "BOLT-INFO: the input contains %zu (dynamic count : %zu)"
- " opportunities for macro-fusion optimization",
- BC.Stats.MissedMacroFusionPairs, BC.Stats.MissedMacroFusionExecCount);
- switch (opts::AlignMacroOpFusion) {
- case MFT_NONE:
- BC.outs() << ". Use -align-macro-fusion to fix.\n";
- break;
- case MFT_HOT:
- BC.outs() << ". Will fix instances on a hot path.\n";
- break;
- case MFT_ALL:
- BC.outs() << " that are going to be fixed\n";
- break;
- }
- }
-
// Collect and print information about suboptimal code layout on input.
if (opts::ReportBadLayout) {
std::vector<BinaryFunction *> SuboptimalFuncs;
diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp
index 8248c1cd7fadb..ded2f577237fe 100644
--- a/bolt/lib/Rewrite/RewriteInstance.cpp
+++ b/bolt/lib/Rewrite/RewriteInstance.cpp
@@ -75,7 +75,6 @@ extern cl::opt<bool> X86AlignBranchWithin32BBoundaries;
namespace opts {
-extern cl::opt<MacroFusionType> AlignMacroOpFusion;
extern cl::list<std::string> HotTextMoveSections;
extern cl::opt<bool> Hugify;
extern cl::opt<bool> Instrument;
@@ -1969,12 +1968,6 @@ void RewriteInstance::adjustCommandLineOptions() {
if (RuntimeLibrary *RtLibrary = BC->getRuntimeLibrary())
RtLibrary->adjustCommandLineOptions(*BC);
- if (opts::AlignMacroOpFusion != MFT_NONE && !BC->isX86()) {
- BC->outs()
- << "BOLT-INFO: disabling -align-macro-fusion on non-x86 platform\n";
- opts::AlignMacroOpFusion = MFT_NONE;
- }
-
if (BC->isX86() && BC->MAB->allowAutoPadding()) {
if (!BC->HasRelocations) {
BC->errs()
@@ -1985,13 +1978,6 @@ void RewriteInstance::adjustCommandLineOptions() {
BC->outs()
<< "BOLT-WARNING: using mitigation for Intel JCC erratum, layout "
"may take several minutes\n";
- opts::AlignMacroOpFusion = MFT_NONE;
- }
-
- if (opts::AlignMacroOpFusion != MFT_NONE && !BC->HasRelocations) {
- BC->outs() << "BOLT-INFO: disabling -align-macro-fusion in non-relocation "
- "mode\n";
- opts::AlignMacroOpFusion = MFT_NONE;
}
if (opts::SplitEH && !BC->HasRelocations) {
@@ -2013,14 +1999,6 @@ void RewriteInstance::adjustCommandLineOptions() {
opts::StrictMode = true;
}
- if (BC->isX86() && BC->HasRelocations &&
- opts::AlignMacroOpFusion == MFT_HOT && !ProfileReader) {
- BC->outs()
- << "BOLT-INFO: enabling -align-macro-fusion=all since no profile "
- "was specified\n";
- opts::AlignMacroOpFusion = MFT_ALL;
- }
-
if (!BC->HasRelocations &&
opts::ReorderFunctions != ReorderFunctions::RT_NONE) {
BC->errs() << "BOLT-ERROR: function reordering only works when "
diff --git a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
index 5220d305b838d..06f79dee3378f 100644
--- a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
+++ b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
@@ -141,10 +141,6 @@ class AArch64MCPlusBuilder : public MCPlusBuilder {
*AArch64ExprB.getSubExpr(), Comp);
}
- bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const override {
- return false;
- }
-
bool shortenInstruction(MCInst &, const MCSubtargetInfo &) const override {
return false;
}
diff --git a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
index 5bd77958934f9..37136f4a5c551 100644
--- a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
+++ b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
@@ -661,40 +661,6 @@ class X86MCPlusBuilder : public MCPlusBuilder {
return (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
}
- bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const override {
- const auto *I = Insts.begin();
- while (I != Insts.end() && isPrefix(*I))
- ++I;
- if (I == Insts.end())
- return false;
-
- const MCInst &FirstInst = *I;
- ++I;
- while (I != Insts.end() && isPrefix(*I))
- ++I;
- if (I == Insts.end())
- return false;
- const MCInst &SecondInst = *I;
-
- if (!isConditionalBranch(SecondInst))
- return false;
- // Cannot fuse if the first instruction uses RIP-relative memory.
- if (hasPCRelOperand(FirstInst))
- return false;
-
- const X86::FirstMacroFusionInstKind CmpKind =
- X86::classifyFirstOpcodeInMacroFusion(FirstInst.getOpcode());
- if (CmpKind == X86::FirstMacroFusionInstKind::Invalid)
- return false;
-
- X86::CondCode CC = static_cast<X86::CondCode>(getCondCode(SecondInst));
- X86::SecondMacroFusionInstKind BranchKind =
- X86::classifySecondCondCodeInMacroFusion(CC);
- if (BranchKind == X86::SecondMacroFusionInstKind::Invalid)
- return false;
- return X86::isMacroFused(CmpKind, BranchKind);
- }
-
std::optional<X86MemOperand>
evaluateX86MemoryOperand(const MCInst &Inst) const override {
int MemOpNo = getMemoryOperandNo(Inst);
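[Editor's note: for context on the feature being removed, x86 macro-fusion
lets the decoder merge a flag-setting instruction (CMP/TEST/etc.) with an
adjacent conditional branch into a single micro-op, but the pair must not be
split across a 64-byte cache line; that is why BOLT inserted alignment no-ops,
and why calculateMacroOpFusionStats above treated a pair as a missed
opportunity when the branch landed exactly on a 64-byte boundary. A minimal
sketch of that boundary test, using a hypothetical helper that mirrors the
(getAddress() + Offset) % 64 check in the deleted code:

  #include <cstdint>

  // A fusion opportunity is "missed" when the conditional branch starts
  // exactly at a cache-line (64-byte) boundary, i.e. the flag-setting
  // instruction and the branch sit on different lines. A zero offset
  // means the branch location is unknown, so it is not counted.
  bool missedFusionAt(uint64_t FuncAddr, uint32_t BranchOffset) {
    return BranchOffset != 0 && (FuncAddr + BranchOffset) % 64 == 0;
  }]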
>From e3500ea48868651b3d1ef79a7f6a57d45877414a Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 2 Jul 2024 09:26:31 -0700
Subject: [PATCH 010/246] [LangRef] Replace 'the argument' with 'the first
argument' in llvm.abs description. NFC (#97386)
Make it more clear which argument we're referring to.
Similar for vp.abs.
---
llvm/docs/LangRef.rst | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index e2c47204e628f..426f56d22dd79 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -14740,16 +14740,16 @@ The return type must match the first argument type.
The second argument must be a constant and is a flag to indicate whether the
result value of the '``llvm.abs``' intrinsic is a
-:ref:`poison value <poisonvalues>` if the argument is statically or dynamically
-an ``INT_MIN`` value.
+:ref:`poison value <poisonvalues>` if the first argument is statically or
+dynamically an ``INT_MIN`` value.
Semantics:
""""""""""
The '``llvm.abs``' intrinsic returns the magnitude (always positive) of the
-argument or each element of a vector argument.". If the argument is ``INT_MIN``,
-then the result is also ``INT_MIN`` if ``is_int_min_poison == 0`` and
-``poison`` otherwise.
+first argument or each element of a vector argument.". If the first argument is
+``INT_MIN``, then the result is also ``INT_MIN`` if ``is_int_min_poison == 0``
+and ``poison`` otherwise.
.. _int_smax:
@@ -20812,10 +20812,10 @@ Arguments:
The first operand and the result have the same vector of integer type. The
second operand is the vector mask and has the same number of elements as the
result vector type. The third operand is the explicit vector length of the
-operation. The fourth argument must be a constant and is a flag to indicate
+operation. The fourth operand must be a constant and is a flag to indicate
whether the result value of the '``llvm.vp.abs``' intrinsic is a
-:ref:`poison value <poisonvalues>` if the argument is statically or dynamically
-an ``INT_MIN`` value.
+:ref:`poison value <poisonvalues>` if the first argument is statically or
+dynamically an ``INT_MIN`` value.
Semantics:
""""""""""
>From e414bf9fffcb9b6010c7eb08406696a9de931d66 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 2 Jul 2024 09:26:54 -0700
Subject: [PATCH 011/246] [LangRef] Document the fourth operand for
vp.cttz/ctlz. NFC (#97387)
---
llvm/docs/LangRef.rst | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 426f56d22dd79..b6f8a24937df6 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24503,7 +24503,9 @@ Arguments:
The first operand and the result have the same vector of integer type. The
second operand is the vector mask and has the same number of elements as the
result vector type. The third operand is the explicit vector length of the
-operation.
+operation. The fourth operand is a constant flag that indicates whether the
+intrinsic returns a valid result if the first operand is zero. If the first
+operand is zero and the fourth operand is true, the result is poison.
Semantics:
""""""""""
@@ -24550,7 +24552,9 @@ Arguments:
The first operand and the result have the same vector of integer type. The
second operand is the vector mask and has the same number of elements as the
result vector type. The third operand is the explicit vector length of the
-operation.
+operation. The fourth operand is a constant flag that indicates whether the
+intrinsic returns a valid result if the first operand is zero. If the first
+operand is zero and the fourth operand is true, the result is poison.
Semantics:
""""""""""
>From 7359edbc0981280e029701aa1ddee7ed313126dc Mon Sep 17 00:00:00 2001
From: Michael Klemm <michael.klemm at amd.com>
Date: Tue, 2 Jul 2024 18:37:33 +0200
Subject: [PATCH 012/246] [Flang][runtime] Distinguish CPU time and elapsed
time for cpu_time and system_clock (#96652)
The current implementation of `system_clock()` returns the CPU time
instead of elapsed wall-clock time. This PR fixes the issue and makes
`system_clock()` correctly return elapsed time.
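For context, a minimal sketch of the distinction the patch draws, assuming a POSIX system where both clock IDs are available; the program below is illustrative, not part of the patch:
```
#include <cstdio>
#include <ctime>

int main() {
  timespec cpu{}, wall{};
  clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu); // time spent on-CPU (cpu_time)
  clock_gettime(CLOCK_MONOTONIC, &wall);         // elapsed wall-clock time (system_clock)
  std::printf("cpu=%lld.%09ld wall=%lld.%09ld\n",
              static_cast<long long>(cpu.tv_sec), cpu.tv_nsec,
              static_cast<long long>(wall.tv_sec), wall.tv_nsec);
}
```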
---
flang/runtime/time-intrinsic.cpp | 113 +++++++++++++++++--------------
flang/test/Runtime/no-cpp-dep.c | 2 +
2 files changed, 64 insertions(+), 51 deletions(-)
diff --git a/flang/runtime/time-intrinsic.cpp b/flang/runtime/time-intrinsic.cpp
index a141fe63764a7..ac372b249fa4b 100644
--- a/flang/runtime/time-intrinsic.cpp
+++ b/flang/runtime/time-intrinsic.cpp
@@ -64,20 +64,29 @@ template <typename Unused = void> double GetCpuTime(fallback_implementation) {
// clock_gettime is implemented in the pthread library for MinGW.
// Using it here would mean that all programs that link libFortranRuntime are
// required to also link to pthread. Instead, don't use the function.
-#undef CLOCKID
-#elif defined CLOCK_PROCESS_CPUTIME_ID
-#define CLOCKID CLOCK_PROCESS_CPUTIME_ID
+#undef CLOCKID_CPU_TIME
+#undef CLOCKID_ELAPSED_TIME
+#else
+// Determine what clock to use for CPU time.
+#if defined CLOCK_PROCESS_CPUTIME_ID
+#define CLOCKID_CPU_TIME CLOCK_PROCESS_CPUTIME_ID
#elif defined CLOCK_THREAD_CPUTIME_ID
-#define CLOCKID CLOCK_THREAD_CPUTIME_ID
-#elif defined CLOCK_MONOTONIC
-#define CLOCKID CLOCK_MONOTONIC
+#define CLOCKID_CPU_TIME CLOCK_THREAD_CPUTIME_ID
+#else
+#undef CLOCKID_CPU_TIME
+#endif
+
+// Determine what clock to use for elapsed time.
+#if defined CLOCK_MONOTONIC
+#define CLOCKID_ELAPSED_TIME CLOCK_MONOTONIC
#elif defined CLOCK_REALTIME
-#define CLOCKID CLOCK_REALTIME
+#define CLOCKID_ELAPSED_TIME CLOCK_REALTIME
#else
-#undef CLOCKID
+#undef CLOCKID_ELAPSED_TIME
+#endif
#endif
-#ifdef CLOCKID
+#ifdef CLOCKID_CPU_TIME
// POSIX implementation using clock_gettime. This is only enabled where
// clock_gettime is available.
template <typename T = int, typename U = struct timespec>
@@ -86,17 +95,26 @@ double GetCpuTime(preferred_implementation,
T ClockId = 0, U *Timespec = nullptr,
decltype(clock_gettime(ClockId, Timespec)) *Enabled = nullptr) {
struct timespec tspec;
- if (clock_gettime(CLOCKID, &tspec) == 0) {
+ if (clock_gettime(CLOCKID_CPU_TIME, &tspec) == 0) {
return tspec.tv_nsec * 1.0e-9 + tspec.tv_sec;
}
// Return some negative value to represent failure.
return -1.0;
}
-#endif
+#endif // CLOCKID_CPU_TIME
using count_t = std::int64_t;
using unsigned_count_t = std::uint64_t;
+// POSIX implementation using clock_gettime where available. The clock_gettime
+// result is in nanoseconds, which is converted as necessary to
+// - deciseconds for kind 1
+// - milliseconds for kinds 2, 4
+// - nanoseconds for kinds 8, 16
+constexpr unsigned_count_t DS_PER_SEC{10u};
+constexpr unsigned_count_t MS_PER_SEC{1'000u};
+constexpr unsigned_count_t NS_PER_SEC{1'000'000'000u};
+
// Computes HUGE(INT(0,kind)) as an unsigned integer value.
static constexpr inline unsigned_count_t GetHUGE(int kind) {
if (kind > 8) {
@@ -105,51 +123,49 @@ static constexpr inline unsigned_count_t GetHUGE(int kind) {
return (unsigned_count_t{1} << ((8 * kind) - 1)) - 1;
}
-// This is the fallback implementation, which should work everywhere. Note that
-// in general we can't recover after std::clock has reached its maximum value.
+// Function converts a std::timespec_t into the desired count to
+// be returned by the timing functions in accordance with the requested
+// kind at the call site.
+count_t ConvertTimeSpecToCount(int kind, const std::timespec &tspec) {
+ const unsigned_count_t huge{GetHUGE(kind)};
+ unsigned_count_t sec{static_cast<unsigned_count_t>(tspec.tv_sec)};
+ unsigned_count_t nsec{static_cast<unsigned_count_t>(tspec.tv_nsec)};
+ if (kind >= 8) {
+ return (sec * NS_PER_SEC + nsec) % (huge + 1);
+ } else if (kind >= 2) {
+ return (sec * MS_PER_SEC + (nsec / (NS_PER_SEC / MS_PER_SEC))) % (huge + 1);
+ } else { // kind == 1
+ return (sec * DS_PER_SEC + (nsec / (NS_PER_SEC / DS_PER_SEC))) % (huge + 1);
+ }
+}
+
+// This is the fallback implementation, which should work everywhere.
template <typename Unused = void>
count_t GetSystemClockCount(int kind, fallback_implementation) {
- std::clock_t timestamp{std::clock()};
- if (timestamp == static_cast<std::clock_t>(-1)) {
+ std::timespec tspec;
+
+ if (std::timespec_get(&tspec, TIME_UTC) < 0) {
// Return -HUGE(COUNT) to represent failure.
return -static_cast<count_t>(GetHUGE(kind));
}
- // Convert the timestamp to std::uint64_t with wrap-around. The timestamp is
- // most likely a floating-point value (since C'11), so compute the modulus
- // carefully when one is required.
- constexpr auto maxUnsignedCount{std::numeric_limits<unsigned_count_t>::max()};
- if constexpr (std::numeric_limits<std::clock_t>::max() > maxUnsignedCount) {
- timestamp -= maxUnsignedCount * std::floor(timestamp / maxUnsignedCount);
- }
- unsigned_count_t unsignedCount{static_cast<unsigned_count_t>(timestamp)};
- // Return the modulus of the unsigned integral count with HUGE(COUNT)+1.
- // The result is a signed integer but never negative.
- return static_cast<count_t>(unsignedCount % (GetHUGE(kind) + 1));
+
+ // Compute the timestamp as seconds plus nanoseconds in accordance
+ // with the requested kind at the call site.
+ return ConvertTimeSpecToCount(kind, tspec);
}
template <typename Unused = void>
count_t GetSystemClockCountRate(int kind, fallback_implementation) {
- return CLOCKS_PER_SEC;
+ return kind >= 8 ? NS_PER_SEC : kind >= 2 ? MS_PER_SEC : DS_PER_SEC;
}
template <typename Unused = void>
count_t GetSystemClockCountMax(int kind, fallback_implementation) {
- constexpr auto max_clock_t{std::numeric_limits<std::clock_t>::max()};
unsigned_count_t maxCount{GetHUGE(kind)};
- return max_clock_t <= maxCount ? static_cast<count_t>(max_clock_t)
- : static_cast<count_t>(maxCount);
+ return maxCount;
}
-// POSIX implementation using clock_gettime where available. The clock_gettime
-// result is in nanoseconds, which is converted as necessary to
-// - deciseconds for kind 1
-// - milliseconds for kinds 2, 4
-// - nanoseconds for kinds 8, 16
-constexpr unsigned_count_t DS_PER_SEC{10u};
-constexpr unsigned_count_t MS_PER_SEC{1'000u};
-constexpr unsigned_count_t NS_PER_SEC{1'000'000'000u};
-
-#ifdef CLOCKID
+#ifdef CLOCKID_ELAPSED_TIME
template <typename T = int, typename U = struct timespec>
count_t GetSystemClockCount(int kind, preferred_implementation,
// We need some dummy parameters to pass to decltype(clock_gettime).
@@ -157,20 +173,15 @@ count_t GetSystemClockCount(int kind, preferred_implementation,
decltype(clock_gettime(ClockId, Timespec)) *Enabled = nullptr) {
struct timespec tspec;
const unsigned_count_t huge{GetHUGE(kind)};
- if (clock_gettime(CLOCKID, &tspec) != 0) {
+ if (clock_gettime(CLOCKID_ELAPSED_TIME, &tspec) != 0) {
return -huge; // failure
}
- unsigned_count_t sec{static_cast<unsigned_count_t>(tspec.tv_sec)};
- unsigned_count_t nsec{static_cast<unsigned_count_t>(tspec.tv_nsec)};
- if (kind >= 8) {
- return (sec * NS_PER_SEC + nsec) % (huge + 1);
- } else if (kind >= 2) {
- return (sec * MS_PER_SEC + (nsec / (NS_PER_SEC / MS_PER_SEC))) % (huge + 1);
- } else { // kind == 1
- return (sec * DS_PER_SEC + (nsec / (NS_PER_SEC / DS_PER_SEC))) % (huge + 1);
- }
+
+ // Compute the timestamp as seconds plus nanoseconds in accordance
+ // with the requested kind at the call site.
+ return ConvertTimeSpecToCount(kind, tspec);
}
-#endif
+#endif // CLOCKID_ELAPSED_TIME
template <typename T = int, typename U = struct timespec>
count_t GetSystemClockCountRate(int kind, preferred_implementation,
diff --git a/flang/test/Runtime/no-cpp-dep.c b/flang/test/Runtime/no-cpp-dep.c
index 654bebed345b1..606a5d189f719 100644
--- a/flang/test/Runtime/no-cpp-dep.c
+++ b/flang/test/Runtime/no-cpp-dep.c
@@ -30,6 +30,7 @@ int32_t RTNAME(ArgumentCount)();
int32_t RTNAME(GetCommandArgument)(int32_t, const struct Descriptor *,
const struct Descriptor *, const struct Descriptor *);
int32_t RTNAME(GetEnvVariable)();
+int64_t RTNAME(SystemClockCount)(int kind);
int main() {
double x = RTNAME(CpuTime)();
@@ -37,5 +38,6 @@ int main() {
int32_t c = RTNAME(ArgumentCount)();
int32_t v = RTNAME(GetCommandArgument)(0, 0, 0, 0);
int32_t e = RTNAME(GetEnvVariable)("FOO", 0, 0);
+ int64_t t = RTNAME(SystemClockCount)(8);
return x + c + v + e;
}
>From 87de49753d4bd860fed5165e9411c703107ad3a5 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 2 Jul 2024 09:39:59 -0700
Subject: [PATCH 013/246] [RISCV] Remove IgnoreUnknown from
RISCVISAInfo::parseArchString. (#97372)
This isn't used in-tree, so I don't know what the expectations for its
behavior really are. The original usage of this feature has been replaced
by parseNormalizedArchString.
---
llvm/include/llvm/TargetParser/RISCVISAInfo.h | 3 +-
llvm/lib/TargetParser/RISCVISAInfo.cpp | 36 ++++----------
.../TargetParser/RISCVISAInfoTest.cpp | 48 -------------------
3 files changed, 9 insertions(+), 78 deletions(-)
diff --git a/llvm/include/llvm/TargetParser/RISCVISAInfo.h b/llvm/include/llvm/TargetParser/RISCVISAInfo.h
index 5e9cf67eddcfd..ba2965600decd 100644
--- a/llvm/include/llvm/TargetParser/RISCVISAInfo.h
+++ b/llvm/include/llvm/TargetParser/RISCVISAInfo.h
@@ -36,8 +36,7 @@ class RISCVISAInfo {
/// default version will be used (as ignoring the base is not possible).
static llvm::Expected<std::unique_ptr<RISCVISAInfo>>
parseArchString(StringRef Arch, bool EnableExperimentalExtension,
- bool ExperimentalExtensionVersionCheck = true,
- bool IgnoreUnknown = false);
+ bool ExperimentalExtensionVersionCheck = true);
/// Parse RISC-V ISA info from an arch string that is already in normalized
/// form (as defined in the psABI). Unlike parseArchString, this function
diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp
index 869be57928890..b4fd067a1ed7a 100644
--- a/llvm/lib/TargetParser/RISCVISAInfo.cpp
+++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp
@@ -522,8 +522,7 @@ RISCVISAInfo::parseNormalizedArchString(StringRef Arch) {
llvm::Expected<std::unique_ptr<RISCVISAInfo>>
RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
- bool ExperimentalExtensionVersionCheck,
- bool IgnoreUnknown) {
+ bool ExperimentalExtensionVersionCheck) {
// RISC-V ISA strings must be [a-z0-9_]
if (!llvm::all_of(
Arch, [](char C) { return isDigit(C) || isLower(C) || C == '_'; }))
@@ -567,7 +566,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
NewArch += ArchWithoutProfile.str();
}
return parseArchString(NewArch, EnableExperimentalExtension,
- ExperimentalExtensionVersionCheck, IgnoreUnknown);
+ ExperimentalExtensionVersionCheck);
}
}
@@ -601,16 +600,8 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
// Baseline is `i` or `e`
if (auto E = getExtensionVersion(
StringRef(&Baseline, 1), Exts, Major, Minor, ConsumeLength,
- EnableExperimentalExtension, ExperimentalExtensionVersionCheck)) {
- if (!IgnoreUnknown)
- return std::move(E);
- // If IgnoreUnknown, then ignore an unrecognised version of the baseline
- // ISA and just use the default supported version.
- consumeError(std::move(E));
- auto Version = findDefaultVersion(StringRef(&Baseline, 1));
- Major = Version->Major;
- Minor = Version->Minor;
- }
+ EnableExperimentalExtension, ExperimentalExtensionVersionCheck))
+ return std::move(E);
// Postpone AddExtension until end of this function
SeenExtMap[StringRef(&Baseline, 1).str()] = {Major, Minor};
@@ -677,11 +668,10 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
Ext = StringRef();
assert(!Type.empty() && "Empty type?");
- if (!IgnoreUnknown && Name.size() == Type.size())
+ if (Name.size() == Type.size())
return createStringError(errc::invalid_argument,
Desc + " name missing after '" + Type + "'");
} else {
- // FIXME: Could it be ignored by IgnoreUnknown?
return createStringError(errc::invalid_argument,
"invalid standard user-level extension '" +
Twine(Ext.front()) + "'");
@@ -690,27 +680,17 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
unsigned Major, Minor, ConsumeLength;
if (auto E = getExtensionVersion(Name, Vers, Major, Minor, ConsumeLength,
EnableExperimentalExtension,
- ExperimentalExtensionVersionCheck)) {
- if (!IgnoreUnknown)
- return E;
-
- consumeError(std::move(E));
- if (Name.size() == 1)
- Ext = Ext.substr(ConsumeLength);
- continue;
- }
+ ExperimentalExtensionVersionCheck))
+ return E;
if (Name.size() == 1)
Ext = Ext.substr(ConsumeLength);
// Check if duplicated extension.
- if (!IgnoreUnknown && SeenExtMap.contains(Name.str()))
+ if (SeenExtMap.contains(Name.str()))
return createStringError(errc::invalid_argument,
"duplicated " + Desc + " '" + Name + "'");
- if (IgnoreUnknown && !RISCVISAInfo::isSupportedExtension(Name))
- continue;
-
SeenExtMap[Name.str()] = {Major, Minor};
} while (!Ext.empty());
}
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index f944ac87b2cb0..eb5437b1ae3d4 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -341,25 +341,6 @@ TEST(ParseArchString, RejectsUnrecognizedExtensionNamesByDefault) {
"unsupported non-standard user-level extension 'xmadeup'");
}
-TEST(ParseArchString, IgnoresUnrecognizedExtensionNamesWithIgnoreUnknown) {
- for (StringRef Input : {"rv32i_zmadeup", "rv64i_smadeup", "rv64i_xmadeup"}) {
- auto MaybeISAInfo = RISCVISAInfo::parseArchString(Input, true, false, true);
- ASSERT_THAT_EXPECTED(MaybeISAInfo, Succeeded());
- RISCVISAInfo &Info = **MaybeISAInfo;
- const auto &Exts = Info.getExtensions();
- EXPECT_EQ(Exts.size(), 1UL);
- EXPECT_TRUE(Exts.at("i") == (RISCVISAUtils::ExtensionVersion{2, 1}));
- }
-
- // Checks that supported extensions aren't incorrectly ignored when a
- // version is present (an early version of the patch had this mistake).
- auto MaybeISAInfo =
- RISCVISAInfo::parseArchString("rv32i_zbc1p0_xmadeup", true, false, true);
- ASSERT_THAT_EXPECTED(MaybeISAInfo, Succeeded());
- const auto &Exts = (*MaybeISAInfo)->getExtensions();
- EXPECT_TRUE(Exts.at("zbc") == (RISCVISAUtils::ExtensionVersion{1, 0}));
-}
-
TEST(ParseArchString, AcceptsVersionInLongOrShortForm) {
for (StringRef Input : {"rv64i2p1"}) {
auto MaybeISAInfo = RISCVISAInfo::parseArchString(Input, true);
@@ -393,35 +374,6 @@ TEST(ParseArchString, RejectsUnrecognizedExtensionVersionsByDefault) {
"unsupported version number 10.10 for extension 'zifencei'");
}
-TEST(ParseArchString,
- UsesDefaultVersionForUnrecognisedBaseISAVersionWithIgnoreUnknown) {
- for (StringRef Input : {"rv32i0p1", "rv32i99p99", "rv64i0p1", "rv64i99p99"}) {
- auto MaybeISAInfo = RISCVISAInfo::parseArchString(Input, true, false, true);
- ASSERT_THAT_EXPECTED(MaybeISAInfo, Succeeded());
- const auto &Exts = (*MaybeISAInfo)->getExtensions();
- EXPECT_EQ(Exts.size(), 1UL);
- EXPECT_TRUE(Exts.at("i") == (RISCVISAUtils::ExtensionVersion{2, 1}));
- }
- for (StringRef Input : {"rv32e0p1", "rv32e99p99", "rv64e0p1", "rv64e99p99"}) {
- auto MaybeISAInfo = RISCVISAInfo::parseArchString(Input, true, false, true);
- ASSERT_THAT_EXPECTED(MaybeISAInfo, Succeeded());
- const auto &Exts = (*MaybeISAInfo)->getExtensions();
- EXPECT_EQ(Exts.size(), 1UL);
- EXPECT_TRUE(Exts.at("e") == (RISCVISAUtils::ExtensionVersion{2, 0}));
- }
-}
-
-TEST(ParseArchString,
- IgnoresExtensionsWithUnrecognizedVersionsWithIgnoreUnknown) {
- for (StringRef Input : {"rv32im1p1", "rv64i_svnapot10p9", "rv32i_zicsr0p5"}) {
- auto MaybeISAInfo = RISCVISAInfo::parseArchString(Input, true, false, true);
- ASSERT_THAT_EXPECTED(MaybeISAInfo, Succeeded());
- const auto &Exts = (*MaybeISAInfo)->getExtensions();
- EXPECT_EQ(Exts.size(), 1UL);
- EXPECT_TRUE(Exts.at("i") == (RISCVISAUtils::ExtensionVersion{2, 1}));
- }
-}
-
TEST(ParseArchString, AcceptsUnderscoreSplittingExtensions) {
for (StringRef Input : {"rv32imafdczifencei", "rv32i_m_a_f_d_c_zifencei"}) {
auto MaybeISAInfo = RISCVISAInfo::parseArchString(Input, true);
>From 7217201dd9cb216e95dfd80dd11ab3785bfe7455 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Tue, 2 Jul 2024 10:02:49 -0700
Subject: [PATCH 014/246] [RISCV] Add coverage of build vectors with rv32 + v +
bitmanip
rv32+v allows cases with XLEN < ELEN, and since we're about to add a
bitmanip-specific lowering, it's good to have coverage of this corner
case.
---
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll | 6 +++++-
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll | 6 +++++-
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index d7ffed3b01ddb..def8fb5abf506 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32V
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+zba,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32VB
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+rva22u64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RVA22U64
@@ -1501,3 +1502,6 @@ define <8 x double> @buildvec_v8f64_zvl512(double %e0, double %e1, double %e2, d
%v7 = insertelement <8 x double> %v6, double %e7, i64 7
ret <8 x double> %v7
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32V: {{.*}}
+; RV32VB: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 6cd69bac46e3c..b3f4cabd56a0e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-ONLY
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zba,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32VB
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V,RV64V-ONLY
; RUN: llc -mtriple=riscv64 -mattr=+v,+rva22u64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V,RVA22U64
; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32
@@ -2171,3 +2172,6 @@ define <16 x i8> @buildvec_v16i8_loads_undef_scattered(ptr %p) {
%v16 = insertelement <16 x i8> %v15, i8 %ld16, i32 15
ret <16 x i8> %v16
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32-ONLY: {{.*}}
+; RV32VB: {{.*}}
>From e94a00c3b8bf81fcd441c868644612fc887c7170 Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Tue, 2 Jul 2024 10:10:31 -0700
Subject: [PATCH 015/246] [MC] Use a stub ctor for MCAsmLayout
and replace MCAssembler::Layout with a bool.
This mostly completes "[MC] Start merging MCAsmLayout into MCAssembler".
Note: BOLT used a dummy `MCAsmLayout` to call `getSymbolOffset`, which
is technically not supported. There is some discussion in
https://reviews.llvm.org/D154604.
The revert f80a4072ced41b52363c63df28fea9a649f7f89e is incorrect and
actually broke bots.
---
llvm/include/llvm/MC/MCAsmLayout.h | 11 +----------
llvm/include/llvm/MC/MCAssembler.h | 5 ++---
llvm/lib/MC/MCAssembler.cpp | 9 ++++-----
llvm/lib/MC/MCExpr.cpp | 4 ++--
llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp | 2 +-
llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp | 2 +-
6 files changed, 11 insertions(+), 22 deletions(-)
diff --git a/llvm/include/llvm/MC/MCAsmLayout.h b/llvm/include/llvm/MC/MCAsmLayout.h
index 765cc1ebb7c79..33fae0a0f9766 100644
--- a/llvm/include/llvm/MC/MCAsmLayout.h
+++ b/llvm/include/llvm/MC/MCAsmLayout.h
@@ -9,21 +9,12 @@
#ifndef LLVM_MC_MCASMLAYOUT_H
#define LLVM_MC_MCASMLAYOUT_H
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
-
namespace llvm {
class MCAssembler;
-class MCSection;
class MCAsmLayout {
- MCAssembler &Assembler;
-
public:
- MCAsmLayout(MCAssembler &Assembler);
-
- /// Get the assembler object this is a layout for.
- MCAssembler &getAssembler() const { return Assembler; }
+ MCAsmLayout(MCAssembler &) {}
};
} // end namespace llvm
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index 1e476ae61dec6..df5ad0e7bdf4b 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -116,7 +116,7 @@ class MCAssembler {
std::unique_ptr<MCCodeEmitter> Emitter;
std::unique_ptr<MCObjectWriter> Writer;
- MCAsmLayout *Layout = nullptr;
+ bool HasLayout = false;
bool RelaxAll = false;
bool SubsectionsViaSymbols = false;
bool IncrementalLinkerCompatible = false;
@@ -354,8 +354,7 @@ class MCAssembler {
IncrementalLinkerCompatible = Value;
}
- MCAsmLayout *getLayout() const { return Layout; }
- bool hasLayout() const { return Layout; }
+ bool hasLayout() const { return HasLayout; }
bool getRelaxAll() const { return RelaxAll; }
void setRelaxAll(bool Value) { RelaxAll = Value; }
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 0a6bb52a3b8f4..2fe4a3cbec9a6 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -381,8 +381,6 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
llvm_unreachable("invalid fragment kind");
}
-MCAsmLayout::MCAsmLayout(MCAssembler &Asm) : Assembler(Asm) {}
-
// Compute the amount of padding required before the fragment \p F to
// obey bundling restrictions, where \p FOffset is the fragment's offset in
// its section and \p FSize is the fragment's size.
@@ -547,7 +545,7 @@ uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
}
const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
- assert(Layout);
+ assert(HasLayout);
if (!Symbol.isVariable())
return &Symbol;
@@ -584,6 +582,7 @@ const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
}
uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
+ assert(HasLayout);
// The size is the last fragment's end offset.
const MCFragment &F = *Sec.curFragList()->Tail;
return getFragmentOffset(F) + computeFragmentSize(F);
@@ -968,7 +967,7 @@ void MCAssembler::layout(MCAsmLayout &Layout) {
}
// Layout until everything fits.
- this->Layout = &Layout;
+ this->HasLayout = true;
while (layoutOnce()) {
if (getContext().hadError())
return;
@@ -1081,7 +1080,7 @@ void MCAssembler::Finish() {
// Write the object file.
stats::ObjectBytes += getWriter().writeObject(*this);
- this->Layout = nullptr;
+ HasLayout = false;
}
bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 82795399900c2..0a175ade68d78 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -626,7 +626,7 @@ static void AttemptToFoldSymbolOffsetDifference(
// separated by a linker-relaxable instruction. If the section contains
// instructions and InSet is false (not expressions in directive like
// .size/.fill), disable the fast path.
- const MCAsmLayout *Layout = Asm->getLayout();
+ bool Layout = Asm->hasLayout();
if (Layout && (InSet || !SecA.hasInstructions() ||
!(Asm->getContext().getTargetTriple().isRISCV() ||
Asm->getContext().getTargetTriple().isLoongArch()))) {
@@ -817,7 +817,6 @@ bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
const SectionAddrMap *Addrs,
bool InSet) const {
++stats::MCExprEvaluate;
- MCAsmLayout *Layout = Asm ? Asm->getLayout() : nullptr;
switch (getKind()) {
case Target:
return cast<MCTargetExpr>(this)->evaluateAsRelocatableImpl(Res, Asm, Fixup);
@@ -830,6 +829,7 @@ bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(this);
const MCSymbol &Sym = SRE->getSymbol();
const auto Kind = SRE->getKind();
+ bool Layout = Asm && Asm->hasLayout();
// Evaluate recursively if this is a variable.
if (Sym.isVariable() && (Kind == MCSymbolRefExpr::VK_None || Layout) &&
diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp
index 87355561c1cb5..5386df7f4afcd 100644
--- a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp
+++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp
@@ -79,7 +79,7 @@ bool AVRMCExpr::evaluateAsRelocatableImpl(MCValue &Result,
if (Value.isAbsolute()) {
Result = MCValue::get(evaluateAsInt64(Value.getConstant()));
} else {
- if (!Asm || !Asm->getLayout())
+ if (!Asm || !Asm->hasLayout())
return false;
MCContext &Context = Asm->getContext();
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
index 05fc733825113..cc1d98105b0cb 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
@@ -122,7 +122,7 @@ bool PPCMCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
Res = MCValue::get(Result);
} else {
- if (!Asm || !Asm->getLayout())
+ if (!Asm || !Asm->hasLayout())
return false;
MCContext &Context = Asm->getContext();
>From 60cd3eb880fe48d192a58c64a1e38e875fc65377 Mon Sep 17 00:00:00 2001
From: Kevin Frei <kevinfrei at users.noreply.github.com>
Date: Tue, 2 Jul 2024 10:14:26 -0700
Subject: [PATCH 016/246] Reduce llvm-gsymutil memory usage (#91023)
llvm-gsymutil eats a lot of RAM. On some large binaries, it causes OOMs on
smaller hardware, consuming well over 64 GB of RAM. This change frees line
tables once we're done with them, and frees each DWARFUnit's DIEs when we
finish processing that unit, though they may get reconstituted if other
units reference them during processing. Once the conversion is complete,
all DIEs are freed. The reduction in peak memory usage from these changes
was between 7% and 12% in my tests.
The double-checked locking around the creation and freeing of the data
structures was tested on a 166-core system. I validated that it trivially
malfunctioned without the locks (and with deliberately broken lock
ordering) and worked reliably with them.
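The locking scheme is a double-checked read/write lock. A minimal sketch of the pattern, mirroring the `DoubleCheckedRWLocker` helper introduced in the diff below (the free-standing function here is a simplified restatement, not the exact patch code):
```
#include "llvm/Support/RWMutex.h"
#include <functional>

// Returns true if the work had already been done by another thread.
static bool doubleCheckedRWLocker(llvm::sys::RWMutex &Mutex,
                                  const std::function<bool()> &AlreadyDone,
                                  const std::function<void()> &Work) {
  {
    llvm::sys::ScopedReader R(Mutex); // cheap shared-lock fast path
    if (AlreadyDone())
      return true;
  }
  llvm::sys::ScopedWriter W(Mutex); // exclusive lock for the slow path
  if (AlreadyDone())                // re-check: another writer may have run
    return true;
  Work();
  return false;
}
```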
---------
Co-authored-by: Kevin Frei <freik at meta.com>
---
llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h | 23 +++++-
llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp | 81 ++++++++++++++++---
llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp | 15 +++-
3 files changed, 105 insertions(+), 14 deletions(-)
diff --git a/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
index 80c27aea89312..26ef7db718dd5 100644
--- a/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
+++ b/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -22,6 +22,7 @@
#include "llvm/DebugInfo/DWARF/DWARFLocationExpression.h"
#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/RWMutex.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -257,6 +258,10 @@ class DWARFUnit {
std::shared_ptr<DWARFUnit> DWO;
+ mutable llvm::sys::RWMutex FreeDIEsMutex;
+ mutable llvm::sys::RWMutex ExtractCUDieMutex;
+ mutable llvm::sys::RWMutex ExtractNonCUDIEsMutex;
+
protected:
friend dwarf_linker::parallel::CompileUnit;
@@ -566,6 +571,9 @@ class DWARFUnit {
Error tryExtractDIEsIfNeeded(bool CUDieOnly);
+ /// clearDIEs - Clear parsed DIEs to keep memory usage low.
+ void clearDIEs(bool KeepCUDie);
+
private:
/// Size in bytes of the .debug_info data associated with this compile unit.
size_t getDebugInfoSize() const {
@@ -577,13 +585,22 @@ class DWARFUnit {
/// hasn't already been done
void extractDIEsIfNeeded(bool CUDieOnly);
+ /// extracCUDieIfNeeded - Parse CU DIE if it hasn't already been done.
+ /// Only to be used from extractDIEsIfNeeded, which holds the correct locks.
+ bool extractCUDieIfNeeded(bool CUDieOnly, bool &HasCUDie);
+
+ /// extractNonCUDIEsIfNeeded - Parses non-CU DIE's for a given CU if needed.
+ /// Only to be used from extractDIEsIfNeeded, which holds the correct locks.
+ Error extractNonCUDIEsIfNeeded(bool HasCUDie);
+
+ /// extractNonCUDIEsHelper - helper to be invoked *only* from inside
+ /// tryExtractDIEsIfNeeded, which holds the correct locks.
+ Error extractNonCUDIEsHelper();
+
/// extractDIEsToVector - Appends all parsed DIEs to a vector.
void extractDIEsToVector(bool AppendCUDie, bool AppendNonCUDIEs,
std::vector<DWARFDebugInfoEntry> &DIEs) const;
- /// clearDIEs - Clear parsed DIEs to keep memory usage low.
- void clearDIEs(bool KeepCUDie);
-
/// parseDWO - Parses .dwo file for current compile unit. Returns true if
/// it was actually constructed.
/// The \p AlternativeLocation specifies an alternative location to get
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp b/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
index bdd04b00f557b..2760cef7edfdb 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
@@ -495,21 +495,78 @@ void DWARFUnit::extractDIEsIfNeeded(bool CUDieOnly) {
Context.getRecoverableErrorHandler()(std::move(e));
}
-Error DWARFUnit::tryExtractDIEsIfNeeded(bool CUDieOnly) {
- if ((CUDieOnly && !DieArray.empty()) ||
- DieArray.size() > 1)
- return Error::success(); // Already parsed.
+static bool DoubleCheckedRWLocker(llvm::sys::RWMutex &Mutex,
+ const std::function<bool()> &reader,
+ const std::function<void()> &writer) {
+ {
+ llvm::sys::ScopedReader Lock(Mutex);
+ if (reader())
+ return true;
+ }
+ llvm::sys::ScopedWriter Lock(Mutex);
+ if (reader())
+ return true;
+ // If we get here, then the reader function returned false. This means that
+ // no one else is currently writing to this data structure and it's safe for
+ // us to write to it now. The scoped writer lock guarantees there are no
+ // other readers or writers at this point.
+ writer();
+ return false;
+}
- bool HasCUDie = !DieArray.empty();
- extractDIEsToVector(!HasCUDie, !CUDieOnly, DieArray);
+// Helper to safely check if the Compile-Unit DIE has been extracted already.
+// If not, then extract it, and return false, indicating that it was *not*
+// already extracted.
+bool DWARFUnit::extractCUDieIfNeeded(bool CUDieOnly, bool &HasCUDie) {
+ return DoubleCheckedRWLocker(
+ ExtractCUDieMutex,
+ // Calculate if the CU DIE has been extracted already.
+ [&]() {
+ return ((CUDieOnly && !DieArray.empty()) || DieArray.size() > 1);
+ },
+ // Lambda to extract the CU DIE.
+ [&]() {
+ HasCUDie = !DieArray.empty();
+ extractDIEsToVector(!HasCUDie, !CUDieOnly, DieArray);
+ });
+}
- if (DieArray.empty())
- return Error::success();
+// Helper to safely check if the non-Compile-Unit DIEs have been parsed
+// already. If they haven't been parsed, go ahead and parse them.
+Error DWARFUnit::extractNonCUDIEsIfNeeded(bool HasCUDie) {
+ Error Result = Error::success();
+ DoubleCheckedRWLocker(
+ ExtractNonCUDIEsMutex,
+ // Lambda to check if all DIEs have been extracted already.
+ [=]() { return (DieArray.empty() || HasCUDie); },
+ // Lambda to extract all the DIEs using the helper function
+ [&]() {
+ if (Error E = extractNonCUDIEsHelper()) {
+ // Consume the success placeholder and save the actual error
+ consumeError(std::move(Result));
+ Result = std::move(E);
+ }
+ });
+ return Result;
+}
- // If CU DIE was just parsed, copy several attribute values from it.
- if (HasCUDie)
+Error DWARFUnit::tryExtractDIEsIfNeeded(bool CUDieOnly) {
+ // Acquire the FreeDIEsMutex lock (in read-mode) to prevent the Compile Unit
+ // DIE from being freed by a thread calling clearDIEs() after the CU DIE was
+ // parsed, but before the rest of the DIEs are parsed, as there are no other
+ // locks held during that brief period.
+ llvm::sys::ScopedReader FreeLock(FreeDIEsMutex);
+ bool HasCUDie = false;
+ if (extractCUDieIfNeeded(CUDieOnly, HasCUDie))
return Error::success();
+ // Right here is where the above-mentioned race condition exists.
+ return extractNonCUDIEsIfNeeded(HasCUDie);
+}
+// Helper used from the tryExtractDIEsIfNeeded function: it must already have
+// acquired the ExtractNonCUDIEsMutex for writing.
+Error DWARFUnit::extractNonCUDIEsHelper() {
+ // If CU DIE was just parsed, copy several attribute values from it.
DWARFDie UnitDie(this, &DieArray[0]);
if (std::optional<uint64_t> DWOId =
toUnsigned(UnitDie.find(DW_AT_GNU_dwo_id)))
@@ -653,6 +710,10 @@ bool DWARFUnit::parseDWO(StringRef DWOAlternativeLocation) {
}
void DWARFUnit::clearDIEs(bool KeepCUDie) {
+ // We need to acquire the FreeDIEsMutex lock in write-mode, because we are
+ // going to free the DIEs, when other threads might be trying to create them.
+ llvm::sys::ScopedWriter FreeLock(FreeDIEsMutex);
+
// Do not use resize() + shrink_to_fit() to free memory occupied by dies.
// shrink_to_fit() is a *non-binding* request to reduce capacity() to size().
// It depends on the implementation whether the request is fulfilled.
diff --git a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
index 601686fdd3dd5..e1b30648b6a77 100644
--- a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
+++ b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
@@ -587,6 +587,11 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) {
DWARFDie Die = getDie(*CU);
CUInfo CUI(DICtx, dyn_cast<DWARFCompileUnit>(CU.get()));
handleDie(Out, CUI, Die);
+ // Release the line table, once we're done.
+ DICtx.clearLineTableForUnit(CU.get());
+ // Free any DIEs that were allocated by the DWARF parser.
+ // If/when they're needed by other CU's, they'll be recreated.
+ CU->clearDIEs(/*KeepCUDie=*/false);
}
} else {
// LLVM Dwarf parser is not thread-safe and we need to parse all DWARF up
@@ -612,11 +617,16 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) {
DWARFDie Die = getDie(*CU);
if (Die) {
CUInfo CUI(DICtx, dyn_cast<DWARFCompileUnit>(CU.get()));
- pool.async([this, CUI, &LogMutex, &Out, Die]() mutable {
+ pool.async([this, CUI, &CU, &LogMutex, &Out, Die]() mutable {
std::string storage;
raw_string_ostream StrStream(storage);
OutputAggregator ThreadOut(Out.GetOS() ? &StrStream : nullptr);
handleDie(ThreadOut, CUI, Die);
+ // Release the line table once we're done.
+ DICtx.clearLineTableForUnit(CU.get());
+ // Free any DIEs that were allocated by the DWARF parser.
+ // If/when they're needed by other CU's, they'll be recreated.
+ CU->clearDIEs(/*KeepCUDie=*/false);
// Print ThreadLogStorage lines into an actual stream under a lock
std::lock_guard<std::mutex> guard(LogMutex);
if (Out.GetOS()) {
@@ -629,6 +639,9 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) {
}
pool.wait();
}
+ // Now get rid of all the DIEs that may have been recreated
+ for (const auto &CU : DICtx.compile_units())
+ CU->clearDIEs(/*KeepCUDie=*/false);
size_t FunctionsAddedCount = Gsym.getNumFunctionInfos() - NumBefore;
Out << "Loaded " << FunctionsAddedCount << " functions from DWARF.\n";
return Error::success();
>From 5e564d97e3e99006e725b3ac514675a0157e9762 Mon Sep 17 00:00:00 2001
From: Michael Klemm <michael.klemm at amd.com>
Date: Tue, 2 Jul 2024 19:25:23 +0200
Subject: [PATCH 017/246] [Flang][runtime] Fix compilation errors introduced
with PR #96652 (#97442)
---
flang/runtime/time-intrinsic.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/flang/runtime/time-intrinsic.cpp b/flang/runtime/time-intrinsic.cpp
index ac372b249fa4b..7352dafc9136e 100644
--- a/flang/runtime/time-intrinsic.cpp
+++ b/flang/runtime/time-intrinsic.cpp
@@ -126,7 +126,7 @@ static constexpr inline unsigned_count_t GetHUGE(int kind) {
// Function converts a std::timespec_t into the desired count to
// be returned by the timing functions in accordance with the requested
// kind at the call site.
-count_t ConvertTimeSpecToCount(int kind, const std::timespec &tspec) {
+count_t ConvertTimeSpecToCount(int kind, const struct timespec &tspec) {
const unsigned_count_t huge{GetHUGE(kind)};
unsigned_count_t sec{static_cast<unsigned_count_t>(tspec.tv_sec)};
unsigned_count_t nsec{static_cast<unsigned_count_t>(tspec.tv_nsec)};
@@ -142,9 +142,9 @@ count_t ConvertTimeSpecToCount(int kind, const std::timespec &tspec) {
// This is the fallback implementation, which should work everywhere.
template <typename Unused = void>
count_t GetSystemClockCount(int kind, fallback_implementation) {
- std::timespec tspec;
+ struct timespec tspec;
- if (std::timespec_get(&tspec, TIME_UTC) < 0) {
+ if (timespec_get(&tspec, TIME_UTC) < 0) {
// Return -HUGE(COUNT) to represent failure.
return -static_cast<count_t>(GetHUGE(kind));
}
>From ebdb6f4ef4ba79eb73589fc96a64ce2c6994935d Mon Sep 17 00:00:00 2001
From: Arthur Eubanks <aeubanks at google.com>
Date: Tue, 2 Jul 2024 11:41:26 -0600
Subject: [PATCH 018/246] [MLInliner] Keep track of deleted functions (#97348)
Track deleted functions directly instead of relying on Node::isDead(),
which is no longer accurate after #94815.
This information is only used in diagnostics.
---
llvm/include/llvm/Analysis/MLInlineAdvisor.h | 1 +
llvm/lib/Analysis/MLInlineAdvisor.cpp | 10 +++++++---
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/llvm/include/llvm/Analysis/MLInlineAdvisor.h b/llvm/include/llvm/Analysis/MLInlineAdvisor.h
index 2aa077fe0e035..0333f457c1a2d 100644
--- a/llvm/include/llvm/Analysis/MLInlineAdvisor.h
+++ b/llvm/include/llvm/Analysis/MLInlineAdvisor.h
@@ -89,6 +89,7 @@ class MLInlineAdvisor : public InlineAdvisor {
int32_t CurrentIRSize = 0;
llvm::SmallPtrSet<const LazyCallGraph::Node *, 1> NodesInLastSCC;
DenseSet<const LazyCallGraph::Node *> AllNodes;
+ DenseSet<Function *> DeadFunctions;
bool ForceStop = false;
ProfileSummaryInfo &PSI;
};
diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp
index 652f0d994b29c..8131acb3f0df3 100644
--- a/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -311,11 +311,13 @@ void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
int64_t NewCallerAndCalleeEdges =
getCachedFPI(*Caller).DirectCallsToDefinedFunctions;
- if (CalleeWasDeleted)
+ if (CalleeWasDeleted) {
--NodeCount;
- else
+ DeadFunctions.insert(Callee);
+ } else {
NewCallerAndCalleeEdges +=
getCachedFPI(*Callee).DirectCallsToDefinedFunctions;
+ }
EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
assert(CurrentIRSize >= 0 && EdgeCount >= 0 && NodeCount >= 0);
}
@@ -493,7 +495,9 @@ void MLInlineAdvisor::print(raw_ostream &OS) const {
OS << "\n";
OS << "[MLInlineAdvisor] FuncLevels:\n";
for (auto I : FunctionLevels)
- OS << (I.first->isDead() ? "<deleted>" : I.first->getFunction().getName())
+ OS << (DeadFunctions.contains(&I.first->getFunction())
+ ? "<deleted>"
+ : I.first->getFunction().getName())
<< " : " << I.second << "\n";
OS << "\n";
>From bf5a2a99b186216e4126dae3b16851f16c50603f Mon Sep 17 00:00:00 2001
From: Pete Steinfeld <47540744+psteinfeld at users.noreply.github.com>
Date: Tue, 2 Jul 2024 10:46:15 -0700
Subject: [PATCH 019/246] [flang] Fix build problem caused by #96807 (#97450)
I had erroneously used a bitwise '&' with boolean operands, which caused
a warning on the ARM build. This change fixes that.
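A minimal illustration of why the fold operator matters, using hypothetical verifier functions (`v1` and `v2` are made up for the example; the bitwise variant intentionally reproduces the warned-about pattern):
```
#include <cstdio>

static bool v1() { std::puts("v1"); return false; }
static bool v2() { std::puts("v2"); return true; }

// Bitwise fold: bools promote to int and every verifier runs, which is
// what can trigger compiler warnings about '&' on boolean operands.
template <bool (*... F)()> static bool combineBitwise() { return (... & F()); }

// Logical fold: short-circuits, so later verifiers are skipped once one fails.
template <bool (*... F)()> static bool combineLogical() { return (... && F()); }

int main() {
  combineBitwise<v1, v2>(); // prints v1 and v2
  combineLogical<v1, v2>(); // prints only v1
}
```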
---
flang/lib/Evaluate/intrinsics-library.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/flang/lib/Evaluate/intrinsics-library.cpp b/flang/lib/Evaluate/intrinsics-library.cpp
index c3bd3a501c0c5..65636b9956e78 100644
--- a/flang/lib/Evaluate/intrinsics-library.cpp
+++ b/flang/lib/Evaluate/intrinsics-library.cpp
@@ -807,7 +807,7 @@ static bool VerifyAtan2LikeArguments(
template <ArgumentVerifierFunc... F>
static bool CombineVerifiers(
const std::vector<Expr<SomeType>> &args, FoldingContext &context) {
- return (... & F(args, context));
+ return (... && F(args, context));
}
/// Define argument names to be used error messages when the intrinsic have
>From e852725e5d517195de247f30b62ad2c56717958a Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Fri, 23 Nov 2018 16:20:56 -0500
Subject: [PATCH 020/246] Support: Fix typo in comment
---
llvm/lib/Support/Unix/Signals.inc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Support/Unix/Signals.inc b/llvm/lib/Support/Unix/Signals.inc
index 792b0fd66b45d..298fde1a387cc 100644
--- a/llvm/lib/Support/Unix/Signals.inc
+++ b/llvm/lib/Support/Unix/Signals.inc
@@ -289,7 +289,7 @@ static void CreateSigAltStack() {}
static void RegisterHandlers() { // Not signal-safe.
// The mutex prevents other threads from registering handlers while we're
// doing it. We also have to protect the handlers and their count because
- // a signal handler could fire while we're registeting handlers.
+ // a signal handler could fire while we're registering handlers.
static ManagedStatic<sys::SmartMutex<true>> SignalHandlerRegistrationMutex;
sys::SmartScopedLock<true> Guard(*SignalHandlerRegistrationMutex);
>From fdd319655359b005889abf40d1d8a54fbd56059e Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Tue, 2 Jul 2024 10:58:24 -0700
Subject: [PATCH 021/246] [ELF] Make start/stop symbols retain associated
discardable output sections
An empty output section specified in the `SECTIONS` command (e.g.
`empty : { *(empty) }`) may be discarded. Due to phase ordering, we
might define `__start_empty`/`__stop_empty` symbols with incorrect
section indexes (usually benign, but could go out of bounds and cause
`readelf -s` to print `BAD`).
```
finalizeSections
addStartStopSymbols // __start_empty is defined
// __start_empty is added to .symtab
sortSections
adjustOutputSections // `empty` is discarded
writeSections
// __start_empty is Defined with an invalid section index
```
Loaders use `st_value` members of the start/stop symbols and expect no
"undefined symbol" linker error, but do not particularly care whether
the symbols are defined or undefined. Let's retain the associated empty
output section so that start/stop symbols will have correct section
indexes.
The approach allows us to remove `LinkerScript::isDiscarded`
(https://reviews.llvm.org/D114179). Also delete the
`findSection(".text")` special case from https://reviews.llvm.org/D46200,
which is unnecessary even before this patch (`elfHeader` would be fine
even with very large executables).
Note: we should be careful not to unnecessarily retain .ARM.exidx, which
would create an empty PT_ARM_EXIDX. ~40 tests would need to be updated.
---
An alternative is to discard the empty output section and keep the
start/stop symbols undefined. This approach needs more code and requires
`LinkerScript::isDiscarded` before we discard empty sections in
`adjustOutputSections`.
Pull Request: https://github.com/llvm/llvm-project/pull/96343
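For background, a minimal sketch of the usage pattern the start/stop symbols support, with a hypothetical section name `mylist` (the identifiers are illustrative, and the GNU attributes assume GCC/Clang):
```
// The linker synthesizes __start_mylist and __stop_mylist because
// "mylist" is a valid C identifier.
extern int __start_mylist[], __stop_mylist[];

__attribute__((section("mylist"), used)) static int entry0 = 42;

int sumMylist() {
  int s = 0;
  for (const int *p = __start_mylist; p != __stop_mylist; ++p)
    s += *p; // iterate over everything placed in section "mylist"
  return s;
}
```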
---
lld/ELF/LinkerScript.cpp | 5 --
lld/ELF/LinkerScript.h | 2 -
lld/ELF/Writer.cpp | 50 ++++++++-----------
...st => empty-preinit-array-start-stop.test} | 9 +++-
.../empty-section-start-stop.test | 36 +++++++++++++
lld/test/ELF/linkerscript/sections-gc2.s | 1 +
lld/test/ELF/pre_init_fini_array_missing.s | 29 +++++------
7 files changed, 78 insertions(+), 54 deletions(-)
rename lld/test/ELF/linkerscript/{preinit-array-empty.test => empty-preinit-array-start-stop.test} (83%)
create mode 100644 lld/test/ELF/linkerscript/empty-section-start-stop.test
diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp
index 1ec796a3bdd99..e2208da18dce0 100644
--- a/lld/ELF/LinkerScript.cpp
+++ b/lld/ELF/LinkerScript.cpp
@@ -1184,11 +1184,6 @@ static bool isDiscardable(const OutputSection &sec) {
return true;
}
-bool LinkerScript::isDiscarded(const OutputSection *sec) const {
- return hasSectionsCommand && (getFirstInputSection(sec) == nullptr) &&
- isDiscardable(*sec);
-}
-
static void maybePropagatePhdrs(OutputSection &sec,
SmallVector<StringRef, 0> &phdrs) {
if (sec.phdrs.empty()) {
diff --git a/lld/ELF/LinkerScript.h b/lld/ELF/LinkerScript.h
index db26d026a00ba..43d0850eed718 100644
--- a/lld/ELF/LinkerScript.h
+++ b/lld/ELF/LinkerScript.h
@@ -342,8 +342,6 @@ class LinkerScript final {
void processSymbolAssignments();
void declareSymbols();
- bool isDiscarded(const OutputSection *sec) const;
-
// Used to handle INSERT AFTER statements.
void processInsertCommands();
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index ba794310905de..bce3cd2de7ed2 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -2064,33 +2064,21 @@ template <class ELFT> void Writer<ELFT>::checkExecuteOnly() {
// The linker is expected to define SECNAME_start and SECNAME_end
// symbols for a few sections. This function defines them.
template <class ELFT> void Writer<ELFT>::addStartEndSymbols() {
- // If a section does not exist, there's ambiguity as to how we
- // define _start and _end symbols for an init/fini section. Since
- // the loader assume that the symbols are always defined, we need to
- // always define them. But what value? The loader iterates over all
- // pointers between _start and _end to run global ctors/dtors, so if
- // the section is empty, their symbol values don't actually matter
- // as long as _start and _end point to the same location.
- //
- // That said, we don't want to set the symbols to 0 (which is
- // probably the simplest value) because that could cause some
- // program to fail to link due to relocation overflow, if their
- // program text is above 2 GiB. We use the address of the .text
- // section instead to prevent that failure.
- //
- // In rare situations, the .text section may not exist. If that's the
- // case, use the image base address as a last resort.
- OutputSection *Default = findSection(".text");
- if (!Default)
- Default = Out::elfHeader;
-
+ // If the associated output section does not exist, there is ambiguity as to
+ // how we define _start and _end symbols for an init/fini section. Users
+ // expect no "undefined symbol" linker errors and loaders expect equal
+ // st_value but do not particularly care whether the symbols are defined or
+ // not. We retain the output section so that the section indexes will be
+ // correct.
auto define = [=](StringRef start, StringRef end, OutputSection *os) {
- if (os && !script->isDiscarded(os)) {
- addOptionalRegular(start, os, 0);
- addOptionalRegular(end, os, -1);
+ if (os) {
+ Defined *startSym = addOptionalRegular(start, os, 0);
+ Defined *stopSym = addOptionalRegular(end, os, -1);
+ if (startSym || stopSym)
+ os->usedInExpression = true;
} else {
- addOptionalRegular(start, Default, 0);
- addOptionalRegular(end, Default, 0);
+ addOptionalRegular(start, Out::elfHeader, 0);
+ addOptionalRegular(end, Out::elfHeader, 0);
}
};
@@ -2098,6 +2086,8 @@ template <class ELFT> void Writer<ELFT>::addStartEndSymbols() {
define("__init_array_start", "__init_array_end", Out::initArray);
define("__fini_array_start", "__fini_array_end", Out::finiArray);
+ // As a special case, don't unnecessarily retain .ARM.exidx, which would
+ // create an empty PT_ARM_EXIDX.
if (OutputSection *sec = findSection(".ARM.exidx"))
define("__exidx_start", "__exidx_end", sec);
}
@@ -2112,10 +2102,12 @@ void Writer<ELFT>::addStartStopSymbols(OutputSection &osec) {
StringRef s = osec.name;
if (!isValidCIdentifier(s))
return;
- addOptionalRegular(saver().save("__start_" + s), &osec, 0,
- config->zStartStopVisibility);
- addOptionalRegular(saver().save("__stop_" + s), &osec, -1,
- config->zStartStopVisibility);
+ Defined *startSym = addOptionalRegular(saver().save("__start_" + s), &osec, 0,
+ config->zStartStopVisibility);
+ Defined *stopSym = addOptionalRegular(saver().save("__stop_" + s), &osec, -1,
+ config->zStartStopVisibility);
+ if (startSym || stopSym)
+ osec.usedInExpression = true;
}
static bool needsPtLoad(OutputSection *sec) {
diff --git a/lld/test/ELF/linkerscript/preinit-array-empty.test b/lld/test/ELF/linkerscript/empty-preinit-array-start-stop.test
similarity index 83%
rename from lld/test/ELF/linkerscript/preinit-array-empty.test
rename to lld/test/ELF/linkerscript/empty-preinit-array-start-stop.test
index 696c8ddb622dd..0f6d2e4c0f6a8 100644
--- a/lld/test/ELF/linkerscript/preinit-array-empty.test
+++ b/lld/test/ELF/linkerscript/empty-preinit-array-start-stop.test
@@ -3,7 +3,8 @@
# RUN: llvm-mc -filetype=obj -triple=x86_64 %t/t.s -o %t.o
## PR52534: https://bugs.llvm.org/show_bug.cgi?id=52534
-## Check case where .preinit_array is discarded.
+## Check case where .preinit_array would be discarded in the absence of the
+## start/stop symbols.
## Link should succeed without causing an out of range relocation error.
# RUN: ld.lld -T %t/discarded.script %t.o -o %t1 --image-base=0x80000000
# RUN: llvm-readelf -s %t1 | FileCheck --check-prefixes=CHECK,DISCARDED %s
@@ -15,7 +16,7 @@
# CHECK: [[#%x,ADDR:]] 0 NOTYPE LOCAL HIDDEN [[#]] __preinit_array_start
# CHECK-NEXT: [[#ADDR]] 0 NOTYPE LOCAL HIDDEN [[#]] __preinit_array_end
-# DISCARDED-NEXT: [[#ADDR]] 0 NOTYPE GLOBAL DEFAULT [[#]] _start
+# DISCARDED-NEXT: {{0*}}[[#ADDR-14]] 0 NOTYPE GLOBAL DEFAULT [[#]] _start
# EMPTY-NOT: [[#ADDR]] 0 NOTYPE GLOBAL DEFAULT [[#]] _start
# EMPTY: [[#ADDR]] 0 NOTYPE GLOBAL DEFAULT [[#]] ADDR
@@ -26,8 +27,12 @@ _start:
    movq __preinit_array_start@GOTPCREL(%rip),%rax
    movq __preinit_array_end@GOTPCREL(%rip),%rax
+.section .rodata,"a"
+.byte 0
+
#--- discarded.script
SECTIONS {
+ .rodata : { *(.rodata); }
.text : { *(.text); }
.preinit_array : { *(.preinit_array); }
}
diff --git a/lld/test/ELF/linkerscript/empty-section-start-stop.test b/lld/test/ELF/linkerscript/empty-section-start-stop.test
new file mode 100644
index 0000000000000..a32a936e644f2
--- /dev/null
+++ b/lld/test/ELF/linkerscript/empty-section-start-stop.test
@@ -0,0 +1,36 @@
+# REQUIRES: x86
+## __start/__stop symbols retain the associated empty sections with C identifier names.
+
+# RUN: rm -rf %t && split-file %s %t
+# RUN: llvm-mc -filetype=obj -triple=x86_64 %t/test.s -o %t.o
+# RUN: ld.lld -T %t/ldscript -o %t.out %t.o -z start-stop-visibility=default
+# RUN: llvm-objdump -h -t %t.out | FileCheck %s
+
+# CHECK: .text
+# CHECK-NEXT: empty1
+# CHECK-NEXT: empty2
+# CHECK-NEXT: empty3
+
+# CHECK: [[#%x,ADDR:]] l empty1 0000000000000000 .hidden __start_empty1
+# CHECK-NEXT: {{0*}}[[#ADDR]] g empty2 0000000000000000 .protected __stop_empty2
+# CHECK-NEXT: {{0*}}[[#ADDR]] g empty3 0000000000000000 __stop_empty3
+
+#--- ldscript
+SECTIONS {
+ .text : { *(.text .text.*) }
+ empty0 : { *(empty0) }
+ empty1 : { *(empty1) }
+ empty2 : { *(empty2) }
+ empty3 : { *(empty3) }
+}
+
+#--- test.s
+.weak __start_empty1, __stop_empty2, __stop_empty3
+.hidden __start_empty1
+.protected __stop_empty2
+
+.globl _start
+_start:
+ movq __start_empty1@GOTPCREL(%rip),%rax
+ movq __stop_empty2@GOTPCREL(%rip),%rax
+ movq __stop_empty3@GOTPCREL(%rip),%rax
diff --git a/lld/test/ELF/linkerscript/sections-gc2.s b/lld/test/ELF/linkerscript/sections-gc2.s
index 76be65fbdb4eb..c2611a7746965 100644
--- a/lld/test/ELF/linkerscript/sections-gc2.s
+++ b/lld/test/ELF/linkerscript/sections-gc2.s
@@ -11,6 +11,7 @@
# CHECK: Idx Name Size VMA Type
# CHECK-NEXT: 0
# CHECK-NEXT: used_in_reloc
+# CHECK-NEXT: used_in_script
# CHECK-NEXT: .text
# CHECK-NEXT: .comment
# CHECK-NEXT: .symtab
diff --git a/lld/test/ELF/pre_init_fini_array_missing.s b/lld/test/ELF/pre_init_fini_array_missing.s
index 22cf5fe9e2bea..a1c2e5da6b6b6 100644
--- a/lld/test/ELF/pre_init_fini_array_missing.s
+++ b/lld/test/ELF/pre_init_fini_array_missing.s
@@ -14,29 +14,26 @@ _start:
call __fini_array_start
call __fini_array_end
-// With no .init_array section the symbols resolve to .text.
-// 0x201120 - (0x201120 + 5) = -5
-// 0x201120 - (0x201125 + 5) = -10
-// ...
+/// Due to __init_array_start/__init_array_end, .init_array is retained.
// CHECK: Disassembly of section .text:
// CHECK-EMPTY:
// CHECK-NEXT: <_start>:
-// CHECK-NEXT: 201120: callq 0x201120
-// CHECK-NEXT: callq 0x201120
-// CHECK-NEXT: callq 0x201120
-// CHECK-NEXT: callq 0x201120
-// CHECK-NEXT: callq 0x201120
-// CHECK-NEXT: callq 0x201120
+// CHECK-NEXT: 201120: callq 0x200000
+// CHECK-NEXT: callq 0x200000
+// CHECK-NEXT: callq 0x200000
+// CHECK-NEXT: callq 0x200000
+// CHECK-NEXT: callq 0x200000
+// CHECK-NEXT: callq 0x200000
// In position-independent binaries, they resolve to .text too.
// PIE: Disassembly of section .text:
// PIE-EMPTY:
// PIE-NEXT: <_start>:
-// PIE-NEXT: 1210: callq 0x1210
-// PIE-NEXT: callq 0x1210
-// PIE-NEXT: callq 0x1210
-// PIE-NEXT: callq 0x1210
-// PIE-NEXT: callq 0x1210
-// PIE-NEXT: callq 0x1210
+// PIE-NEXT: 1210: callq 0x0
+// PIE-NEXT: callq 0x0
+// PIE-NEXT: callq 0x0
+// PIE-NEXT: callq 0x0
+// PIE-NEXT: callq 0x0
+// PIE-NEXT: callq 0x0
>From 5f1743cd074cc7d45744d1acc8db379513b4501c Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Tue, 2 Jul 2024 11:00:42 -0700
Subject: [PATCH 022/246] [ELF] Infer EI_OSABI from object files
The first object file whose EI_OSABI is not ELFOSABI_NONE is selected.
This is useful for some OSes to identify themselves. It achieves effects
similar to the BFD emulations (`ld.lld -m *_fbsd`) but is more
lightweight.
Pull Request: https://github.com/llvm/llvm-project/pull/97144
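A minimal sketch of the selection rule, assuming one (ekind, osabi) pair per input file; the types and names are illustrative, not the actual lld code:
```
#include <cstdint>
#include <vector>

struct FileInfo {
  bool hasEkind;  // false corresponds to ELFNoneKind
  uint8_t osabi;  // 0 corresponds to ELFOSABI_NONE
};

uint8_t inferOsabi(const std::vector<FileInfo> &files) {
  uint8_t osabi = 0;
  for (const FileInfo &f : files) {
    if (!f.hasEkind)
      continue;       // files without an ELF kind don't participate
    osabi = f.osabi;  // keep updating with each file's OSABI...
    if (f.osabi != 0)
      break;          // ...but the first non-NONE OSABI wins
  }
  return osabi;
}
```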
---
lld/ELF/Driver.cpp | 16 +++++++++++-----
lld/test/ELF/basic-freebsd.s | 15 ++++++++++++---
2 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index ed773f5e69f77..73e260073da0c 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -2021,16 +2021,22 @@ void LinkerDriver::inferMachineType() {
if (config->ekind != ELFNoneKind)
return;
+ bool inferred = false;
for (InputFile *f : files) {
if (f->ekind == ELFNoneKind)
continue;
- config->ekind = f->ekind;
- config->emachine = f->emachine;
+ if (!inferred) {
+ inferred = true;
+ config->ekind = f->ekind;
+ config->emachine = f->emachine;
+ config->mipsN32Abi = config->emachine == EM_MIPS && isMipsN32Abi(f);
+ }
config->osabi = f->osabi;
- config->mipsN32Abi = config->emachine == EM_MIPS && isMipsN32Abi(f);
- return;
+ if (f->osabi != ELFOSABI_NONE)
+ return;
}
- error("target emulation unknown: -m or at least one .o file required");
+ if (!inferred)
+ error("target emulation unknown: -m or at least one .o file required");
}
// Parse -z max-page-size=<value>. The default value is defined by
diff --git a/lld/test/ELF/basic-freebsd.s b/lld/test/ELF/basic-freebsd.s
index 078f974424463..a34f568bbcf81 100644
--- a/lld/test/ELF/basic-freebsd.s
+++ b/lld/test/ELF/basic-freebsd.s
@@ -1,15 +1,24 @@
# REQUIRES: x86
# Verify that OSABI is set to the correct value.
-# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-freebsd %s -o %t
-# RUN: ld.lld %t -o %t2
-# RUN: llvm-readobj --file-headers %t2 | FileCheck %s
+# RUN: rm -rf %t && split-file %s %t && cd %t
+# RUN: llvm-mc -filetype=obj -triple=x86_64 empty.s -o empty.o
+# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-freebsd a.s -o a.o
+# RUN: llvm-mc -filetype=obj -triple=x86_64-linux gnu.s -o gnu.o
+# RUN: ld.lld a.o -o out
+# RUN: llvm-readobj --file-headers out | FileCheck %s
+# RUN: ld.lld empty.o a.o gnu.o empty.o -o out2
+# RUN: llvm-readobj --file-headers out2 | FileCheck %s
+#--- empty.s
+#--- a.s
.globl _start
_start:
mov $1, %rax
mov $42, %rdi
syscall
+#--- gnu.s
+.section retain,"aR"
# CHECK: ElfHeader {
# CHECK-NEXT: Ident {
From 0fb3351524acd48c62c06c57ed28cc423db4e99e Mon Sep 17 00:00:00 2001
From: Sam Clegg <sbc at chromium.org>
Date: Tue, 2 Jul 2024 11:23:15 -0700
Subject: [PATCH 023/246] [lld][WebAssembly] Fix for --import-table when
combined with reference types (#97451)
When reference types are enabled, clang generates call_indirect
instructions that explicitly reference the global
`__indirect_function_table` symbol.
In this case the resulting global symbol was not being correctly marked
with an explicit import name/module, so the linker reported errors when
the symbol was referenced.
This issue was reported in
https://github.com/WebAssembly/tool-conventions/issues/158
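The fix amounts to filling in the import module/name on a table symbol
that already existed when --import-table took effect (a sketch of the
resolveIndirectFunctionTable path, reconstructed from the
SymbolTable.cpp hunk below):

  if (config->importTable) {
    if (existing) {
      // Explicitly mark the already-created table symbol as an import so
      // later references to __indirect_function_table are accepted.
      existing->importModule = defaultModule;
      existing->importName = functionTableName;
      return cast<TableSymbol>(existing);
    }
    if (required)
      return createUndefinedIndirectFunctionTable(functionTableName);
  }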
---
lld/test/wasm/import-table-explicit.s | 26 +++++++++++++++++++
.../{import-table.test => import-table.s} | 16 ++++++------
lld/wasm/SymbolTable.cpp | 9 ++++---
3 files changed, 40 insertions(+), 11 deletions(-)
create mode 100644 lld/test/wasm/import-table-explicit.s
rename lld/test/wasm/{import-table.test => import-table.s} (63%)
diff --git a/lld/test/wasm/import-table-explicit.s b/lld/test/wasm/import-table-explicit.s
new file mode 100644
index 0000000000000..1dc21beba0629
--- /dev/null
+++ b/lld/test/wasm/import-table-explicit.s
@@ -0,0 +1,26 @@
+# RUN: llvm-mc -mattr=+reference-types -filetype=obj -triple=wasm32-unknown-unknown %s -o %t.o
+# RUN: wasm-ld --import-table -o %t.wasm %t.o
+# RUN: obj2yaml %t.wasm | FileCheck %s
+
+.globl __indirect_function_table
+.tabletype __indirect_function_table, funcref
+
+.globl _start
+_start:
+ .functype _start () -> ()
+ i32.const 1
+ call_indirect __indirect_function_table, () -> ()
+ end_function
+
+# Verify the --import-table flag creates a table import
+
+# CHECK: - Type: IMPORT
+# CHECK-NEXT: Imports:
+# CHECK-NEXT: - Module: env
+# CHECK-NEXT: Field: __indirect_function_table
+# CHECK-NEXT: Kind: TABLE
+# CHECK-NEXT: Table:
+# CHECK-NEXT: Index: 0
+# CHECK-NEXT: ElemType: FUNCREF
+# CHECK-NEXT: Limits:
+# CHECK-NEXT: Minimum: 0x1
diff --git a/lld/test/wasm/import-table.test b/lld/test/wasm/import-table.s
similarity index 63%
rename from lld/test/wasm/import-table.test
rename to lld/test/wasm/import-table.s
index 73dc7189bbf28..7a0c94d130276 100644
--- a/lld/test/wasm/import-table.test
+++ b/lld/test/wasm/import-table.s
@@ -1,14 +1,14 @@
-# RUN: llvm-mc -filetype=obj -triple=wasm32-unknown-unknown %p/Inputs/start.s -o %t.start.o
# RUN: llvm-mc -filetype=obj -triple=wasm32-unknown-unknown %s -o %t.o
-# RUN: wasm-ld --export-all --import-table -o %t.wasm %t.start.o %t.o
+# RUN: wasm-ld --export-all --import-table -o %t.wasm %t.o
# RUN: obj2yaml %t.wasm | FileCheck %s
-.globl require_function_table
-require_function_table:
-.functype require_function_table () -> ()
- i32.const 1
- call_indirect () -> ()
- end_function
+.globl _start
+_start:
+ .functype _start () -> ()
+ i32.const 1
+ # call_indirect instruction implicitly references the function table
+ call_indirect () -> ()
+ end_function
# Verify the --import-table flag creates a table import
diff --git a/lld/wasm/SymbolTable.cpp b/lld/wasm/SymbolTable.cpp
index 00c347ea3ef24..081f811cd139d 100644
--- a/lld/wasm/SymbolTable.cpp
+++ b/lld/wasm/SymbolTable.cpp
@@ -681,10 +681,10 @@ TableSymbol *SymbolTable::createUndefinedIndirectFunctionTable(StringRef name) {
WasmTableType *type = make<WasmTableType>();
type->ElemType = ValType::FUNCREF;
type->Limits = limits;
- StringRef module(defaultModule);
uint32_t flags = config->exportTable ? 0 : WASM_SYMBOL_VISIBILITY_HIDDEN;
flags |= WASM_SYMBOL_UNDEFINED;
- Symbol *sym = addUndefinedTable(name, name, module, flags, nullptr, type);
+ Symbol *sym =
+ addUndefinedTable(name, name, defaultModule, flags, nullptr, type);
sym->markLive();
sym->forceExport = config->exportTable;
return cast<TableSymbol>(sym);
@@ -724,8 +724,11 @@ TableSymbol *SymbolTable::resolveIndirectFunctionTable(bool required) {
}
if (config->importTable) {
- if (existing)
+ if (existing) {
+ existing->importModule = defaultModule;
+ existing->importName = functionTableName;
return cast<TableSymbol>(existing);
+ }
if (required)
return createUndefinedIndirectFunctionTable(functionTableName);
} else if ((existing && existing->isLive()) || config->exportTable ||
From 4d8883759432bb5fe6c5039b88c035d127ce948d Mon Sep 17 00:00:00 2001
From: Sam Clegg <sbc at chromium.org>
Date: Tue, 2 Jul 2024 11:23:41 -0700
Subject: [PATCH 024/246] [lld][WebAssembly] Fix filename when reporting
references to undefined symbols (#97444)
When an undefined symbol was referenced from more than one file, we
were reporting every reference as originating from just one of them.
This came up while working on
https://github.com/WebAssembly/tool-conventions/issues/158 where
undefined symbols in one object file were being reported as coming from
another.
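The fix threads the referencing object file through to the diagnostic
instead of asking the symbol for a file (a sketch of the error path,
reconstructed from the Relocations.cpp hunk below; the Warn/Ignore
cases are elided):

  static void reportUndefined(ObjFile *file, Symbol *sym) {
    if (!allowUndefined(sym)) {
      // `file` is the object whose relocations are being scanned;
      // sym->getFile() could name a different object when several files
      // reference the same undefined symbol.
      error(toString(file) + ": undefined symbol: " + toString(*sym));
    }
  }

  // Call site in scanRelocations(InputChunk *chunk):
  reportUndefined(file, sym);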
---
lld/test/wasm/unresolved-symbols.s | 53 +++++++++++++++++++-----------
lld/wasm/Relocations.cpp | 8 ++---
2 files changed, 38 insertions(+), 23 deletions(-)
diff --git a/lld/test/wasm/unresolved-symbols.s b/lld/test/wasm/unresolved-symbols.s
index 6e183e1878b3a..7367e6fddf76a 100644
--- a/lld/test/wasm/unresolved-symbols.s
+++ b/lld/test/wasm/unresolved-symbols.s
@@ -1,30 +1,34 @@
-# RUN: llvm-mc -filetype=obj -triple=wasm32-unknown-unknown %s -o %t1.o
+# RUN: split-file %s %t
+# RUN: llvm-mc -filetype=obj -triple=wasm32-unknown-unknown %t/main.s -o %t/main.o
+# RUN: llvm-mc -filetype=obj -triple=wasm32-unknown-unknown %t/secondary.s -o %t/secondary.o
-## Check that %t1.o contains undefined symbol undef_func.
-# RUN: not wasm-ld %t1.o -o /dev/null 2>&1 | \
+## Check that both main.o and secondary.o contain references to the same
+## undefined function and that both are correctly reported.
+# RUN: not wasm-ld --no-gc-sections %t/main.o %t/secondary.o -o /dev/null 2>&1 | \
# RUN: FileCheck -check-prefix=ERRUND %s
-# ERRUND: error: {{.*}}1.o: undefined symbol: undef_func
+# ERRUND: error: {{.*}}main.o: undefined symbol: undef_func
+# ERRUND: error: {{.*}}secondary.o: undefined symbol: undef_func
## report-all is the default. Check that we get the same error.
-# RUN: not wasm-ld %t1.o -o /dev/null --unresolved-symbols=report-all 2>&1 | \
+# RUN: not wasm-ld --no-gc-sections %t/main.o %t/secondary.o -o /dev/null --unresolved-symbols=report-all 2>&1 | \
# RUN: FileCheck -check-prefix=ERRUND %s
## Error out if unknown option value was set.
-# RUN: not wasm-ld %t1.o -o /dev/null --unresolved-symbols=xxx 2>&1 | \
+# RUN: not wasm-ld %t/main.o -o /dev/null --unresolved-symbols=xxx 2>&1 | \
# RUN: FileCheck -check-prefix=ERR1 %s
# ERR1: unknown --unresolved-symbols value: xxx
## Check alias.
-# RUN: not wasm-ld %t1.o -o /dev/null --unresolved-symbols xxx 2>&1 | \
+# RUN: not wasm-ld %t/main.o -o /dev/null --unresolved-symbols xxx 2>&1 | \
# RUN: FileCheck -check-prefix=ERR1 %s
## Ignore all should not produce an error and should not produce
## any imports. It should create a stub function in the place of the missing
## function symbol.
-# RUN: wasm-ld %t1.o -o %t2.wasm --unresolved-symbols=ignore-all
+# RUN: wasm-ld %t/main.o -o %t2.wasm --unresolved-symbols=ignore-all
# RUN: obj2yaml %t2.wasm | FileCheck -check-prefix=IGNORE %s
## --warn-unresolved-symbols should behave the same
-# RUN: wasm-ld %t1.o -o %t2.wasm --warn-unresolved-symbols
+# RUN: wasm-ld %t/main.o -o %t2.wasm --warn-unresolved-symbols
# RUN: obj2yaml %t2.wasm | FileCheck -check-prefix=IGNORE %s
# IGNORE-NOT: - Type: IMPORT
@@ -61,7 +65,7 @@
## by importing them but still report errors/warning for missing data symbols.
## `--allow-undefined` should behave like `--import-undefined` +
## `--unresolved-symbols=ignore`
-# RUN: wasm-ld %t1.o -o %t3.wasm --import-undefined --unresolved-symbols=ignore-all
+# RUN: wasm-ld %t/main.o -o %t3.wasm --import-undefined --unresolved-symbols=ignore-all
# RUN: obj2yaml %t3.wasm | FileCheck -check-prefix=IMPORT %s
# IMPORT: - Type: IMPORT
# IMPORT-NEXT: Imports:
@@ -72,23 +76,25 @@
# IMPORT-NEXT: - Type: FUNCTION
## Check that --import-undefined reports unresolved data symbols.
-# RUN: not wasm-ld %t1.o -o %t3.wasm --import-undefined --unresolved-symbols=report-all 2>&1 | FileCheck -check-prefix=IMPORTUNDEFINED %s
-# IMPORTUNDEFINED-NOT: error: {{.*}}1.o: undefined symbol: undef_func
-# IMPORTUNDEFINED: error: {{.*}}1.o: undefined symbol: undef_data
+# RUN: not wasm-ld %t/main.o -o %t3.wasm --import-undefined --unresolved-symbols=report-all 2>&1 | FileCheck -check-prefix=IMPORTUNDEFINED %s
+# IMPORTUNDEFINED-NOT: error: {{.*}}main.o: undefined symbol: undef_func
+# IMPORTUNDEFINED: error: {{.*}}main.o: undefined symbol: undef_data
## Do not report undefines if linking relocatable.
-# RUN: wasm-ld -r %t1.o -o %t4.wasm --unresolved-symbols=report-all
+# RUN: wasm-ld -r %t/main.o -o %t4.wasm --unresolved-symbols=report-all
# RUN: llvm-readobj %t4.wasm > /dev/null 2>&1
-.functype undef_func () -> ()
-.functype get_data_addr () -> (i32)
-.functype get_func_addr () -> (i32)
-
## import-dynamic should fail due to incompatible relocations.
-# RUN: not wasm-ld %t1.o -o %t5.wasm --unresolved-symbols=import-dynamic 2>&1 | FileCheck -check-prefix=ERRNOPIC %s
+# RUN: not wasm-ld %t/main.o -o %t5.wasm --unresolved-symbols=import-dynamic 2>&1 | FileCheck -check-prefix=ERRNOPIC %s
# ERRNOPIC: relocation R_WASM_MEMORY_ADDR_SLEB cannot be used against symbol `undef_data`; recompile with -fPIC
# ERRNOPIC: relocation R_WASM_TABLE_INDEX_SLEB cannot be used against symbol `undef_func`; recompile with -fPIC
+#--- main.s
+
+.functype undef_func () -> ()
+.functype get_data_addr () -> (i32)
+.functype get_func_addr () -> (i32)
+
.globl _start
_start:
.functype _start () -> ()
@@ -112,3 +118,12 @@ get_func_addr:
i32.const undef_func
return
end_function
+
+#--- secondary.s
+
+.functype undef_func () -> ()
+.globl foo
+foo:
+ .functype foo () -> ()
+ call undef_func
+ end_function
diff --git a/lld/wasm/Relocations.cpp b/lld/wasm/Relocations.cpp
index 09b0a24ff011a..09861319e77dc 100644
--- a/lld/wasm/Relocations.cpp
+++ b/lld/wasm/Relocations.cpp
@@ -42,14 +42,14 @@ static bool allowUndefined(const Symbol* sym) {
return config->allowUndefinedSymbols.count(sym->getName()) != 0;
}
-static void reportUndefined(Symbol *sym) {
+static void reportUndefined(ObjFile *file, Symbol *sym) {
if (!allowUndefined(sym)) {
switch (config->unresolvedSymbols) {
case UnresolvedPolicy::ReportError:
- error(toString(sym->getFile()) + ": undefined symbol: " + toString(*sym));
+ error(toString(file) + ": undefined symbol: " + toString(*sym));
break;
case UnresolvedPolicy::Warn:
- warn(toString(sym->getFile()) + ": undefined symbol: " + toString(*sym));
+ warn(toString(file) + ": undefined symbol: " + toString(*sym));
break;
case UnresolvedPolicy::Ignore:
LLVM_DEBUG(dbgs() << "ignoring undefined symbol: " + toString(*sym) +
@@ -171,7 +171,7 @@ void scanRelocations(InputChunk *chunk) {
}
} else if (sym->isUndefined() && !config->relocatable && !sym->isWeak()) {
// Report undefined symbols
- reportUndefined(sym);
+ reportUndefined(file, sym);
}
}
}
From 3c50cbfda4fc3ad85349167132f7ed809ecc685a Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Tue, 2 Jul 2024 13:23:53 -0500
Subject: [PATCH 025/246] [DeviceRTL] Make defined 'libc' functions weak in
OpenMP (#97356)
Summary:
These functions provide special-case implementations internal to the
OpenMP device runtime. This can potentially conflict with the symbols
pulled in from the actual GPU `libc`. This patch makes these functions
weak, so that when the GPU libc versions exist they override the
runtime's copies. This should not impact performance in the average
case: the old `-mlink-builtin-bitcode` path performs internalization,
which drops the weak attribute, and the new LTO path resolves to the
strong definition and then internalizes it.
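For illustration, the weak-symbol behavior being relied on here (a
hypothetical two-file example, not taken from the patch; answer() is a
made-up function):

  // runtime.cpp: the device runtime's special-case fallback.
  [[gnu::weak]] int answer() { return 0; }

  // libc.cpp: the real GPU libc provides a strong definition.
  int answer() { return 42; }

  // If both translation units are linked, the strong definition wins and
  // answer() returns 42; if only runtime.cpp is linked, the weak
  // fallback is used.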
---
offload/DeviceRTL/src/Debug.cpp | 4 ++--
offload/DeviceRTL/src/LibC.cpp | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/offload/DeviceRTL/src/Debug.cpp b/offload/DeviceRTL/src/Debug.cpp
index 31cd54e3de35c..4e16591cc6c51 100644
--- a/offload/DeviceRTL/src/Debug.cpp
+++ b/offload/DeviceRTL/src/Debug.cpp
@@ -26,8 +26,8 @@ using namespace ompx;
extern "C" {
void __assert_assume(bool condition) { __builtin_assume(condition); }
-void __assert_fail(const char *expr, const char *file, unsigned line,
- const char *function) {
+[[gnu::weak]] void __assert_fail(const char *expr, const char *file,
+ unsigned line, const char *function) {
__assert_fail_internal(expr, nullptr, file, line, function);
}
void __assert_fail_internal(const char *expr, const char *msg, const char *file,
diff --git a/offload/DeviceRTL/src/LibC.cpp b/offload/DeviceRTL/src/LibC.cpp
index e587c3057f5ba..4bca5d29643fe 100644
--- a/offload/DeviceRTL/src/LibC.cpp
+++ b/offload/DeviceRTL/src/LibC.cpp
@@ -49,7 +49,7 @@ int32_t omp_vprintf(const char *Format, void *Arguments, uint32_t) {
extern "C" {
-int memcmp(const void *lhs, const void *rhs, size_t count) {
+[[gnu::weak]] int memcmp(const void *lhs, const void *rhs, size_t count) {
auto *L = reinterpret_cast<const unsigned char *>(lhs);
auto *R = reinterpret_cast<const unsigned char *>(rhs);
@@ -60,7 +60,7 @@ int memcmp(const void *lhs, const void *rhs, size_t count) {
return 0;
}
-void memset(void *dst, int C, size_t count) {
+[[gnu::weak]] void memset(void *dst, int C, size_t count) {
auto *dstc = reinterpret_cast<char *>(dst);
for (size_t I = 0; I < count; ++I)
dstc[I] = C;
From 2f89d4a8c79a2e88f2749c7460886e0d776f3aff Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Tue, 2 Jul 2024 19:32:28 +0100
Subject: [PATCH 026/246] [SCEV] Split collecting and applying rewrite info
from loop guards (NFC) (#97316)
Introduce a new LoopGuards class to track info from loop guards and split
off collecting rewrite info to LoopGuards::collect. This allows users of
applyLoopGuards to collect rewrite info once in cases where the same
loop guards are applied multiple times.
This is used to collect rewrite info once in howFarToZero, which saves a
bit of compile-time:
stage1-O3: -0.04%
stage1-ReleaseThinLTO: -0.02%
stage1-ReleaseLTO-g: -0.04%
stage2-O3: -0.02%
https://llvm-compile-time-tracker.com/compare.php?from=117b53ae38428ca66eaa886fb432e6f09db88fe4&to=4ffb7b2e1c99081ccebe6f236c48a0be2f64b6ff&stat=instructions:u
Notably this reduces compile time for mafft by 0.9% with -O3, 0.11%
with LTO and 0.12% with stage2-O3.
PR: https://github.com/llvm/llvm-project/pull/97316
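Usage sketch, based on the howFarToZero changes below (Step, Distance,
L and SE stand for the values already in scope there):

  // Collect the rewrite information from loop guards once...
  ScalarEvolution::LoopGuards Guards =
      ScalarEvolution::LoopGuards::collect(L, SE);
  // ...then apply it to several expressions without re-walking the
  // dominating conditions each time.
  const SCEV *StepWLG = SE.applyLoopGuards(Step, Guards);
  APInt MaxBECount =
      SE.getUnsignedRangeMax(SE.applyLoopGuards(Distance, Guards));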
---
llvm/include/llvm/Analysis/ScalarEvolution.h | 18 +
llvm/lib/Analysis/ScalarEvolution.cpp | 336 ++++++++++---------
2 files changed, 196 insertions(+), 158 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 97b30daf4427a..d9bfca763819f 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1299,8 +1299,26 @@ class ScalarEvolution {
/// sharpen it.
void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags);
+ class LoopGuards {
+ DenseMap<const SCEV *, const SCEV *> RewriteMap;
+ bool PreserveNUW = false;
+ bool PreserveNSW = false;
+ ScalarEvolution &SE;
+
+ LoopGuards(ScalarEvolution &SE) : SE(SE) {}
+
+ public:
+ /// Collect rewrite map for loop guards for loop \p L, together with flags
+ /// indicating if NUW and NSW can be preserved during rewriting.
+ static LoopGuards collect(const Loop *L, ScalarEvolution &SE);
+
+ /// Try to apply the collected loop guards to \p Expr.
+ const SCEV *rewrite(const SCEV *Expr) const;
+ };
+
/// Try to apply information from loop guards for \p L to \p Expr.
const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);
+ const SCEV *applyLoopGuards(const SCEV *Expr, const LoopGuards &Guards);
/// Return true if the loop has no abnormal exits. That is, if the loop
/// is not infinite, it must exit through an explicit edge in the CFG.
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index e998fe9452ad7..430e1c6d8f8c6 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -10490,8 +10490,9 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
if (!isLoopInvariant(Step, L))
return getCouldNotCompute();
+ LoopGuards Guards = LoopGuards::collect(L, *this);
// Specialize step for this loop so we get context sensitive facts below.
- const SCEV *StepWLG = applyLoopGuards(Step, L);
+ const SCEV *StepWLG = applyLoopGuards(Step, Guards);
// For positive steps (counting up until unsigned overflow):
// N = -Start/Step (as unsigned)
@@ -10508,7 +10509,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
// N = Distance (as unsigned)
if (StepC &&
(StepC->getValue()->isOne() || StepC->getValue()->isMinusOne())) {
- APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
+ APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, Guards));
MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance));
// When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
@@ -10549,7 +10550,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
const SCEV *ConstantMax = getCouldNotCompute();
if (Exact != getCouldNotCompute()) {
- APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
+ APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, Guards));
ConstantMax =
getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact)));
}
@@ -10566,7 +10567,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
const SCEV *M = E;
if (E != getCouldNotCompute()) {
- APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L));
+ APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, Guards));
M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E)));
}
auto *S = isa<SCEVCouldNotCompute>(E) ? M : E;
@@ -15086,112 +15087,9 @@ bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
return false;
}
-/// A rewriter to replace SCEV expressions in Map with the corresponding entry
-/// in the map. It skips AddRecExpr because we cannot guarantee that the
-/// replacement is loop invariant in the loop of the AddRec.
-class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
- const DenseMap<const SCEV *, const SCEV *> ⤅
-
- SCEV::NoWrapFlags FlagMask = SCEV::FlagAnyWrap;
-
-public:
- SCEVLoopGuardRewriter(ScalarEvolution &SE,
- DenseMap<const SCEV *, const SCEV *> &M,
- bool PreserveNUW, bool PreserveNSW)
- : SCEVRewriteVisitor(SE), Map(M) {
- if (PreserveNUW)
- FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNUW);
- if (PreserveNSW)
- FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNSW);
- }
-
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
-
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end())
- return Expr;
- return I->second;
- }
-
- const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end()) {
- // If we didn't find the extact ZExt expr in the map, check if there's an
- // entry for a smaller ZExt we can use instead.
- Type *Ty = Expr->getType();
- const SCEV *Op = Expr->getOperand(0);
- unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
- while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
- Bitwidth > Op->getType()->getScalarSizeInBits()) {
- Type *NarrowTy = IntegerType::get(SE.getContext(), Bitwidth);
- auto *NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
- auto I = Map.find(NarrowExt);
- if (I != Map.end())
- return SE.getZeroExtendExpr(I->second, Ty);
- Bitwidth = Bitwidth / 2;
- }
-
- return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
- Expr);
- }
- return I->second;
- }
-
- const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end())
- return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSignExtendExpr(
- Expr);
- return I->second;
- }
-
- const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end())
- return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitUMinExpr(Expr);
- return I->second;
- }
-
- const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end())
- return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSMinExpr(Expr);
- return I->second;
- }
-
- const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (const auto *Op : Expr->operands()) {
- Operands.push_back(SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
- Changed |= Op != Operands.back();
- }
- // We are only replacing operands with equivalent values, so transfer the
- // flags from the original expression.
- return !Changed
- ? Expr
- : SE.getAddExpr(Operands, ScalarEvolution::maskFlags(
- Expr->getNoWrapFlags(), FlagMask));
- }
-
- const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (const auto *Op : Expr->operands()) {
- Operands.push_back(SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
- Changed |= Op != Operands.back();
- }
- // We are only replacing operands with equivalent values, so transfer the
- // flags from the original expression.
- return !Changed
- ? Expr
- : SE.getMulExpr(Operands, ScalarEvolution::maskFlags(
- Expr->getNoWrapFlags(), FlagMask));
- }
-};
-
-const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
+ScalarEvolution::LoopGuards
+ScalarEvolution::LoopGuards::collect(const Loop *L, ScalarEvolution &SE) {
+ LoopGuards Guards(SE);
SmallVector<const SCEV *> ExprsToRewrite;
auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
const SCEV *RHS,
@@ -15211,7 +15109,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// Check for a condition of the form (-C1 + X < C2). InstCombine will
// create this form when combining two checks of the form (X u< C2 + C1) and
// (X >=u C1).
- auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap,
+ auto MatchRangeCheckIdiom = [&SE, Predicate, LHS, RHS, &RewriteMap,
&ExprsToRewrite]() {
auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
if (!AddExpr || AddExpr->getNumOperands() != 2)
@@ -15232,9 +15130,10 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
return false;
auto I = RewriteMap.find(LHSUnknown);
const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
- RewriteMap[LHSUnknown] = getUMaxExpr(
- getConstant(ExactRegion.getUnsignedMin()),
- getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
+ RewriteMap[LHSUnknown] = SE.getUMaxExpr(
+ SE.getConstant(ExactRegion.getUnsignedMin()),
+ SE.getUMinExpr(RewrittenLHS,
+ SE.getConstant(ExactRegion.getUnsignedMax())));
ExprsToRewrite.push_back(LHSUnknown);
return true;
};
@@ -15287,7 +15186,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
APInt Rem = ExprVal.urem(DivisorVal);
if (!Rem.isZero())
// return the SCEV: Expr + Divisor - Expr % Divisor
- return getConstant(ExprVal + DivisorVal - Rem);
+ return SE.getConstant(ExprVal + DivisorVal - Rem);
return Expr;
};
@@ -15302,7 +15201,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
return Expr;
APInt Rem = ExprVal.urem(DivisorVal);
// return the SCEV: Expr - Expr % Divisor
- return getConstant(ExprVal - Rem);
+ return SE.getConstant(ExprVal - Rem);
};
// Apply divisibility by \p Divisor on MinMaxExpr with constant values,
@@ -15318,14 +15217,14 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
return MinMaxExpr;
auto IsMin =
isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
- assert(isKnownNonNegative(MinMaxLHS) &&
+ assert(SE.isKnownNonNegative(MinMaxLHS) &&
"Expected non-negative operand!");
auto *DivisibleExpr =
IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor)
: GetNextSCEVDividesByDivisor(MinMaxLHS, Divisor);
SmallVector<const SCEV *> Ops = {
ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
- return getMinMaxExpr(SCTy, Ops);
+ return SE.getMinMaxExpr(SCTy, Ops);
};
// If we have LHS == 0, check if LHS is computing a property of some unknown
@@ -15337,14 +15236,14 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// explicitly express that.
const SCEV *URemLHS = nullptr;
const SCEV *URemRHS = nullptr;
- if (matchURem(LHS, URemLHS, URemRHS)) {
+ if (SE.matchURem(LHS, URemLHS, URemRHS)) {
if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
auto I = RewriteMap.find(LHSUnknown);
const SCEV *RewrittenLHS =
I != RewriteMap.end() ? I->second : LHSUnknown;
RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
const auto *Multiple =
- getMulExpr(getUDivExpr(RewrittenLHS, URemRHS), URemRHS);
+ SE.getMulExpr(SE.getUDivExpr(RewrittenLHS, URemRHS), URemRHS);
RewriteMap[LHSUnknown] = Multiple;
ExprsToRewrite.push_back(LHSUnknown);
return;
@@ -15353,7 +15252,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
}
// Do not apply information for constants or if RHS contains an AddRec.
- if (isa<SCEVConstant>(LHS) || containsAddRecurrence(RHS))
+ if (isa<SCEVConstant>(LHS) || SE.containsAddRecurrence(RHS))
return;
// If RHS is SCEVUnknown, make sure the information is applied to it.
@@ -15412,7 +15311,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// Return true if Expr is known to divide by \p DividesBy.
std::function<bool(const SCEV *, const SCEV *&)> IsKnownToDivideBy =
[&](const SCEV *Expr, const SCEV *DividesBy) {
- if (getURemExpr(Expr, DividesBy)->isZero())
+ if (SE.getURemExpr(Expr, DividesBy)->isZero())
return true;
if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
return IsKnownToDivideBy(MinMax->getOperand(0), DividesBy) &&
@@ -15438,21 +15337,21 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// We cannot express strict predicates in SCEV, so instead we replace them
// with non-strict ones against plus or minus one of RHS depending on the
// predicate.
- const SCEV *One = getOne(RHS->getType());
+ const SCEV *One = SE.getOne(RHS->getType());
switch (Predicate) {
case CmpInst::ICMP_ULT:
if (RHS->getType()->isPointerTy())
return;
- RHS = getUMaxExpr(RHS, One);
+ RHS = SE.getUMaxExpr(RHS, One);
[[fallthrough]];
case CmpInst::ICMP_SLT: {
- RHS = getMinusSCEV(RHS, One);
+ RHS = SE.getMinusSCEV(RHS, One);
RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
break;
}
case CmpInst::ICMP_UGT:
case CmpInst::ICMP_SGT:
- RHS = getAddExpr(RHS, One);
+ RHS = SE.getAddExpr(RHS, One);
RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
break;
case CmpInst::ICMP_ULE:
@@ -15486,25 +15385,25 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
switch (Predicate) {
case CmpInst::ICMP_ULT:
case CmpInst::ICMP_ULE:
- To = getUMinExpr(FromRewritten, RHS);
+ To = SE.getUMinExpr(FromRewritten, RHS);
if (auto *UMax = dyn_cast<SCEVUMaxExpr>(FromRewritten))
EnqueueOperands(UMax);
break;
case CmpInst::ICMP_SLT:
case CmpInst::ICMP_SLE:
- To = getSMinExpr(FromRewritten, RHS);
+ To = SE.getSMinExpr(FromRewritten, RHS);
if (auto *SMax = dyn_cast<SCEVSMaxExpr>(FromRewritten))
EnqueueOperands(SMax);
break;
case CmpInst::ICMP_UGT:
case CmpInst::ICMP_UGE:
- To = getUMaxExpr(FromRewritten, RHS);
+ To = SE.getUMaxExpr(FromRewritten, RHS);
if (auto *UMin = dyn_cast<SCEVUMinExpr>(FromRewritten))
EnqueueOperands(UMin);
break;
case CmpInst::ICMP_SGT:
case CmpInst::ICMP_SGE:
- To = getSMaxExpr(FromRewritten, RHS);
+ To = SE.getSMaxExpr(FromRewritten, RHS);
if (auto *SMin = dyn_cast<SCEVSMinExpr>(FromRewritten))
EnqueueOperands(SMin);
break;
@@ -15517,7 +15416,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
cast<SCEVConstant>(RHS)->getValue()->isNullValue()) {
const SCEV *OneAlignedUp =
DividesBy ? GetNextSCEVDividesByDivisor(One, DividesBy) : One;
- To = getUMaxExpr(FromRewritten, OneAlignedUp);
+ To = SE.getUMaxExpr(FromRewritten, OneAlignedUp);
}
break;
default:
@@ -15532,22 +15431,23 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
BasicBlock *Header = L->getHeader();
SmallVector<PointerIntPair<Value *, 1, bool>> Terms;
// First, collect information from assumptions dominating the loop.
- for (auto &AssumeVH : AC.assumptions()) {
+ for (auto &AssumeVH : SE.AC.assumptions()) {
if (!AssumeVH)
continue;
auto *AssumeI = cast<CallInst>(AssumeVH);
- if (!DT.dominates(AssumeI, Header))
+ if (!SE.DT.dominates(AssumeI, Header))
continue;
Terms.emplace_back(AssumeI->getOperand(0), true);
}
// Second, collect information from llvm.experimental.guards dominating the loop.
- auto *GuardDecl = F.getParent()->getFunction(
+ auto *GuardDecl = SE.F.getParent()->getFunction(
Intrinsic::getName(Intrinsic::experimental_guard));
if (GuardDecl)
for (const auto *GU : GuardDecl->users())
if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
- if (Guard->getFunction() == Header->getParent() && DT.dominates(Guard, Header))
+ if (Guard->getFunction() == Header->getParent() &&
+ SE.DT.dominates(Guard, Header))
Terms.emplace_back(Guard->getArgOperand(0), true);
// Third, collect conditions from dominating branches. Starting at the loop
@@ -15557,7 +15457,8 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// TODO: share this logic with isLoopEntryGuardedByCond.
for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
L->getLoopPredecessor(), Header);
- Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
+ Pair.first;
+ Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
const BranchInst *LoopEntryPredicate =
dyn_cast<BranchInst>(Pair.first->getTerminator());
@@ -15568,11 +15469,10 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
LoopEntryPredicate->getSuccessor(0) == Pair.second);
}
- // Now apply the information from the collected conditions to RewriteMap.
- // Conditions are processed in reverse order, so the earliest conditions is
- // processed first. This ensures the SCEVs with the shortest dependency chains
- // are constructed first.
- DenseMap<const SCEV *, const SCEV *> RewriteMap;
+ // Now apply the information from the collected conditions to
+ // Guards.RewriteMap. Conditions are processed in reverse order, so the
+  // earliest condition is processed first. This ensures the SCEVs with the
+ // shortest dependency chains are constructed first.
for (auto [Term, EnterIfTrue] : reverse(Terms)) {
SmallVector<Value *, 8> Worklist;
SmallPtrSet<Value *, 8> Visited;
@@ -15585,9 +15485,9 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
auto Predicate =
EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
- const auto *LHS = getSCEV(Cmp->getOperand(0));
- const auto *RHS = getSCEV(Cmp->getOperand(1));
- CollectCondition(Predicate, LHS, RHS, RewriteMap);
+ const auto *LHS = SE.getSCEV(Cmp->getOperand(0));
+ const auto *RHS = SE.getSCEV(Cmp->getOperand(1));
+ CollectCondition(Predicate, LHS, RHS, Guards.RewriteMap);
continue;
}
@@ -15600,18 +15500,17 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
}
}
- if (RewriteMap.empty())
- return Expr;
-
// Let the rewriter preserve NUW/NSW flags if the unsigned/signed ranges of
// the replacement expressions are contained in the ranges of the replaced
// expressions.
- bool PreserveNUW = true;
- bool PreserveNSW = true;
+ Guards.PreserveNUW = true;
+ Guards.PreserveNSW = true;
for (const SCEV *Expr : ExprsToRewrite) {
- const SCEV *RewriteTo = RewriteMap[Expr];
- PreserveNUW &= getUnsignedRange(Expr).contains(getUnsignedRange(RewriteTo));
- PreserveNSW &= getSignedRange(Expr).contains(getSignedRange(RewriteTo));
+ const SCEV *RewriteTo = Guards.RewriteMap[Expr];
+ Guards.PreserveNUW &=
+ SE.getUnsignedRange(Expr).contains(SE.getUnsignedRange(RewriteTo));
+ Guards.PreserveNSW &=
+ SE.getSignedRange(Expr).contains(SE.getSignedRange(RewriteTo));
}
// Now that all rewrite information is collected, rewrite the collected
@@ -15619,13 +15518,134 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// sub-expressions.
if (ExprsToRewrite.size() > 1) {
for (const SCEV *Expr : ExprsToRewrite) {
- const SCEV *RewriteTo = RewriteMap[Expr];
- RewriteMap.erase(Expr);
- SCEVLoopGuardRewriter Rewriter(*this, RewriteMap, PreserveNUW,
- PreserveNSW);
- RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)});
+ const SCEV *RewriteTo = Guards.RewriteMap[Expr];
+ Guards.RewriteMap.erase(Expr);
+ Guards.RewriteMap.insert({Expr, Guards.rewrite(RewriteTo)});
}
}
- SCEVLoopGuardRewriter Rewriter(*this, RewriteMap, PreserveNUW, PreserveNSW);
+ return Guards;
+}
+
+const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
+ /// A rewriter to replace SCEV expressions in Map with the corresponding entry
+ /// in the map. It skips AddRecExpr because we cannot guarantee that the
+ /// replacement is loop invariant in the loop of the AddRec.
+ class SCEVLoopGuardRewriter
+ : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
+ const DenseMap<const SCEV *, const SCEV *> ⤅
+
+ SCEV::NoWrapFlags FlagMask = SCEV::FlagAnyWrap;
+
+ public:
+ SCEVLoopGuardRewriter(ScalarEvolution &SE,
+ const ScalarEvolution::LoopGuards &Guards)
+ : SCEVRewriteVisitor(SE), Map(Guards.RewriteMap) {
+ if (Guards.PreserveNUW)
+ FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNUW);
+ if (Guards.PreserveNSW)
+ FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNSW);
+ }
+
+ const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
+
+ const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ auto I = Map.find(Expr);
+ if (I == Map.end())
+ return Expr;
+ return I->second;
+ }
+
+ const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
+ auto I = Map.find(Expr);
+ if (I == Map.end()) {
+      // If we didn't find the exact ZExt expr in the map, check if there's
+ // an entry for a smaller ZExt we can use instead.
+ Type *Ty = Expr->getType();
+ const SCEV *Op = Expr->getOperand(0);
+ unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
+ while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
+ Bitwidth > Op->getType()->getScalarSizeInBits()) {
+ Type *NarrowTy = IntegerType::get(SE.getContext(), Bitwidth);
+ auto *NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
+ auto I = Map.find(NarrowExt);
+ if (I != Map.end())
+ return SE.getZeroExtendExpr(I->second, Ty);
+ Bitwidth = Bitwidth / 2;
+ }
+
+ return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
+ Expr);
+ }
+ return I->second;
+ }
+
+ const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
+ auto I = Map.find(Expr);
+ if (I == Map.end())
+ return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSignExtendExpr(
+ Expr);
+ return I->second;
+ }
+
+ const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
+ auto I = Map.find(Expr);
+ if (I == Map.end())
+ return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitUMinExpr(Expr);
+ return I->second;
+ }
+
+ const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
+ auto I = Map.find(Expr);
+ if (I == Map.end())
+ return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSMinExpr(Expr);
+ return I->second;
+ }
+
+ const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (const auto *Op : Expr->operands()) {
+ Operands.push_back(
+ SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
+ Changed |= Op != Operands.back();
+ }
+ // We are only replacing operands with equivalent values, so transfer the
+ // flags from the original expression.
+ return !Changed ? Expr
+ : SE.getAddExpr(Operands,
+ ScalarEvolution::maskFlags(
+ Expr->getNoWrapFlags(), FlagMask));
+ }
+
+ const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (const auto *Op : Expr->operands()) {
+ Operands.push_back(
+ SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
+ Changed |= Op != Operands.back();
+ }
+ // We are only replacing operands with equivalent values, so transfer the
+ // flags from the original expression.
+ return !Changed ? Expr
+ : SE.getMulExpr(Operands,
+ ScalarEvolution::maskFlags(
+ Expr->getNoWrapFlags(), FlagMask));
+ }
+ };
+
+ if (RewriteMap.empty())
+ return Expr;
+
+ SCEVLoopGuardRewriter Rewriter(SE, *this);
return Rewriter.visit(Expr);
}
+
+const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
+ return applyLoopGuards(Expr, LoopGuards::collect(L, *this));
+}
+
+const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr,
+ const LoopGuards &Guards) {
+ return Guards.rewrite(Expr);
+}
From 072e81db7a974bfb27b9b65d73330de7dd739821 Mon Sep 17 00:00:00 2001
From: Aaron Ballman <aaron at aaronballman.com>
Date: Tue, 2 Jul 2024 14:44:20 -0400
Subject: [PATCH 027/246] Revert "[Clang][Comments] Attach comments to decl
even if preproc directives are in between (#88367)"
This reverts commit 9f04d75b2bd8ba83863db74ebe1a5c08cfc5815c.
There was post-commit feedback on the direction this PR took.
---
clang/docs/ReleaseNotes.rst | 16 --
clang/lib/AST/ASTContext.cpp | 7 +-
clang/lib/Headers/amxcomplexintrin.h | 18 --
clang/lib/Headers/ia32intrin.h | 7 -
clang/test/Index/annotate-comments.cpp | 5 +-
clang/unittests/AST/DeclTest.cpp | 310 -------------------------
6 files changed, 5 insertions(+), 358 deletions(-)
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 181d10008bc8c..1537eaaba0c66 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -147,22 +147,6 @@ Clang Frontend Potentially Breaking Changes
- The ``hasTypeLoc`` AST matcher will no longer match a ``classTemplateSpecializationDecl``;
existing uses should switch to ``templateArgumentLoc`` or ``hasAnyTemplateArgumentLoc`` instead.
-- The comment parser now matches comments to declarations even if there is a
- preprocessor macro in between the comment and declaration. This change is
- intended to improve Clang's support for parsing documentation comments and
- to better conform to Doxygen's behavior.
-
- This has the potential to cause ``-Wdocumentation`` warnings, especially in
- cases where a function-like macro has a documentation comment and is followed
- immediately by a normal function. The function-like macro's documentation
- comments will be attributed to the subsequent function and may cause
- ``-Wdocumentation`` warnings such as mismatched parameter names, or invalid
- return documentation comments.
-
- In cases where the ``-Wdocumentation`` warnings are thrown, the suggested fix
- is to document the declaration following the macro so that the warnings are
- fixed.
-
Clang Python Bindings Potentially Breaking Changes
--------------------------------------------------
- Renamed ``CursorKind`` variant 272 from ``OMP_TEAMS_DISTRIBUTE_DIRECTIVE``
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 17ca5ad52d78e..84deaf5429df7 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -284,10 +284,9 @@ RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
StringRef Text(Buffer + CommentEndOffset,
DeclLocDecomp.second - CommentEndOffset);
- // There should be no other declarations between comment and declaration.
- // Preprocessor directives are implicitly allowed to be between a comment and
- // its associated decl.
- if (Text.find_last_of(";{}@") != StringRef::npos)
+ // There should be no other declarations or preprocessor directives between
+ // comment and declaration.
+ if (Text.find_last_of(";{}#@") != StringRef::npos)
return nullptr;
return CommentBeforeDecl;
diff --git a/clang/lib/Headers/amxcomplexintrin.h b/clang/lib/Headers/amxcomplexintrin.h
index 6ae40d2eab5c8..84ef972fcadf0 100644
--- a/clang/lib/Headers/amxcomplexintrin.h
+++ b/clang/lib/Headers/amxcomplexintrin.h
@@ -107,24 +107,6 @@
/// The 2nd source tile. Max size is 1024 Bytes.
#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b)
-/// Perform matrix multiplication of two tiles containing complex elements and
-/// accumulate the results into a packed single precision tile.
-///
-/// \param m
-/// The number of rows in the first tile and the number of rows in the result
-/// tile.
-/// \param n
-/// The number of columns in the second tile and the number of columns in the
-/// result tile.
-/// \param k
-/// The number of columns in the first tile and the number of rows in the
-/// second tile.
-/// \param dst
-/// Pointer to the destination tile where the result will be stored.
-/// \param src1
-/// Pointer to the first source tile.
-/// \param src2
-/// Pointer to the second source tile.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
_tile1024i dst, _tile1024i src1, _tile1024i src2) {
diff --git a/clang/lib/Headers/ia32intrin.h b/clang/lib/Headers/ia32intrin.h
index 1cdc53584a1f5..8e65f232a0def 100644
--- a/clang/lib/Headers/ia32intrin.h
+++ b/clang/lib/Headers/ia32intrin.h
@@ -533,13 +533,6 @@ __rdtscp(unsigned int *__A) {
/// \see __rdpmc
#define _rdpmc(A) __rdpmc(A)
-/// Invalidates the contents of the processor's internal caches.
-/// This function writes back and invalidates all modified cache lines in
-/// the processor.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the \c WBINVD instruction.
static __inline__ void __DEFAULT_FN_ATTRS
_wbinvd(void) {
__builtin_ia32_wbinvd();
diff --git a/clang/test/Index/annotate-comments.cpp b/clang/test/Index/annotate-comments.cpp
index bff25d46cf80e..6f9f8f0bbbc9e 100644
--- a/clang/test/Index/annotate-comments.cpp
+++ b/clang/test/Index/annotate-comments.cpp
@@ -204,9 +204,9 @@ void isdoxy45(void);
/// Ggg. IS_DOXYGEN_END
void isdoxy46(void);
-/// isdoxy47 IS_DOXYGEN_SINGLE
+/// IS_DOXYGEN_NOT_ATTACHED
#define FOO
-void isdoxy47(void);
+void notdoxy47(void);
/// IS_DOXYGEN_START Aaa bbb
/// \param ccc
@@ -330,7 +330,6 @@ void isdoxy54(int);
// CHECK: annotate-comments.cpp:185:6: FunctionDecl=isdoxy44:{{.*}} BriefComment=[IS_DOXYGEN_START Aaa bbb ccc.]
// CHECK: annotate-comments.cpp:195:6: FunctionDecl=isdoxy45:{{.*}} BriefComment=[Ddd eee. Fff.]
// CHECK: annotate-comments.cpp:205:6: FunctionDecl=isdoxy46:{{.*}} BriefComment=[Ddd eee. Fff.]
-// CHECK: annotate-comments.cpp:209:6: FunctionDecl=isdoxy47:{{.*}} isdoxy47 IS_DOXYGEN_SINGLE
// CHECK: annotate-comments.cpp:214:6: FunctionDecl=isdoxy48:{{.*}} BriefComment=[IS_DOXYGEN_START Aaa bbb]
// CHECK: annotate-comments.cpp:218:6: FunctionDecl=isdoxy49:{{.*}} BriefComment=[IS_DOXYGEN_START Aaa]
// CHECK: annotate-comments.cpp:222:6: FunctionDecl=isdoxy50:{{.*}} BriefComment=[Returns ddd IS_DOXYGEN_END]
diff --git a/clang/unittests/AST/DeclTest.cpp b/clang/unittests/AST/DeclTest.cpp
index 8dca011ba8779..16aa2b50b7a06 100644
--- a/clang/unittests/AST/DeclTest.cpp
+++ b/clang/unittests/AST/DeclTest.cpp
@@ -576,313 +576,3 @@ void instantiate_template() {
EXPECT_EQ(GetNameInfoRange(Matches[1]), "<input.cc:6:14, col:15>");
EXPECT_EQ(GetNameInfoRange(Matches[2]), "<input.cc:6:14, col:15>");
}
-
-TEST(Decl, CommentsAttachedToDecl1) {
- const SmallVector<StringRef> Sources{
- R"(
- /// Test comment
- void f();
- )",
-
- R"(
- /// Test comment
-
- void f();
- )",
-
- R"(
- /// Test comment
- #if 0
- // tralala
- #endif
- void f();
- )",
-
- R"(
- /// Test comment
-
- #if 0
- // tralala
- #endif
-
- void f();
- )",
-
- R"(
- /// Test comment
- #ifdef DOCS
- template<typename T>
- #endif
- void f();
- )",
-
- R"(
- /// Test comment
-
- #ifdef DOCS
- template<typename T>
- #endif
-
- void f();
- )",
- };
-
- for (const auto code : Sources) {
- auto AST = tooling::buildASTFromCodeWithArgs(code, /*Args=*/{"-std=c++20"});
- ASTContext &Ctx = AST->getASTContext();
-
- auto const *F = selectFirst<FunctionDecl>(
- "id", match(functionDecl(hasName("f")).bind("id"), Ctx));
- ASSERT_NE(F, nullptr);
-
- auto const *C = Ctx.getRawCommentForDeclNoCache(F);
- ASSERT_NE(C, nullptr);
- EXPECT_EQ(C->getRawText(Ctx.getSourceManager()), "/// Test comment");
- }
-}
-
-TEST(Decl, CommentsAttachedToDecl2) {
- const SmallVector<StringRef> Sources{
- R"(
- /** Test comment
- */
- void f();
- )",
-
- R"(
- /** Test comment
- */
-
- void f();
- )",
-
- R"(
- /** Test comment
- */
- #if 0
- /* tralala */
- #endif
- void f();
- )",
-
- R"(
- /** Test comment
- */
-
- #if 0
- /* tralala */
- #endif
-
- void f();
- )",
-
- R"(
- /** Test comment
- */
- #ifdef DOCS
- template<typename T>
- #endif
- void f();
- )",
-
- R"(
- /** Test comment
- */
-
- #ifdef DOCS
- template<typename T>
- #endif
-
- void f();
- )",
- };
-
- for (const auto code : Sources) {
- auto AST = tooling::buildASTFromCodeWithArgs(code, /*Args=*/{"-std=c++20"});
- ASTContext &Ctx = AST->getASTContext();
-
- auto const *F = selectFirst<FunctionDecl>(
- "id", match(functionDecl(hasName("f")).bind("id"), Ctx));
- ASSERT_NE(F, nullptr);
-
- auto const *C = Ctx.getRawCommentForDeclNoCache(F);
- ASSERT_NE(C, nullptr);
- EXPECT_EQ(C->getRawText(Ctx.getSourceManager()),
- "/** Test comment\n */");
- }
-}
-
-TEST(Decl, CommentsAttachedToDecl3) {
- const SmallVector<StringRef> Sources{
- R"(
- /// @brief Test comment
- void f();
- )",
-
- R"(
- /// @brief Test comment
-
- void f();
- )",
-
- R"(
- /// @brief Test comment
- #if 0
- // tralala
- #endif
- void f();
- )",
-
- R"(
- /// @brief Test comment
-
- #if 0
- // tralala
- #endif
-
- void f();
- )",
-
- R"(
- /// @brief Test comment
- #ifdef DOCS
- template<typename T>
- #endif
- void f();
- )",
-
- R"(
- /// @brief Test comment
-
- #ifdef DOCS
- template<typename T>
- #endif
-
- void f();
- )",
- };
-
- for (const auto code : Sources) {
- auto AST = tooling::buildASTFromCodeWithArgs(code, /*Args=*/{"-std=c++20"});
- ASTContext &Ctx = AST->getASTContext();
-
- auto const *F = selectFirst<FunctionDecl>(
- "id", match(functionDecl(hasName("f")).bind("id"), Ctx));
- ASSERT_NE(F, nullptr);
-
- auto const *C = Ctx.getRawCommentForDeclNoCache(F);
- ASSERT_NE(C, nullptr);
- EXPECT_EQ(C->getRawText(Ctx.getSourceManager()), "/// @brief Test comment");
- }
-}
-
-TEST(Decl, CommentsAttachedToDecl4) {
- const SmallVector<StringRef> Sources{
- R"(
- /** \brief Test comment
- */
- void f();
- )",
-
- R"(
- /** \brief Test comment
- */
-
- void f();
- )",
-
- R"(
- /** \brief Test comment
- */
- #if 0
- /* tralala */
- #endif
- void f();
- )",
-
- R"(
- /** \brief Test comment
- */
-
- #if 0
- /* tralala */
- #endif
-
- void f();
- )",
-
- R"(
- /** \brief Test comment
- */
- #ifdef DOCS
- template<typename T>
- #endif
- void f();
- )",
-
- R"(
- /** \brief Test comment
- */
-
- #ifdef DOCS
- template<typename T>
- #endif
-
- void f();
- )",
- };
-
- for (const auto code : Sources) {
- auto AST = tooling::buildASTFromCodeWithArgs(code, /*Args=*/{"-std=c++20"});
- ASTContext &Ctx = AST->getASTContext();
-
- auto const *F = selectFirst<FunctionDecl>(
- "id", match(functionDecl(hasName("f")).bind("id"), Ctx));
- ASSERT_NE(F, nullptr);
-
- auto const *C = Ctx.getRawCommentForDeclNoCache(F);
- ASSERT_NE(C, nullptr);
- EXPECT_EQ(C->getRawText(Ctx.getSourceManager()),
- "/** \\brief Test comment\n */");
- }
-}
-
-/// This example intentionally inserts characters between a doc comment and the
-/// associated declaration to verify that the comment does not become associated
-/// with the FunctionDecl.
-/// By default, Clang does not allow for other declarations (aside from
-/// preprocessor directives, as shown above) to be placed between a doc comment
-/// and a declaration.
-TEST(Decl, CommentsAttachedToDecl5) {
- const SmallVector<StringRef> Sources{
- R"(
- /// Test comment
- ;
- void f();
- )",
-
- R"(
- /// Test comment
- // @
- void f();
- )",
-
- R"(
- /// Test comment
- // {}
- void f();
- )",
- };
-
- for (const auto code : Sources) {
- auto AST = tooling::buildASTFromCodeWithArgs(code, /*Args=*/{"-std=c++20"});
- ASTContext &Ctx = AST->getASTContext();
-
- auto const *F = selectFirst<FunctionDecl>(
- "id", match(functionDecl(hasName("f")).bind("id"), Ctx));
- ASSERT_NE(F, nullptr);
-
- auto const *C = Ctx.getRawCommentForDeclNoCache(F);
- ASSERT_EQ(C, nullptr);
- }
-}
From b8eaa5bb10e1cf282fef130e372e57acc6a4b7e7 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Tue, 2 Jul 2024 11:46:31 -0700
Subject: [PATCH 028/246] [ProfileData] Remove the old version of
getValueProfDataFromInst (#97374)
I've migrated uses of the old version of getValueProfDataFromInst to
the one that returns SmallVector<InstrProfValueData, 4>. This patch
removes the old version.
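Migration sketch (Inst, MaxNumValueData and the value kind are
placeholders for whatever the caller already has in scope):

  // Old API, removed by this patch: a heap-allocated array plus an
  // out-parameter for the actual element count.
  uint32_t ActualNumValueData;
  uint64_t TotalC;
  std::unique_ptr<InstrProfValueData[]> VD = getValueProfDataFromInst(
      Inst, IPVK_IndirectCallTarget, MaxNumValueData, ActualNumValueData,
      TotalC);

  // New API: the returned vector's size() replaces ActualNumValueData.
  SmallVector<InstrProfValueData, 4> VDs = getValueProfDataFromInst(
      Inst, IPVK_IndirectCallTarget, MaxNumValueData, TotalC);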
---
llvm/include/llvm/ProfileData/InstrProf.h | 10 -----
llvm/lib/ProfileData/InstrProf.cpp | 45 -----------------------
2 files changed, 55 deletions(-)
diff --git a/llvm/include/llvm/ProfileData/InstrProf.h b/llvm/include/llvm/ProfileData/InstrProf.h
index 50e6f1d3b9b1f..9b34cb0b651f7 100644
--- a/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/llvm/include/llvm/ProfileData/InstrProf.h
@@ -284,16 +284,6 @@ void annotateValueSite(Module &M, Instruction &Inst,
ArrayRef<InstrProfValueData> VDs, uint64_t Sum,
InstrProfValueKind ValueKind, uint32_t MaxMDCount);
-/// Extract the value profile data from \p Inst and returns them if \p Inst is
-/// annotated with value profile data. Returns nullptr otherwise. It's similar
-/// to `getValueProfDataFromInst` above except that an array is allocated only
-/// after a preliminary checking that the value profiles of kind `ValueKind`
-/// exist.
-std::unique_ptr<InstrProfValueData[]>
-getValueProfDataFromInst(const Instruction &Inst, InstrProfValueKind ValueKind,
- uint32_t MaxNumValueData, uint32_t &ActualNumValueData,
- uint64_t &TotalC, bool GetNoICPValue = false);
-
// TODO: Unify metadata name 'PGOFuncName' and 'PGOName', by supporting read
// of this metadata for backward compatibility and generating 'PGOName' only.
/// Extract the value profile data from \p Inst and return them if \p Inst is
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp
index 9dbaa2ca0f020..4695285787cf3 100644
--- a/llvm/lib/ProfileData/InstrProf.cpp
+++ b/llvm/lib/ProfileData/InstrProf.cpp
@@ -1342,51 +1342,6 @@ MDNode *mayHaveValueProfileOfKind(const Instruction &Inst,
return MD;
}
-static bool getValueProfDataFromInstImpl(const MDNode *const MD,
- const uint32_t MaxNumDataWant,
- InstrProfValueData ValueData[],
- uint32_t &ActualNumValueData,
- uint64_t &TotalC, bool GetNoICPValue) {
- const unsigned NOps = MD->getNumOperands();
- // Get total count
- ConstantInt *TotalCInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
- if (!TotalCInt)
- return false;
- TotalC = TotalCInt->getZExtValue();
- ActualNumValueData = 0;
-
- for (unsigned I = 3; I < NOps; I += 2) {
- if (ActualNumValueData >= MaxNumDataWant)
- break;
- ConstantInt *Value = mdconst::dyn_extract<ConstantInt>(MD->getOperand(I));
- ConstantInt *Count =
- mdconst::dyn_extract<ConstantInt>(MD->getOperand(I + 1));
- if (!Value || !Count)
- return false;
- uint64_t CntValue = Count->getZExtValue();
- if (!GetNoICPValue && (CntValue == NOMORE_ICP_MAGICNUM))
- continue;
- ValueData[ActualNumValueData].Value = Value->getZExtValue();
- ValueData[ActualNumValueData].Count = CntValue;
- ActualNumValueData++;
- }
- return true;
-}
-
-std::unique_ptr<InstrProfValueData[]>
-getValueProfDataFromInst(const Instruction &Inst, InstrProfValueKind ValueKind,
- uint32_t MaxNumValueData, uint32_t &ActualNumValueData,
- uint64_t &TotalC, bool GetNoICPValue) {
- MDNode *MD = mayHaveValueProfileOfKind(Inst, ValueKind);
- if (!MD)
- return nullptr;
- auto ValueDataArray = std::make_unique<InstrProfValueData[]>(MaxNumValueData);
- if (!getValueProfDataFromInstImpl(MD, MaxNumValueData, ValueDataArray.get(),
- ActualNumValueData, TotalC, GetNoICPValue))
- return nullptr;
- return ValueDataArray;
-}
-
SmallVector<InstrProfValueData, 4>
getValueProfDataFromInst(const Instruction &Inst, InstrProfValueKind ValueKind,
uint32_t MaxNumValueData, uint64_t &TotalC,
From 28a11cc49203b9af0875f4a78a681115478190b8 Mon Sep 17 00:00:00 2001
From: Mehdi Amini <joker.eph at gmail.com>
Date: Tue, 2 Jul 2024 20:57:16 +0200
Subject: [PATCH 029/246] Revert "Fix block merging" (#97460)
Reverts llvm/llvm-project#96871
Bots are broken.
---
.../BufferDeallocationSimplification.cpp | 9 +-
mlir/lib/Transforms/Utils/RegionUtils.cpp | 144 ++----------------
.../dealloc-branchop-interface.mlir | 20 +--
.../Linalg/detensorize_entry_block.mlir | 6 +-
mlir/test/Dialect/Linalg/detensorize_if.mlir | 67 ++++----
.../Dialect/Linalg/detensorize_while.mlir | 12 +-
.../Linalg/detensorize_while_impure_cf.mlir | 12 +-
.../Linalg/detensorize_while_pure_cf.mlir | 4 +-
.../Transforms/canonicalize-block-merge.mlir | 6 +-
mlir/test/Transforms/canonicalize-dce.mlir | 8 +-
.../Transforms/make-isolated-from-above.mlir | 18 +--
.../test-canonicalize-merge-large-blocks.mlir | 76 ---------
12 files changed, 93 insertions(+), 289 deletions(-)
delete mode 100644 mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
index 5227b22653eef..954485cfede3d 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
@@ -463,15 +463,10 @@ struct BufferDeallocationSimplificationPass
SplitDeallocWhenNotAliasingAnyOther,
RetainedMemrefAliasingAlwaysDeallocatedMemref>(&getContext(),
analysis);
- // We don't want that the block structure changes invalidating the
- // `BufferOriginAnalysis` so we apply the rewrites witha `Normal` level of
- // region simplification
- GreedyRewriteConfig config;
- config.enableRegionSimplification = GreedySimplifyRegionLevel::Normal;
populateDeallocOpCanonicalizationPatterns(patterns, &getContext());
- if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns),
- config)))
+ if (failed(
+ applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
signalPassFailure();
}
};
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index 412e2456295ad..4c0f15bafbaba 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -9,7 +9,6 @@
#include "mlir/Transforms/RegionUtils.h"
#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/IR/Block.h"
-#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
@@ -17,15 +16,11 @@
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
-#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallSet.h"
#include <deque>
-#include <iterator>
using namespace mlir;
@@ -704,8 +699,9 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
blockIterators.push_back(mergeBlock->begin());
// Update each of the predecessor terminators with the new arguments.
- SmallVector<SmallVector<Value, 8>, 2> newArguments(1 + blocksToMerge.size(),
- SmallVector<Value, 8>());
+ SmallVector<SmallVector<Value, 8>, 2> newArguments(
+ 1 + blocksToMerge.size(),
+ SmallVector<Value, 8>(operandsToMerge.size()));
unsigned curOpIndex = 0;
for (const auto &it : llvm::enumerate(operandsToMerge)) {
unsigned nextOpOffset = it.value().first - curOpIndex;
@@ -716,22 +712,13 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
Block::iterator &blockIter = blockIterators[i];
std::advance(blockIter, nextOpOffset);
auto &operand = blockIter->getOpOperand(it.value().second);
- Value operandVal = operand.get();
- Value *it = std::find(newArguments[i].begin(), newArguments[i].end(),
- operandVal);
- if (it == newArguments[i].end()) {
- newArguments[i].push_back(operandVal);
- // Update the operand and insert an argument if this is the leader.
- if (i == 0) {
- operand.set(leaderBlock->addArgument(operandVal.getType(),
- operandVal.getLoc()));
- }
- } else if (i == 0) {
- // If this is the leader, update the operand but do not insert a new
- // argument. Instead, the operand should point to one of the
- // arguments we already passed (and that contained `operandVal`)
- operand.set(leaderBlock->getArgument(
- std::distance(newArguments[i].begin(), it)));
+ newArguments[i][it.index()] = operand.get();
+
+ // Update the operand and insert an argument if this is the leader.
+ if (i == 0) {
+ Value operandVal = operand.get();
+ operand.set(leaderBlock->addArgument(operandVal.getType(),
+ operandVal.getLoc()));
}
}
}
@@ -831,109 +818,6 @@ static LogicalResult mergeIdenticalBlocks(RewriterBase &rewriter,
return success(anyChanged);
}
-static LogicalResult dropRedundantArguments(RewriterBase &rewriter,
- Block &block) {
- SmallVector<size_t> argsToErase;
-
- // Go through the arguments of the block
- for (size_t argIdx = 0; argIdx < block.getNumArguments(); argIdx++) {
- bool sameArg = true;
- Value commonValue;
-
- // Go through the block predecessor and flag if they pass to the block
- // different values for the same argument
- for (auto predIt = block.pred_begin(), predE = block.pred_end();
- predIt != predE; ++predIt) {
- auto branch = dyn_cast<BranchOpInterface>((*predIt)->getTerminator());
- if (!branch) {
- sameArg = false;
- break;
- }
- unsigned succIndex = predIt.getSuccessorIndex();
- SuccessorOperands succOperands = branch.getSuccessorOperands(succIndex);
- auto operands = succOperands.getForwardedOperands();
- if (!commonValue) {
- commonValue = operands[argIdx];
- } else {
- if (operands[argIdx] != commonValue) {
- sameArg = false;
- break;
- }
- }
- }
-
- // If they are passing the same value, drop the argument
- if (commonValue && sameArg) {
- argsToErase.push_back(argIdx);
-
- // Remove the argument from the block
- Value argVal = block.getArgument(argIdx);
- rewriter.replaceAllUsesWith(argVal, commonValue);
- }
- }
-
- // Remove the arguments
- for (auto argIdx : llvm::reverse(argsToErase)) {
- block.eraseArgument(argIdx);
-
- // Remove the argument from the branch ops
- for (auto predIt = block.pred_begin(), predE = block.pred_end();
- predIt != predE; ++predIt) {
- auto branch = cast<BranchOpInterface>((*predIt)->getTerminator());
- unsigned succIndex = predIt.getSuccessorIndex();
- SuccessorOperands succOperands = branch.getSuccessorOperands(succIndex);
- succOperands.erase(argIdx);
- }
- }
- return success(!argsToErase.empty());
-}
-
-/// This optimization drops redundant arguments to blocks. I.e., if a given
-/// argument to a block receives the same value from each of the block
-/// predecessors, we can remove the argument from the block and use the
-/// original value directly. This is a simple example:
-///
-/// %cond = llvm.call @rand() : () -> i1
-/// %val0 = llvm.mlir.constant(1 : i64) : i64
-/// %val1 = llvm.mlir.constant(2 : i64) : i64
-/// %val2 = llvm.mlir.constant(3 : i64) : i64
-/// llvm.cond_br %cond, ^bb1(%val0 : i64, %val1 : i64), ^bb2(%val0 : i64, %val2
-/// : i64)
-///
-/// ^bb1(%arg0 : i64, %arg1 : i64):
-/// llvm.call @foo(%arg0, %arg1)
-///
-/// The previous IR can be rewritten as:
-/// %cond = llvm.call @rand() : () -> i1
-/// %val0 = llvm.mlir.constant(1 : i64) : i64
-/// %val1 = llvm.mlir.constant(2 : i64) : i64
-/// %val2 = llvm.mlir.constant(3 : i64) : i64
-/// llvm.cond_br %cond, ^bb1(%val1 : i64), ^bb2(%val2 : i64)
-///
-/// ^bb1(%arg0 : i64):
-/// llvm.call @foo(%val0, %arg0)
-///
-static LogicalResult dropRedundantArguments(RewriterBase &rewriter,
- MutableArrayRef<Region> regions) {
- llvm::SmallSetVector<Region *, 1> worklist;
- for (auto &region : regions)
-   worklist.insert(&region);
- bool anyChanged = false;
- while (!worklist.empty()) {
- Region *region = worklist.pop_back_val();
-
- // Add any nested regions to the worklist.
- for (Block &block : *region) {
- anyChanged = succeeded(dropRedundantArguments(rewriter, block));
-
- for (auto &op : block)
- for (auto &nestedRegion : op.getRegions())
- worklist.insert(&nestedRegion);
- }
- }
- return success(anyChanged);
-}
-
//===----------------------------------------------------------------------===//
// Region Simplification
//===----------------------------------------------------------------------===//
@@ -948,12 +832,8 @@ LogicalResult mlir::simplifyRegions(RewriterBase &rewriter,
bool eliminatedBlocks = succeeded(eraseUnreachableBlocks(rewriter, regions));
bool eliminatedOpsOrArgs = succeeded(runRegionDCE(rewriter, regions));
bool mergedIdenticalBlocks = false;
- bool droppedRedundantArguments = false;
- if (mergeBlocks) {
+ if (mergeBlocks)
mergedIdenticalBlocks = succeeded(mergeIdenticalBlocks(rewriter, regions));
- droppedRedundantArguments =
- succeeded(dropRedundantArguments(rewriter, regions));
- }
return success(eliminatedBlocks || eliminatedOpsOrArgs ||
- mergedIdenticalBlocks || droppedRedundantArguments);
+ mergedIdenticalBlocks);
}
diff --git a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
index 8e14990502143..5e8104f83cc4d 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
@@ -178,7 +178,7 @@ func.func @condBranchDynamicTypeNested(
// CHECK-NEXT: ^bb1
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb6([[ARG1]], %false{{[0-9_]*}} :
+// CHECK: cf.br ^bb5([[ARG1]], %false{{[0-9_]*}} :
// CHECK: ^bb2([[IDX:%.*]]:{{.*}})
// CHECK: [[ALLOC1:%.*]] = memref.alloc([[IDX]])
// CHECK-NEXT: test.buffer_based
@@ -186,24 +186,20 @@ func.func @condBranchDynamicTypeNested(
// CHECK-NEXT: [[OWN:%.+]] = arith.select [[ARG0]], [[ARG0]], [[NOT_ARG0]]
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.cond_br{{.*}}, ^bb3, ^bb4
+// CHECK: cf.cond_br{{.*}}, ^bb3, ^bb3
// CHECK-NEXT: ^bb3:
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb5([[ALLOC1]], [[OWN]]
-// CHECK-NEXT: ^bb4:
+// CHECK: cf.br ^bb4([[ALLOC1]], [[OWN]]
+// CHECK-NEXT: ^bb4([[ALLOC2:%.*]]:{{.*}}, [[COND1:%.+]]:{{.*}})
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb5([[ALLOC1]], [[OWN]]
-// CHECK-NEXT: ^bb5([[ALLOC2:%.*]]:{{.*}}, [[COND1:%.+]]:{{.*}})
-// CHECK-NOT: bufferization.dealloc
-// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb6([[ALLOC2]], [[COND1]]
-// CHECK-NEXT: ^bb6([[ALLOC4:%.*]]:{{.*}}, [[COND2:%.+]]:{{.*}})
+// CHECK: cf.br ^bb5([[ALLOC2]], [[COND1]]
+// CHECK-NEXT: ^bb5([[ALLOC4:%.*]]:{{.*}}, [[COND2:%.+]]:{{.*}})
// CHECK-NEXT: [[BASE:%[a-zA-Z0-9_]+]]{{.*}} = memref.extract_strided_metadata [[ALLOC4]]
// CHECK-NEXT: [[OWN:%.+]]:2 = bufferization.dealloc ([[BASE]] :{{.*}}) if ([[COND2]]) retain ([[ALLOC4]], [[ARG2]] :
-// CHECK: cf.br ^bb7([[ALLOC4]], [[OWN]]#0
-// CHECK-NEXT: ^bb7([[ALLOC5:%.*]]:{{.*}}, [[COND3:%.+]]:{{.*}})
+// CHECK: cf.br ^bb6([[ALLOC4]], [[OWN]]#0
+// CHECK-NEXT: ^bb6([[ALLOC5:%.*]]:{{.*}}, [[COND3:%.+]]:{{.*}})
// CHECK: test.copy
// CHECK: [[BASE:%[a-zA-Z0-9_]+]]{{.*}} = memref.extract_strided_metadata [[ALLOC5]]
// CHECK-NEXT: bufferization.dealloc ([[BASE]] : {{.*}}) if ([[COND3]])
diff --git a/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir b/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
index 50a2d6bf532aa..d1a89226fdb58 100644
--- a/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
@@ -15,7 +15,7 @@ func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
// CHECK-LABEL: @main
// CHECK-SAME: (%[[ARG0:.+]]: tensor<f32>) -> tensor<f32>
// CHECK: %[[EXTRACTED:.+]] = tensor.extract %[[ARG0]][] : tensor<f32>
-// CHECK: cf.br ^{{.*}}
-// CHECK: ^{{.*}}:
-// CHECK: %[[ELEMENTS:.+]] = tensor.from_elements %[[EXTRACTED]] : tensor<f32>
+// CHECK: cf.br ^{{.*}}(%[[EXTRACTED]] : f32)
+// CHECK: ^{{.*}}(%[[ARG1:.+]]: f32):
+// CHECK: %[[ELEMENTS:.+]] = tensor.from_elements %[[ARG1]] : tensor<f32>
// CHECK: return %[[ELEMENTS]] : tensor<f32>
diff --git a/mlir/test/Dialect/Linalg/detensorize_if.mlir b/mlir/test/Dialect/Linalg/detensorize_if.mlir
index c728ad21d2209..8d17763c04b6c 100644
--- a/mlir/test/Dialect/Linalg/detensorize_if.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_if.mlir
@@ -42,15 +42,18 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: %[[cst:.*]] = arith.constant dense<0>
-// CHECK-DAG: arith.constant true
-// CHECK: cf.br
-// CHECK-NEXT: ^[[bb1:.*]]:
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb3
-// CHECK-NEXT: ^[[bb2]]
-// CHECK-NEXT: cf.br ^[[bb3:.*]]
-// CHECK-NEXT: ^[[bb3]]
-// CHECK-NEXT: return %[[cst]]
+// CHECK-DAG: arith.constant 0
+// CHECK-DAG: arith.constant 10
+// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
+// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
+// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32)
+// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
+// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
+// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
+// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
+// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
+// CHECK-NEXT: return %{{.*}}
// CHECK-NEXT: }
// -----
@@ -103,17 +106,20 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: %[[cst:.*]] = arith.constant dense<0>
-// CHECK-DAG: arith.constant true
-// CHECK: cf.br ^[[bb1:.*]]
-// CHECK-NEXT: ^[[bb1:.*]]:
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb3
-// CHECK-NEXT: ^[[bb2]]:
-// CHECK-NEXT: cf.br ^[[bb3:.*]]
-// CHECK-NEXT: ^[[bb3]]:
-// CHECK-NEXT: cf.br ^[[bb4:.*]]
-// CHECK-NEXT: ^[[bb4]]:
-// CHECK-NEXT: return %[[cst]]
+// CHECK-DAG: arith.constant 0
+// CHECK-DAG: arith.constant 10
+// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
+// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
+// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32)
+// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
+// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
+// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
+// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
+// CHECK-NEXT: cf.br ^[[bb4:.*]](%{{.*}} : i32)
+// CHECK-NEXT: ^[[bb4]](%{{.*}}: i32)
+// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
+// CHECK-NEXT: return %{{.*}}
// CHECK-NEXT: }
// -----
@@ -165,13 +171,16 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: %[[cst:.*]] = arith.constant dense<10>
-// CHECK-DAG: arith.constant true
-// CHECK: cf.br ^[[bb1:.*]]
-// CHECK-NEXT: ^[[bb1]]:
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb2
-// CHECK-NEXT: ^[[bb2]]
-// CHECK-NEXT: cf.br ^[[bb3:.*]]
-// CHECK-NEXT: ^[[bb3]]
-// CHECK-NEXT: return %[[cst]]
+// CHECK-DAG: arith.constant 0
+// CHECK-DAG: arith.constant 10
+// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
+// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
+// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb2(%{{.*}} : i32)
+// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
+// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
+// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
+// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
+// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
+// CHECK-NEXT: return %{{.*}}
// CHECK-NEXT: }
diff --git a/mlir/test/Dialect/Linalg/detensorize_while.mlir b/mlir/test/Dialect/Linalg/detensorize_while.mlir
index 580a97d3a851b..aa30900f76a33 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while.mlir
@@ -46,11 +46,11 @@ func.func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attribu
// DET-ALL: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// DET-ALL: ^[[bb1]](%{{.*}}: i32)
// DET-ALL: arith.cmpi slt, {{.*}}
-// DET-ALL: cf.cond_br {{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
-// DET-ALL: ^[[bb2]]
+// DET-ALL: cf.cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
+// DET-ALL: ^[[bb2]](%{{.*}}: i32)
// DET-ALL: arith.addi {{.*}}
// DET-ALL: cf.br ^[[bb1]](%{{.*}} : i32)
-// DET-ALL: ^[[bb3]]:
+// DET-ALL: ^[[bb3]](%{{.*}}: i32)
// DET-ALL: tensor.from_elements {{.*}}
// DET-ALL: return %{{.*}} : tensor<i32>
@@ -62,10 +62,10 @@ func.func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attribu
// DET-CF: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// DET-CF: ^[[bb1]](%{{.*}}: i32)
// DET-CF: arith.cmpi slt, {{.*}}
-// DET-CF: cf.cond_br {{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
-// DET-CF: ^[[bb2]]:
+// DET-CF: cf.cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
+// DET-CF: ^[[bb2]](%{{.*}}: i32)
// DET-CF: arith.addi {{.*}}
// DET-CF: cf.br ^[[bb1]](%{{.*}} : i32)
-// DET-CF: ^[[bb3]]:
+// DET-CF: ^[[bb3]](%{{.*}}: i32)
// DET-CF: tensor.from_elements %{{.*}} : tensor<i32>
// DET-CF: return %{{.*}} : tensor<i32>
diff --git a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
index 414d9b94cbf53..955c7be5ef4c8 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
@@ -74,8 +74,8 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-ALL: } -> tensor<i32>
// DET-ALL: tensor.extract %{{.*}}[] : tensor<i32>
// DET-ALL: cmpi slt, %{{.*}}, %{{.*}} : i32
-// DET-ALL: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
-// DET-ALL: ^[[bb2]]:
+// DET-ALL: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
+// DET-ALL: ^[[bb2]](%{{.*}}: i32)
// DET-ALL: tensor.from_elements %{{.*}} : tensor<i32>
// DET-ALL: tensor.empty() : tensor<10xi32>
// DET-ALL: linalg.generic {{{.*}}} ins(%{{.*}} : tensor<i32>) outs(%{{.*}} : tensor<10xi32>) {
@@ -83,7 +83,7 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-ALL: linalg.yield %{{.*}} : i32
// DET-ALL: } -> tensor<10xi32>
// DET-ALL: cf.br ^[[bb1]](%{{.*}} : tensor<10xi32>)
-// DET-ALL: ^[[bb3]]
+// DET-ALL: ^[[bb3]](%{{.*}}: i32)
// DET-ALL: tensor.from_elements %{{.*}} : tensor<i32>
// DET-ALL: return %{{.*}} : tensor<i32>
// DET-ALL: }
@@ -95,10 +95,10 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-CF: %{{.*}} = linalg.generic {{{.*}}} ins(%{{.*}} : tensor<10xi32>) outs(%{{.*}} : tensor<i32>) {
// DET-CF: tensor.extract %{{.*}}[] : tensor<i32>
// DET-CF: cmpi slt, %{{.*}}, %{{.*}} : i32
-// DET-CF: cf.cond_br %{{.*}}, ^bb2, ^bb3
-// DET-CF: ^bb2:
+// DET-CF: cf.cond_br %{{.*}}, ^bb2(%{{.*}} : tensor<i32>), ^bb3(%{{.*}} : tensor<i32>)
+// DET-CF: ^bb2(%{{.*}}: tensor<i32>)
// DET-CF: %{{.*}} = linalg.generic {{{.*}}} ins(%{{.*}} : tensor<i32>) outs(%{{.*}} : tensor<10xi32>) {
// DET-CF: cf.br ^bb1(%{{.*}} : tensor<10xi32>)
-// DET-CF: ^bb3:
+// DET-CF: ^bb3(%{{.*}}: tensor<i32>)
// DET-CF: return %{{.*}} : tensor<i32>
// DET-CF: }
diff --git a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
index 913e78272db79..6d8d5fe71fca5 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
@@ -49,8 +49,8 @@ func.func @main() -> () attributes {} {
// CHECK-NEXT: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32)
// CHECK-NEXT: %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
-// CHECK-NEXT: ^[[bb2]]
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
// CHECK-NEXT: %{{.*}} = arith.addi %{{.*}}, %{{.*}}
// CHECK-NEXT: cf.br ^[[bb1]](%{{.*}} : i32)
// CHECK-NEXT: ^[[bb3]]:
diff --git a/mlir/test/Transforms/canonicalize-block-merge.mlir b/mlir/test/Transforms/canonicalize-block-merge.mlir
index 92cfde817cf7f..3b8b1fce0575a 100644
--- a/mlir/test/Transforms/canonicalize-block-merge.mlir
+++ b/mlir/test/Transforms/canonicalize-block-merge.mlir
@@ -87,7 +87,7 @@ func.func @mismatch_operands_matching_arguments(%cond : i1, %arg0 : i32, %arg1 :
// CHECK-LABEL: func @mismatch_argument_uses(
func.func @mismatch_argument_uses(%cond : i1, %arg0 : i32, %arg1 : i32) -> (i32, i32) {
- // CHECK: return {{.*}}, {{.*}}
+ // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
cf.cond_br %cond, ^bb1(%arg1 : i32), ^bb2(%arg0 : i32)
@@ -101,7 +101,7 @@ func.func @mismatch_argument_uses(%cond : i1, %arg0 : i32, %arg1 : i32) -> (i32,
// CHECK-LABEL: func @mismatch_argument_types(
func.func @mismatch_argument_types(%cond : i1, %arg0 : i32, %arg1 : i16) {
- // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2
+ // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2(%arg1 : i16)
@@ -115,7 +115,7 @@ func.func @mismatch_argument_types(%cond : i1, %arg0 : i32, %arg1 : i16) {
// CHECK-LABEL: func @mismatch_argument_count(
func.func @mismatch_argument_count(%cond : i1, %arg0 : i32) {
- // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2
+ // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2
diff --git a/mlir/test/Transforms/canonicalize-dce.mlir b/mlir/test/Transforms/canonicalize-dce.mlir
index 84631947970de..ac034d567a26a 100644
--- a/mlir/test/Transforms/canonicalize-dce.mlir
+++ b/mlir/test/Transforms/canonicalize-dce.mlir
@@ -137,10 +137,10 @@ func.func @f(%arg0: f32) {
// Test case: Test the mechanics of deleting multiple block arguments.
// CHECK: func @f(%arg0: tensor<1xf32>, %arg1: tensor<2xf32>, %arg2: tensor<3xf32>, %arg3: tensor<4xf32>, %arg4: tensor<5xf32>)
-// CHECK-NEXT: "test.br"()[^bb1]
-// CHECK-NEXT: ^bb1:
-// CHECK-NEXT: "foo.print"(%arg1)
-// CHECK-NEXT: "foo.print"(%arg3)
+// CHECK-NEXT: "test.br"(%arg1, %arg3)[^bb1] : (tensor<2xf32>, tensor<4xf32>)
+// CHECK-NEXT: ^bb1([[VAL0:%.+]]: tensor<2xf32>, [[VAL1:%.+]]: tensor<4xf32>):
+// CHECK-NEXT: "foo.print"([[VAL0]])
+// CHECK-NEXT: "foo.print"([[VAL1]])
// CHECK-NEXT: return
diff --git a/mlir/test/Transforms/make-isolated-from-above.mlir b/mlir/test/Transforms/make-isolated-from-above.mlir
index a9d4325944fd9..58f6cfbc5dd65 100644
--- a/mlir/test/Transforms/make-isolated-from-above.mlir
+++ b/mlir/test/Transforms/make-isolated-from-above.mlir
@@ -78,9 +78,9 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[EMPTY]], %[[C1]]
// CHECK: test.isolated_one_region_op %[[ARG2]], %[[C0]], %[[C1]], %[[D0]], %[[D1]]
// CHECK-NEXT: ^bb0(%[[B0:[a-zA-Z0-9]+]]: index, %[[B1:[a-zA-Z0-9]+]]: index, %[[B2:[a-zA-Z0-9]+]]: index, %[[B3:[a-zA-Z0-9]+]]: index, %[[B4:[a-zA-Z0-9]+]]: index)
-// CHECK-NEXT: cf.br ^bb1
-// CHECK: ^bb1:
-// CHECK: "foo.yield"(%[[B1]], %[[B2]], %[[B3]], %[[B4]], %[[B0]])
+// CHECK-NEXT: cf.br ^bb1(%[[B0]] : index)
+// CHECK: ^bb1(%[[B5:.+]]: index)
+// CHECK: "foo.yield"(%[[B1]], %[[B2]], %[[B3]], %[[B4]], %[[B5]])
// CLONE1-LABEL: func @make_isolated_from_above_multiple_blocks(
// CLONE1-SAME: %[[ARG0:[a-zA-Z0-9]+]]: index
@@ -95,9 +95,9 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CLONE1-NEXT: ^bb0(%[[B0:[a-zA-Z0-9]+]]: index, %[[B1:[a-zA-Z0-9]+]]: index, %[[B2:[a-zA-Z0-9]+]]: index)
// CLONE1-DAG: %[[C0_0:.+]] = arith.constant 0 : index
// CLONE1-DAG: %[[C1_0:.+]] = arith.constant 1 : index
-// CLONE1-NEXT: cf.br ^bb1
-// CLONE1: ^bb1:
-// CLONE1: "foo.yield"(%[[C0_0]], %[[C1_0]], %[[B1]], %[[B2]], %[[B0]])
+// CLONE1-NEXT: cf.br ^bb1(%[[B0]] : index)
+// CLONE1: ^bb1(%[[B3:.+]]: index)
+// CLONE1: "foo.yield"(%[[C0_0]], %[[C1_0]], %[[B1]], %[[B2]], %[[B3]])
// CLONE2-LABEL: func @make_isolated_from_above_multiple_blocks(
// CLONE2-SAME: %[[ARG0:[a-zA-Z0-9]+]]: index
@@ -110,6 +110,6 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CLONE2-DAG: %[[EMPTY:.+]] = tensor.empty(%[[B1]], %[[B2]])
// CLONE2-DAG: %[[D0:.+]] = tensor.dim %[[EMPTY]], %[[C0]]
// CLONE2-DAG: %[[D1:.+]] = tensor.dim %[[EMPTY]], %[[C1]]
-// CLONE2-NEXT: cf.br ^bb1
-// CLONE2: ^bb1:
-// CLONE2: "foo.yield"(%[[C0]], %[[C1]], %[[D0]], %[[D1]], %[[B0]])
+// CLONE2-NEXT: cf.br ^bb1(%[[B0]] : index)
+// CLONE2: ^bb1(%[[B3:.+]]: index)
+// CLONE2: "foo.yield"(%[[C0]], %[[C1]], %[[D0]], %[[D1]], %[[B3]])
diff --git a/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir b/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir
deleted file mode 100644
index 570ff6905a04d..0000000000000
--- a/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir
+++ /dev/null
@@ -1,76 +0,0 @@
- // RUN: mlir-opt -pass-pipeline='builtin.module(llvm.func(canonicalize{region-simplify=aggressive}))' %s | FileCheck %s
-
-llvm.func @foo(%arg0: i64)
-
-llvm.func @rand() -> i1
-
-// CHECK-LABEL: func @large_merge_block(
-llvm.func @large_merge_block(%arg0: i64) {
- // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
- // CHECK: %[[C3:.*]] = llvm.mlir.constant(3 : i64) : i64
- // CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : i64) : i64
-
- // CHECK: llvm.cond_br %5, ^bb1(%[[C1]], %[[C3]], %[[C4]], %[[C2]] : i64, i64, i64, i64), ^bb1(%[[C4]], %[[C2]], %[[C1]], %[[C3]] : i64, i64, i64, i64)
- // CHECK: ^bb{{.*}}(%[[arg0:.*]]: i64, %[[arg1:.*]]: i64, %[[arg2:.*]]: i64, %[[arg3:.*]]: i64):
- // CHECK: llvm.cond_br %{{.*}}, ^bb2(%[[arg0]] : i64), ^bb2(%[[arg3]] : i64)
- // CHECK: ^bb{{.*}}(%11: i64):
- // CHECK: llvm.br ^bb{{.*}}
- // CHECK: ^bb{{.*}}:
- // CHECK: llvm.call
- // CHECK: llvm.cond_br {{.*}}, ^bb{{.*}}(%[[arg1]] : i64), ^bb{{.*}}(%[[arg2]] : i64)
- // CHECK: ^bb{{.*}}:
- // CHECK: llvm.call
- // CHECK llvm.br ^bb{{.*}}
-
- %0 = llvm.mlir.constant(0 : i64) : i64
- %1 = llvm.mlir.constant(1 : i64) : i64
- %2 = llvm.mlir.constant(2 : i64) : i64
- %3 = llvm.mlir.constant(3 : i64) : i64
- %4 = llvm.mlir.constant(4 : i64) : i64
- %10 = llvm.icmp "eq" %arg0, %0 : i64
- llvm.cond_br %10, ^bb1, ^bb14
-^bb1: // pred: ^bb0
- %11 = llvm.call @rand() : () -> i1
- llvm.cond_br %11, ^bb2, ^bb3
-^bb2: // pred: ^bb1
- llvm.call @foo(%1) : (i64) -> ()
- llvm.br ^bb4
-^bb3: // pred: ^bb1
- llvm.call @foo(%2) : (i64) -> ()
- llvm.br ^bb4
-^bb4: // 2 preds: ^bb2, ^bb3
- %14 = llvm.call @rand() : () -> i1
- llvm.cond_br %14, ^bb5, ^bb6
-^bb5: // pred: ^bb4
- llvm.call @foo(%3) : (i64) -> ()
- llvm.br ^bb13
-^bb6: // pred: ^bb4
- llvm.call @foo(%4) : (i64) -> ()
- llvm.br ^bb13
-^bb13: // 2 preds: ^bb11, ^bb12
- llvm.br ^bb27
-^bb14: // pred: ^bb0
- %23 = llvm.call @rand() : () -> i1
- llvm.cond_br %23, ^bb15, ^bb16
-^bb15: // pred: ^bb14
- llvm.call @foo(%4) : (i64) -> ()
- llvm.br ^bb17
-^bb16: // pred: ^bb14
- llvm.call @foo(%3) : (i64) -> ()
- llvm.br ^bb17
-^bb17: // 2 preds: ^bb15, ^bb16
- %26 = llvm.call @rand() : () -> i1
- llvm.cond_br %26, ^bb18, ^bb19
-^bb18: // pred: ^bb17
- llvm.call @foo(%2) : (i64) -> ()
- llvm.br ^bb26
-^bb19: // pred: ^bb17
- llvm.call @foo(%1) : (i64) -> ()
- llvm.br ^bb26
-^bb26: // 2 preds: ^bb24, ^bb25
- llvm.br ^bb27
-^bb27: // 2 preds: ^bb13, ^bb26
- llvm.return
-}
From e25da69c132b2829a90a0fff6764cf27ea30a6d3 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 2 Jul 2024 11:59:00 -0700
Subject: [PATCH 030/246] [LangRef] Rename 'operand' to 'argument' in
descriptions of VP intrinsics. NFC (#97437)
Fixes inconsistencies noted in #97386 and #97387.
There are still more intrinsics that have the same issue. I might submit
more patches for those.
---
llvm/docs/LangRef.rst | 833 +++++++++++++++++++++---------------------
1 file changed, 418 insertions(+), 415 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index b6f8a24937df6..1dc97d45c0953 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -20064,10 +20064,10 @@ condition vector, without IR-level branching.
Arguments:
""""""""""
-The first operand is a vector of ``i1`` and indicates the condition. The
-second operand is the value that is selected where the condition vector is
-true. The third operand is the value that is selected where the condition
-vector is false. The vectors must be of the same size. The fourth operand is
+The first argument is a vector of ``i1`` and indicates the condition. The
+second argument is the value that is selected where the condition vector is
+true. The third argument is the value that is selected where the condition
+vector is false. The vectors must be of the same size. The fourth argument is
the explicit vector length.
#. The optional ``fast-math flags`` marker indicates that the select has one or
@@ -20079,13 +20079,13 @@ the explicit vector length.
Semantics:
""""""""""
-The intrinsic selects lanes from the second and third operand depending on a
+The intrinsic selects lanes from the second and third argument depending on a
condition vector.
All result lanes at positions greater than or equal to ``%evl`` are undefined.
For all lanes below ``%evl`` where the condition vector is true the lane is
-taken from the second operand. Otherwise, the lane is taken from the third
-operand.
+taken from the second argument. Otherwise, the lane is taken from the third
+argument.
Example:
""""""""
@@ -20117,15 +20117,15 @@ Overview:
"""""""""
The '``llvm.vp.merge``' intrinsic is used to choose one value based on a
-condition vector and an index operand, without IR-level branching.
+condition vector and an index argument, without IR-level branching.
Arguments:
""""""""""
-The first operand is a vector of ``i1`` and indicates the condition. The
-second operand is the value that is merged where the condition vector is true.
-The third operand is the value that is selected where the condition vector is
-false or the lane position is greater equal than the pivot. The fourth operand
+The first argument is a vector of ``i1`` and indicates the condition. The
+second argument is the value that is merged where the condition vector is true.
+The third argument is the value that is selected where the condition vector is
+false or the lane position is greater than or equal to the pivot. The fourth argument
is the pivot.
#. The optional ``fast-math flags`` marker indicates that the merge has one or
@@ -20137,12 +20137,12 @@ is the pivot.
Semantics:
""""""""""
-The intrinsic selects lanes from the second and third operand depending on a
+The intrinsic selects lanes from the second and third argument depending on a
condition vector and pivot value.
For all lanes where the condition vector is true and the lane position is less
-than ``%pivot`` the lane is taken from the second operand. Otherwise, the lane
-is taken from the third operand.
+than ``%pivot`` the lane is taken from the second argument. Otherwise, the lane
+is taken from the third argument.
Example:
""""""""
@@ -20185,16 +20185,16 @@ Predicated integer addition of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.add``' intrinsic performs integer addition (:ref:`add <i_add>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
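A minimal sketch in the style of the LangRef examples (``%a``, ``%b``,
``%mask`` and ``%evl`` are placeholders):

.. code-block:: llvm

      %r = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %t = add <4 x i32> %a, %b
      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison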
@@ -20232,16 +20232,16 @@ Predicated integer subtraction of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.sub``' intrinsic performs integer subtraction
-(:ref:`sub <i_sub>`) of the first and second vector operand on each enabled
+(:ref:`sub <i_sub>`) of the first and second vector arguments on each enabled
lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20281,15 +20281,15 @@ Predicated integer multiplication of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.mul``' intrinsic performs integer multiplication
-(:ref:`mul <i_mul>`) of the first and second vector operand on each enabled
+(:ref:`mul <i_mul>`) of the first and second vector arguments on each enabled
lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20328,16 +20328,16 @@ Predicated, signed division of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.sdiv``' intrinsic performs signed division (:ref:`sdiv <i_sdiv>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20376,13 +20376,16 @@ Predicated, unsigned division of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The third operand is the vector mask and has the same number of elements as the result vector type. The fourth operand is the explicit vector length of the operation.
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
+operation.
Semantics:
""""""""""
The '``llvm.vp.udiv``' intrinsic performs unsigned division
-(:ref:`udiv <i_udiv>`) of the first and second vector operand on each enabled
+(:ref:`udiv <i_udiv>`) of the first and second vector arguments on each enabled
lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20422,16 +20425,16 @@ Predicated computations of the signed remainder of two integer vectors.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.srem``' intrinsic computes the remainder of the signed division
-(:ref:`srem <i_srem>`) of the first and second vector operand on each enabled
+(:ref:`srem <i_srem>`) of the first and second vector arguments on each enabled
lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20471,16 +20474,16 @@ Predicated computation of the unsigned remainder of two integer vectors.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.urem``' intrinsic computes the remainder of the unsigned division
-(:ref:`urem <i_urem>`) of the first and second vector operand on each enabled
+(:ref:`urem <i_urem>`) of the first and second vector arguments on each enabled
lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20519,16 +20522,16 @@ Vector-predicated arithmetic right-shift.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.ashr``' intrinsic computes the arithmetic right shift
-(:ref:`ashr <i_ashr>`) of the first operand by the second operand on each
+(:ref:`ashr <i_ashr>`) of the first argument by the second argument on each
enabled lane. The result on disabled lanes is a
:ref:`poison value <poisonvalues>`.
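A sketch of the shift form, where the first argument is shifted by the second
(placeholder values):

.. code-block:: llvm

      %r = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
      ;; Each enabled lane computes the arithmetic shift %a[i] >> %b[i];
      ;; disabled lanes are poison.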
@@ -20569,16 +20572,16 @@ Vector-predicated logical right-shift.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.lshr``' intrinsic computes the logical right shift
-(:ref:`lshr <i_lshr>`) of the first operand by the second operand on each
+(:ref:`lshr <i_lshr>`) of the first argument by the second argument on each
enabled lane. The result on disabled lanes is a
:ref:`poison value <poisonvalues>`.
@@ -20618,16 +20621,16 @@ Vector-predicated left shift.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.shl``' intrinsic computes the left shift (:ref:`shl <i_shl>`) of
-the first operand by the second operand on each enabled lane. The result on
+the first argument by the second argument on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20666,16 +20669,16 @@ Vector-predicated or.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.or``' intrinsic performs a bitwise or (:ref:`or <i_or>`) of the
-first two operands on each enabled lane. The result on disabled lanes is
+first two arguments on each enabled lane. The result on disabled lanes is
a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20714,16 +20717,16 @@ Vector-predicated and.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.and``' intrinsic performs a bitwise and (:ref:`and <i_and>`) of
-the first two operands on each enabled lane. The result on disabled lanes is
+the first two arguments on each enabled lane. The result on disabled lanes is
a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20762,16 +20765,16 @@ Vector-predicated, bitwise xor.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.xor``' intrinsic performs a bitwise xor (:ref:`xor <i_xor>`) of
-the first two operands on each enabled lane.
+the first two arguments on each enabled lane.
The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20809,10 +20812,10 @@ Predicated abs of a vector of integers.
Arguments:
""""""""""
-The first operand and the result have the same vector of integer type. The
-second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
-operation. The fourth operand must be a constant and is a flag to indicate
+The first argument and the result have the same vector of integer type. The
+second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
+operation. The fourth argument must be a constant and is a flag to indicate
whether the result value of the '``llvm.vp.abs``' intrinsic is a
:ref:`poison value <poisonvalues>` if the first argument is statically or
dynamically an ``INT_MIN`` value.
@@ -20820,7 +20823,7 @@ dynamically an ``INT_MIN`` value.
Semantics:
""""""""""
-The '``llvm.vp.abs``' intrinsic performs abs (:ref:`abs <int_abs>`) of the first operand on each
+The '``llvm.vp.abs``' intrinsic performs abs (:ref:`abs <int_abs>`) of the first argument on each
enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
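A sketch showing the constant flag argument (here ``i1 false``, meaning an
``INT_MIN`` input wraps instead of producing poison; placeholder values):

.. code-block:: llvm

      %r = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl, i1 false)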
@@ -20860,16 +20863,16 @@ Predicated integer signed maximum of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.smax``' intrinsic performs integer signed maximum (:ref:`smax <int_smax>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20908,16 +20911,16 @@ Predicated integer signed minimum of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.smin``' intrinsic performs integer signed minimum (:ref:`smin <int_smin>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -20956,16 +20959,16 @@ Predicated integer unsigned maximum of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.umax``' intrinsic performs integer unsigned maximum (:ref:`umax <int_umax>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -21004,16 +21007,16 @@ Predicated integer unsigned minimum of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.umin``' intrinsic performs integer unsigned minimum (:ref:`umin <int_umin>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -21052,16 +21055,16 @@ Predicated floating-point copysign of two vectors of floating-point values.
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.copysign``' intrinsic performs floating-point copysign (:ref:`copysign <int_copysign>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
@@ -21101,16 +21104,16 @@ Predicated floating-point IEEE-754 minNum of two vectors of floating-point value
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.minnum``' intrinsic performs floating-point minimum (:ref:`minnum <i_minnum>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
@@ -21150,16 +21153,16 @@ Predicated floating-point IEEE-754 maxNum of two vectors of floating-point value
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.maxnum``' intrinsic performs floating-point maximum (:ref:`maxnum <i_maxnum>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
@@ -21199,17 +21202,17 @@ propagating NaNs and treating -0.0 as less than +0.0.
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.minimum``' intrinsic performs floating-point minimum (:ref:`minimum <i_minimum>`)
-of the first and second vector operand on each enabled lane, the result being
-NaN if either operand is a NaN. -0.0 is considered to be less than +0.0 for this
+of the first and second vector arguments on each enabled lane, the result being
+NaN if either argument is a NaN. -0.0 is considered to be less than +0.0 for this
intrinsic. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
The operation is performed in the default floating-point environment.
@@ -21249,18 +21252,18 @@ propagating NaNs and treating -0.0 as less than +0.0.
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.maximum``' intrinsic performs floating-point maximum (:ref:`maximum <i_maximum>`)
-of the first and second vector operand on each enabled lane, the result being
-NaN if either operand is a NaN. -0.0 is considered to be less than +0.0 for this
-intrinsic. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+of the first and second vector arguments on each enabled lane, the result being
+NaN if either argument is a NaN. -0.0 is considered to be less than +0.0 for this
+intrinsic. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
The operation is performed in the default floating-point environment.
Examples:
@@ -21299,16 +21302,16 @@ Predicated floating-point addition of two vectors of floating-point values.
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fadd``' intrinsic performs floating-point addition (:ref:`fadd <i_fadd>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
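A sketch of the floating-point form (placeholder values):

.. code-block:: llvm

      %r = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %a, <4 x float> %b, <4 x i1> %mask, i32 %evl)
      ;; Lane-wise fadd on enabled lanes, evaluated in the default
      ;; floating-point environment; disabled lanes are poison.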
@@ -21348,16 +21351,16 @@ Predicated floating-point subtraction of two vectors of floating-point values.
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fsub``' intrinsic performs floating-point subtraction (:ref:`fsub <i_fsub>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
@@ -21397,16 +21400,16 @@ Predicated floating-point multiplication of two vectors of floating-point values
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fmul``' intrinsic performs floating-point multiplication (:ref:`fmul <i_fmul>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
@@ -21446,16 +21449,16 @@ Predicated floating-point division of two vectors of floating-point values.
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fdiv``' intrinsic performs floating-point division (:ref:`fdiv <i_fdiv>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
@@ -21495,16 +21498,16 @@ Predicated floating-point remainder of two vectors of floating-point values.
Arguments:
""""""""""
-The first two operands and the result have the same vector of floating-point type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of floating-point type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.frem``' intrinsic performs floating-point remainder (:ref:`frem <i_frem>`)
-of the first and second vector operand on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
@@ -21544,16 +21547,16 @@ Predicated floating-point negation of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fneg``' intrinsic performs floating-point negation (:ref:`fneg <i_fneg>`)
-of the first vector operand on each enabled lane. The result on disabled lanes
+of the first vector argument on each enabled lane. The result on disabled lanes
is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -21592,16 +21595,16 @@ Predicated floating-point absolute value of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fabs``' intrinsic performs floating-point absolute value
-(:ref:`fabs <int_fabs>`) of the first vector operand on each enabled lane. The
+(:ref:`fabs <int_fabs>`) of the first vector argument on each enabled lane. The
result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -21640,16 +21643,16 @@ Predicated floating-point square root of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.sqrt``' intrinsic performs floating-point square root (:ref:`sqrt <int_sqrt>`) of
-the first vector operand on each enabled lane. The result on disabled lanes is
+the first vector argument on each enabled lane. The result on disabled lanes is
a :ref:`poison value <poisonvalues>`. The operation is performed in the default
floating-point environment.
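A sketch in terms of the unpredicated intrinsic (value names are placeholders):

.. code-block:: llvm

      %r = call <4 x float> @llvm.vp.sqrt.v4f32(<4 x float> %a, <4 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %t = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
      %also.r = select <4 x i1> %mask, <4 x float> %t, <4 x float> poison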
@@ -21689,16 +21692,16 @@ Predicated floating-point fused multiply-add of two vectors of floating-point va
Arguments:
""""""""""
-The first three operands and the result have the same vector of floating-point type. The
-fourth operand is the vector mask and has the same number of elements as the
-result vector type. The fifth operand is the explicit vector length of the
+The first three arguments and the result have the same vector of floating-point type. The
+fourth argument is the vector mask and has the same number of elements as the
+result vector type. The fifth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fma``' intrinsic performs floating-point fused multiply-add (:ref:`llvm.fma <int_fma>`)
-of the first, second, and third vector operand on each enabled lane. The result on
+of the first, second, and third vector argument on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
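A sketch with placeholder value names:

.. code-block:: llvm

      %r = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %t = call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
      %also.r = select <4 x i1> %mask, <4 x float> %t, <4 x float> poison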
@@ -21740,16 +21743,16 @@ efficient than the equivalent, separate pair of mul and add instructions.
Arguments:
""""""""""
-The first three operands and the result have the same vector of floating-point
-type. The fourth operand is the vector mask and has the same number of elements
-as the result vector type. The fifth operand is the explicit vector length of
+The first three arguments and the result have the same vector of floating-point
+type. The fourth argument is the vector mask and has the same number of elements
+as the result vector type. The fifth argument is the explicit vector length of
the operation.
Semantics:
""""""""""
The '``llvm.vp.fmuladd``' intrinsic performs floating-point multiply-add (:ref:`llvm.fmuladd <int_fmuladd>`)
-of the first, second, and third vector operand on each enabled lane. The result
+of the first, second, and third vector argument on each enabled lane. The result
on disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
performed in the default floating-point environment.
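A sketch with placeholder value names (note that, like '``llvm.fmuladd``', the
multiply and add may or may not be fused):

.. code-block:: llvm

      %r = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %t = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
      %also.r = select <4 x i1> %mask, <4 x float> %t, <4 x float> poison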
@@ -21788,18 +21791,18 @@ returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.add``' intrinsic performs the integer ``ADD`` reduction
-(:ref:`llvm.vector.reduce.add <int_vector_reduce_add>`) of the vector operand
+(:ref:`llvm.vector.reduce.add <int_vector_reduce_add>`) of the vector argument
``val`` on each enabled lane, adding it to the scalar ``start_value``. Disabled
lanes are treated as containing the neutral value ``0`` (i.e. having no effect
on the reduction operation). If the vector length is zero, the result is equal
@@ -21844,12 +21847,12 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-floating-point type equal to the result type. The second operand is the vector
+The first argument is the start value of the reduction, which must be a scalar
+floating-point type equal to the result type. The second argument is the vector
on which the reduction is performed and must be a vector of floating-point
-values whose element type is the result/start type. The third operand is the
+values whose element type is the result/start type. The third argument is the
vector mask and is a vector of boolean values with the same number of elements
-as the vector operand. The fourth operand is the explicit vector length of the
+as the vector argument. The fourth argument is the explicit vector length of the
operation.
Semantics:
@@ -21857,7 +21860,7 @@ Semantics:
The '``llvm.vp.reduce.fadd``' intrinsic performs the floating-point ``ADD``
reduction (:ref:`llvm.vector.reduce.fadd <int_vector_reduce_fadd>`) of the
-vector operand ``val`` on each enabled lane, adding it to the scalar
+vector argument ``val`` on each enabled lane, adding it to the scalar
``start_value``. Disabled lanes are treated as containing the neutral value
``-0.0`` (i.e. having no effect on the reduction operation). If no lanes are
enabled, the resulting value will be equal to ``start_value``.
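A sketch assuming a fixed vector length of 4, so that the ``evl`` argument
covers every lane (value names are placeholders):

.. code-block:: llvm

      %r = call float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %a, <4 x i1> %mask, i32 4)
      ;; Masked-off lanes contribute the neutral value -0.0
      %m = select <4 x i1> %mask, <4 x float> %a, <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>
      %also.r = call float @llvm.vector.reduce.fadd.v4f32(float %start, <4 x float> %m)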
@@ -21904,18 +21907,18 @@ returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.mul``' intrinsic performs the integer ``MUL`` reduction
-(:ref:`llvm.vector.reduce.mul <int_vector_reduce_mul>`) of the vector operand ``val``
+(:ref:`llvm.vector.reduce.mul <int_vector_reduce_mul>`) of the vector argument ``val``
on each enabled lane, multiplying it by the scalar ``start_value``. Disabled
lanes are treated as containing the neutral value ``1`` (i.e. having no effect
on the reduction operation). If the vector length is zero, the result is the
@@ -21960,12 +21963,12 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-floating-point type equal to the result type. The second operand is the vector
+The first argument is the start value of the reduction, which must be a scalar
+floating-point type equal to the result type. The second argument is the vector
on which the reduction is performed and must be a vector of floating-point
-values whose element type is the result/start type. The third operand is the
+values whose element type is the result/start type. The third argument is the
vector mask and is a vector of boolean values with the same number of elements
-as the vector operand. The fourth operand is the explicit vector length of the
+as the vector argument. The fourth argument is the explicit vector length of the
operation.
Semantics:
@@ -21973,7 +21976,7 @@ Semantics:
The '``llvm.vp.reduce.fmul``' intrinsic performs the floating-point ``MUL``
reduction (:ref:`llvm.vector.reduce.fmul <int_vector_reduce_fmul>`) of the
-vector operand ``val`` on each enabled lane, multiplying it by the scalar
+vector argument ``val`` on each enabled lane, multiplying it by the scalar
``start_value``. Disabled lanes are treated as containing the neutral value
``1.0`` (i.e. having no effect on the reduction operation). If no lanes are
enabled, the resulting value will be equal to the starting value.
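A sketch assuming a fixed vector length of 4, so that the ``evl`` argument
covers every lane (value names are placeholders):

.. code-block:: llvm

      %r = call float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %a, <4 x i1> %mask, i32 4)
      ;; Masked-off lanes contribute the neutral value 1.0
      %m = select <4 x i1> %mask, <4 x float> %a, <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>
      %also.r = call float @llvm.vector.reduce.fmul.v4f32(float %start, <4 x float> %m)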
@@ -22020,18 +22023,18 @@ returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.and``' intrinsic performs the integer ``AND`` reduction
-(:ref:`llvm.vector.reduce.and <int_vector_reduce_and>`) of the vector operand
+(:ref:`llvm.vector.reduce.and <int_vector_reduce_and>`) of the vector argument
``val`` on each enabled lane, performing an '``and``' of that with the
scalar ``start_value``. Disabled lanes are treated as containing the neutral
value ``UINT_MAX``, or ``-1`` (i.e. having no effect on the reduction
@@ -22077,18 +22080,18 @@ returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.or``' intrinsic performs the integer ``OR`` reduction
-(:ref:`llvm.vector.reduce.or <int_vector_reduce_or>`) of the vector operand
+(:ref:`llvm.vector.reduce.or <int_vector_reduce_or>`) of the vector argument
``val`` on each enabled lane, performing an '``or``' of that with the scalar
``start_value``. Disabled lanes are treated as containing the neutral value
``0`` (i.e. having no effect on the reduction operation). If the vector length
@@ -22133,18 +22136,18 @@ returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.xor``' intrinsic performs the integer ``XOR`` reduction
-(:ref:`llvm.vector.reduce.xor <int_vector_reduce_xor>`) of the vector operand
+(:ref:`llvm.vector.reduce.xor <int_vector_reduce_xor>`) of the vector argument
``val`` on each enabled lane, performing an '``xor``' of that with the scalar
``start_value``. Disabled lanes are treated as containing the neutral value
``0`` (i.e. having no effect on the reduction operation). If the vector length
@@ -22190,19 +22193,19 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.smax``' intrinsic performs the signed-integer ``MAX``
reduction (:ref:`llvm.vector.reduce.smax <int_vector_reduce_smax>`) of the
-vector operand ``val`` on each enabled lane, and taking the maximum of that and
+vector argument ``val`` on each enabled lane, taking the maximum of that and
the scalar ``start_value``. Disabled lanes are treated as containing the
neutral value ``INT_MIN`` (i.e. having no effect on the reduction operation).
If the vector length is zero, the result is the start value.
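A sketch assuming a fixed vector length of 4, so that the ``evl`` argument
covers every lane (value names are placeholders):

.. code-block:: llvm

      %r = call i32 @llvm.vp.reduce.smax.v4i32(i32 %start, <4 x i32> %a, <4 x i1> %mask, i32 4)
      ;; Masked-off lanes contribute the neutral value INT_MIN
      %m = select <4 x i1> %mask, <4 x i32> %a, <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
      %red = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %m)
      %also.r = call i32 @llvm.smax.i32(i32 %red, i32 %start)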
@@ -22247,19 +22250,19 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.smin``' intrinsic performs the signed-integer ``MIN``
reduction (:ref:`llvm.vector.reduce.smin <int_vector_reduce_smin>`) of the
-vector operand ``val`` on each enabled lane, and taking the minimum of that and
+vector argument ``val`` on each enabled lane, taking the minimum of that and
the scalar ``start_value``. Disabled lanes are treated as containing the
neutral value ``INT_MAX`` (i.e. having no effect on the reduction operation).
If the vector length is zero, the result is the start value.
@@ -22304,19 +22307,19 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.umax``' intrinsic performs the unsigned-integer ``MAX``
reduction (:ref:`llvm.vector.reduce.umax <int_vector_reduce_umax>`) of the
-vector operand ``val`` on each enabled lane, and taking the maximum of that and
+vector argument ``val`` on each enabled lane, and taking the maximum of that and
the scalar ``start_value``. Disabled lanes are treated as containing the
neutral value ``0`` (i.e. having no effect on the reduction operation). If the
vector length is zero, the result is the start value.
@@ -22361,19 +22364,19 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-integer type equal to the result type. The second operand is the vector on
+The first argument is the start value of the reduction, which must be a scalar
+integer type equal to the result type. The second argument is the vector on
which the reduction is performed and must be a vector of integer values whose
-element type is the result/start type. The third operand is the vector mask and
+element type is the result/start type. The third argument is the vector mask and
is a vector of boolean values with the same number of elements as the vector
-operand. The fourth operand is the explicit vector length of the operation.
+argument. The fourth argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.reduce.umin``' intrinsic performs the unsigned-integer ``MIN``
reduction (:ref:`llvm.vector.reduce.umin <int_vector_reduce_umin>`) of the
-vector operand ``val`` on each enabled lane, taking the minimum of that and the
+vector argument ``val`` on each enabled lane, taking the minimum of that and the
scalar ``start_value``. Disabled lanes are treated as containing the neutral
value ``UINT_MAX``, or ``-1`` (i.e. having no effect on the reduction
operation). If the vector length is zero, the result is the start value.
@@ -22418,12 +22421,12 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-floating-point type equal to the result type. The second operand is the vector
+The first argument is the start value of the reduction, which must be a scalar
+floating-point type equal to the result type. The second argument is the vector
on which the reduction is performed and must be a vector of floating-point
-values whose element type is the result/start type. The third operand is the
+values whose element type is the result/start type. The third argument is the
vector mask and is a vector of boolean values with the same number of elements
-as the vector operand. The fourth operand is the explicit vector length of the
+as the vector argument. The fourth argument is the explicit vector length of the
operation.
Semantics:
@@ -22431,7 +22434,7 @@ Semantics:
The '``llvm.vp.reduce.fmax``' intrinsic performs the floating-point ``MAX``
reduction (:ref:`llvm.vector.reduce.fmax <int_vector_reduce_fmax>`) of the
-vector operand ``val`` on each enabled lane, taking the maximum of that and the
+vector argument ``val`` on each enabled lane, taking the maximum of that and the
scalar ``start_value``. Disabled lanes are treated as containing the neutral
value (i.e. having no effect on the reduction operation). If the vector length
is zero, the result is the start value.
@@ -22488,12 +22491,12 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-floating-point type equal to the result type. The second operand is the vector
+The first argument is the start value of the reduction, which must be a scalar
+floating-point type equal to the result type. The second argument is the vector
on which the reduction is performed and must be a vector of floating-point
-values whose element type is the result/start type. The third operand is the
+values whose element type is the result/start type. The third argument is the
vector mask and is a vector of boolean values with the same number of elements
-as the vector operand. The fourth operand is the explicit vector length of the
+as the vector argument. The fourth argument is the explicit vector length of the
operation.
Semantics:
@@ -22501,7 +22504,7 @@ Semantics:
The '``llvm.vp.reduce.fmin``' intrinsic performs the floating-point ``MIN``
reduction (:ref:`llvm.vector.reduce.fmin <int_vector_reduce_fmin>`) of the
-vector operand ``val`` on each enabled lane, taking the minimum of that and the
+vector argument ``val`` on each enabled lane, taking the minimum of that and the
scalar ``start_value``. Disabled lanes are treated as containing the neutral
value (i.e. having no effect on the reduction operation). If the vector length
is zero, the result is the start value.
@@ -22558,12 +22561,12 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-floating-point type equal to the result type. The second operand is the vector
+The first argument is the start value of the reduction, which must be a scalar
+floating-point type equal to the result type. The second argument is the vector
on which the reduction is performed and must be a vector of floating-point
-values whose element type is the result/start type. The third operand is the
+values whose element type is the result/start type. The third argument is the
vector mask and is a vector of boolean values with the same number of elements
-as the vector operand. The fourth operand is the explicit vector length of the
+as the vector argument. The fourth argument is the explicit vector length of the
operation.
Semantics:
@@ -22571,7 +22574,7 @@ Semantics:
The '``llvm.vp.reduce.fmaximum``' intrinsic performs the floating-point ``MAX``
reduction (:ref:`llvm.vector.reduce.fmaximum <int_vector_reduce_fmaximum>`) of
-the vector operand ``val`` on each enabled lane, taking the maximum of that and
+the vector argument ``val`` on each enabled lane, taking the maximum of that and
the scalar ``start_value``. Disabled lanes are treated as containing the
neutral value (i.e. having no effect on the reduction operation). If the vector
length is zero, the result is the start value.
@@ -22628,12 +22631,12 @@ value, returning the result as a scalar.
Arguments:
""""""""""
-The first operand is the start value of the reduction, which must be a scalar
-floating-point type equal to the result type. The second operand is the vector
+The first argument is the start value of the reduction, which must be a scalar
+floating-point type equal to the result type. The second argument is the vector
on which the reduction is performed and must be a vector of floating-point
-values whose element type is the result/start type. The third operand is the
+values whose element type is the result/start type. The third argument is the
vector mask and is a vector of boolean values with the same number of elements
-as the vector operand. The fourth operand is the explicit vector length of the
+as the vector argument. The fourth argument is the explicit vector length of the
operation.
Semantics:
@@ -22641,7 +22644,7 @@ Semantics:
The '``llvm.vp.reduce.fminimum``' intrinsic performs the floating-point ``MIN``
reduction (:ref:`llvm.vector.reduce.fminimum <int_vector_reduce_fminimum>`) of
-the vector operand ``val`` on each enabled lane, taking the minimum of that and
+the vector argument ``val`` on each enabled lane, taking the minimum of that and
the scalar ``start_value``. Disabled lanes are treated as containing the neutral
value (i.e. having no effect on the reduction operation). If the vector length
is zero, the result is the start value.
@@ -22700,7 +22703,7 @@ Create a mask representing active and inactive vector lanes.
Arguments:
""""""""""
-Both operands have the same scalar integer type. The result is a vector with
+Both arguments have the same scalar integer type. The result is a vector with
the i1 element type.
Semantics:
@@ -22869,23 +22872,23 @@ the :ref:`llvm.masked.load <int_mload>` intrinsic.
Arguments:
""""""""""
-The first operand is the base pointer for the load. The second operand is a
+The first argument is the base pointer for the load. The second argument is a
vector of boolean values with the same number of elements as the return type.
The third is the explicit vector length of the operation. The return type and
underlying type of the base pointer are the same vector types.
The :ref:`align <attr_align>` parameter attribute can be provided for the first
-operand.
+argument.
Semantics:
""""""""""
The '``llvm.vp.load``' intrinsic reads a vector from memory in the same way as
the '``llvm.masked.load``' intrinsic, where the mask is taken from the
-combination of the '``mask``' and '``evl``' operands in the usual VP way.
-Certain '``llvm.masked.load``' operands do not have corresponding operands in
-'``llvm.vp.load``': the '``passthru``' operand is implicitly ``poison``; the
-'``alignment``' operand is taken as the ``align`` parameter attribute, if
+combination of the '``mask``' and '``evl``' arguments in the usual VP way.
+Certain '``llvm.masked.load``' arguments do not have corresponding arguments in
+'``llvm.vp.load``': the '``passthru``' argument is implicitly ``poison``; the
+'``alignment``' argument is taken as the ``align`` parameter attribute, if
provided. The default alignment is taken as the ABI alignment of the return
type as specified by the :ref:`datalayout string<langref_datalayout>`.
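A sketch of the correspondence with '``llvm.masked.load``' (the value names and
the ``align 2`` attribute are chosen purely for illustration):

.. code-block:: llvm

      %r = call <8 x i8> @llvm.vp.load.v8i8.p0(ptr align 2 %ptr, <8 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %also.r = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %ptr, i32 2, <8 x i1> %mask, <8 x i8> poison)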
@@ -22925,25 +22928,25 @@ the :ref:`llvm.masked.store <int_mstore>` intrinsic.
Arguments:
""""""""""
-The first operand is the vector value to be written to memory. The second
-operand is the base pointer for the store. It has the same underlying type as
-the value operand. The third operand is a vector of boolean values with the
+The first argument is the vector value to be written to memory. The second
+argument is the base pointer for the store. It has the same underlying type as
+the value argument. The third argument is a vector of boolean values with the
same number of elements as the return type. The fourth is the explicit vector
length of the operation.
The :ref:`align <attr_align>` parameter attribute can be provided for the
-second operand.
+second argument.
Semantics:
""""""""""
The '``llvm.vp.store``' intrinsic reads a vector from memory in the same way as
the '``llvm.masked.store``' intrinsic, where the mask is taken from the
-combination of the '``mask``' and '``evl``' operands in the usual VP way. The
-alignment of the operation (corresponding to the '``alignment``' operand of
+combination of the '``mask``' and '``evl``' arguments in the usual VP way. The
+alignment of the operation (corresponding to the '``alignment``' argument of
'``llvm.masked.store``') is specified by the ``align`` parameter attribute (see
above). If it is not provided then the ABI alignment of the type of the
-'``value``' operand as specified by the :ref:`datalayout
+'``value``' argument as specified by the :ref:`datalayout
string<langref_datalayout>` is used instead.
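A sketch of the correspondence with '``llvm.masked.store``' (the value names
and the ``align 4`` attribute are chosen purely for illustration):

.. code-block:: llvm

      call void @llvm.vp.store.v8i8.p0(<8 x i8> %val, ptr align 4 %ptr, <8 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, the store behaves like
      call void @llvm.masked.store.v8i8.p0(<8 x i8> %val, ptr %ptr, i32 4, <8 x i1> %mask)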
Examples:
@@ -22980,14 +22983,14 @@ memory locations evenly spaced apart by '``stride``' number of bytes, starting f
Arguments:
""""""""""
-The first operand is the base pointer for the load. The second operand is the stride
-value expressed in bytes. The third operand is a vector of boolean values
+The first argument is the base pointer for the load. The second argument is the stride
+value expressed in bytes. The third argument is a vector of boolean values
with the same number of elements as the return type. The fourth is the explicit
vector length of the operation. The underlying type of the base pointer matches the type of the scalar
-elements of the return operand.
+elements of the return value.
The :ref:`align <attr_align>` parameter attribute can be provided for the first
-operand.
+argument.
Semantics:
""""""""""
@@ -23040,15 +23043,15 @@ bytes, starting from '``ptr``'.
Arguments:
""""""""""
-The first operand is the vector value to be written to memory. The second
-operand is the base pointer for the store. Its underlying type matches the
-scalar element type of the value operand. The third operand is the stride value
-expressed in bytes. The fourth operand is a vector of boolean values with the
+The first argument is the vector value to be written to memory. The second
+argument is the base pointer for the store. Its underlying type matches the
+scalar element type of the value argument. The third argument is the stride value
+expressed in bytes. The fourth argument is a vector of boolean values with the
same number of elements as the return type. The fifth is the explicit vector
length of the operation.
The :ref:`align <attr_align>` parameter attribute can be provided for the
-second operand.
+second argument.
Semantics:
""""""""""
@@ -23102,24 +23105,24 @@ the :ref:`llvm.masked.gather <int_mgather>` intrinsic.
Arguments:
""""""""""
-The first operand is a vector of pointers which holds all memory addresses to
-read. The second operand is a vector of boolean values with the same number of
+The first argument is a vector of pointers which holds all memory addresses to
+read. The second argument is a vector of boolean values with the same number of
elements as the return type. The third is the explicit vector length of the
operation. The return type and underlying type of the vector of pointers are
the same vector types.
The :ref:`align <attr_align>` parameter attribute can be provided for the first
-operand.
+argument.
Semantics:
""""""""""
The '``llvm.vp.gather``' intrinsic reads multiple scalar values from memory in
the same way as the '``llvm.masked.gather``' intrinsic, where the mask is taken
-from the combination of the '``mask``' and '``evl``' operands in the usual VP
-way. Certain '``llvm.masked.gather``' operands do not have corresponding
-operands in '``llvm.vp.gather``': the '``passthru``' operand is implicitly
-``poison``; the '``alignment``' operand is taken as the ``align`` parameter, if
+from the combination of the '``mask``' and '``evl``' arguments in the usual VP
+way. Certain '``llvm.masked.gather``' arguments do not have corresponding
+arguments in '``llvm.vp.gather``': the '``passthru``' argument is implicitly
+``poison``; the '``alignment``' argument is taken as the ``align`` parameter, if
provided. The default alignment is taken as the ABI alignment of the source
addresses as specified by the :ref:`datalayout string<langref_datalayout>`.
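A sketch of the correspondence with '``llvm.masked.gather``' (the value names
and the ``align 8`` attribute are chosen purely for illustration):

.. code-block:: llvm

      %r = call <8 x i8> @llvm.vp.gather.v8i8.v8p0(<8 x ptr> align 8 %ptrs, <8 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %also.r = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> poison)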
@@ -23159,25 +23162,25 @@ the :ref:`llvm.masked.scatter <int_mscatter>` intrinsic.
Arguments:
""""""""""
-The first operand is a vector value to be written to memory. The second operand
+The first argument is a vector value to be written to memory. The second argument
is a vector of pointers, pointing to where the value elements should be stored.
-The third operand is a vector of boolean values with the same number of
+The third argument is a vector of boolean values with the same number of
elements as the return type. The fourth is the explicit vector length of the
operation.
The :ref:`align <attr_align>` parameter attribute can be provided for the
-second operand.
+second argument.
Semantics:
""""""""""
The '``llvm.vp.scatter``' intrinsic writes multiple scalar values to memory in
the same way as the '``llvm.masked.scatter``' intrinsic, where the mask is
-taken from the combination of the '``mask``' and '``evl``' operands in the
-usual VP way. The '``alignment``' operand of the '``llvm.masked.scatter``' does
-not have a corresponding operand in '``llvm.vp.scatter``': it is instead
+taken from the combination of the '``mask``' and '``evl``' arguments in the
+usual VP way. The '``alignment``' argument of the '``llvm.masked.scatter``' does
+not have a corresponding argument in '``llvm.vp.scatter``': it is instead
provided via the optional ``align`` parameter attribute on the
-vector-of-pointers operand. Otherwise it is taken as the ABI alignment of the
+vector-of-pointers argument. Otherwise it is taken as the ABI alignment of the
destination addresses as specified by the :ref:`datalayout
string<langref_datalayout>`.
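A sketch of the correspondence with '``llvm.masked.scatter``' (the value names
and the ``align 1`` attribute are chosen purely for illustration):

.. code-block:: llvm

      call void @llvm.vp.scatter.v8i8.v8p0(<8 x i8> %val, <8 x ptr> align 1 %ptrs, <8 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, the scatter behaves like
      call void @llvm.masked.scatter.v8i8.v8p0(<8 x i8> %val, <8 x ptr> %ptrs, i32 1, <8 x i1> %mask)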
@@ -23209,19 +23212,19 @@ This is an overloaded intrinsic.
Overview:
"""""""""
-The '``llvm.vp.trunc``' intrinsic truncates its first operand to the return
+The '``llvm.vp.trunc``' intrinsic truncates its first argument to the return
type. The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.trunc``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.trunc``' intrinsic takes a value to cast as its first argument.
The return type is the type to cast the value to. Both types must be vectors of
:ref:`integer <t_integer>` type. The bit size of the value must be larger than
-the bit size of the return type. The second operand is the vector mask. The
+the bit size of the return type. The second argument is the vector mask. The
return type, the value to cast, and the vector mask have the same number of
-elements. The third operand is the explicit vector length of the operation.
+elements. The third argument is the explicit vector length of the operation.
Semantics:
""""""""""
@@ -23262,19 +23265,19 @@ This is an overloaded intrinsic.
Overview:
"""""""""
-The '``llvm.vp.zext``' intrinsic zero extends its first operand to the return
+The '``llvm.vp.zext``' intrinsic zero extends its first argument to the return
type. The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.zext``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.zext``' intrinsic takes a value to cast as its first argument.
The return type is the type to cast the value to. Both types must be vectors of
:ref:`integer <t_integer>` type. The bit size of the value must be smaller than
-the bit size of the return type. The second operand is the vector mask. The
+the bit size of the return type. The second argument is the vector mask. The
return type, the value to cast, and the vector mask have the same number of
-elements. The third operand is the explicit vector length of the operation.
+elements. The third argument is the explicit vector length of the operation.
Semantics:
""""""""""
@@ -23314,19 +23317,19 @@ This is an overloaded intrinsic.
Overview:
"""""""""
-The '``llvm.vp.sext``' intrinsic sign extends its first operand to the return
+The '``llvm.vp.sext``' intrinsic sign extends its first argument to the return
type. The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.sext``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.sext``' intrinsic takes a value to cast as its first argument.
The return type is the type to cast the value to. Both types must be vectors of
:ref:`integer <t_integer>` type. The bit size of the value must be smaller than
-the bit size of the return type. The second operand is the vector mask. The
+the bit size of the return type. The second argument is the vector mask. The
return type, the value to cast, and the vector mask have the same number of
-elements. The third operand is the explicit vector length of the operation.
+elements. The third argument is the explicit vector length of the operation.
Semantics:
""""""""""
@@ -23366,20 +23369,20 @@ This is an overloaded intrinsic.
Overview:
"""""""""
-The '``llvm.vp.fptrunc``' intrinsic truncates its first operand to the return
+The '``llvm.vp.fptrunc``' intrinsic truncates its first argument to the return
type. The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.fptrunc``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.fptrunc``' intrinsic takes a value to cast as its first argument.
The return type is the type to cast the value to. Both types must be vectors of
:ref:`floating-point <t_floating>` type. The bit size of the value must be
larger than the bit size of the return type. This implies that
-'``llvm.vp.fptrunc``' cannot be used to make a *no-op cast*. The second operand
+'``llvm.vp.fptrunc``' cannot be used to make a *no-op cast*. The second argument
is the vector mask. The return type, the value to cast, and the vector mask have
-the same number of elements. The third operand is the explicit vector length of
+the same number of elements. The third argument is the explicit vector length of
the operation.
Semantics:
@@ -23422,20 +23425,20 @@ This is an overloaded intrinsic.
Overview:
"""""""""
-The '``llvm.vp.fpext``' intrinsic extends its first operand to the return
+The '``llvm.vp.fpext``' intrinsic extends its first argument to the return
type. The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.fpext``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.fpext``' intrinsic takes a value to cast as its first argument.
The return type is the type to cast the value to. Both types must be vectors of
:ref:`floating-point <t_floating>` type. The bit size of the value must be
smaller than the bit size of the return type. This implies that
-'``llvm.vp.fpext``' cannot be used to make a *no-op cast*. The second operand
+'``llvm.vp.fpext``' cannot be used to make a *no-op cast*. The second argument
is the vector mask. The return type, the value to cast, and the vector mask have
-the same number of elements. The third operand is the explicit vector length of
+the same number of elements. The third argument is the explicit vector length of
the operation.
Semantics:
@@ -23480,26 +23483,26 @@ Overview:
"""""""""
The '``llvm.vp.fptoui``' intrinsic converts the :ref:`floating-point
-<t_floating>` operand to the unsigned integer return type.
+<t_floating>` argument to the unsigned integer return type.
The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.fptoui``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.fptoui``' intrinsic takes a value to cast as its first argument.
The value to cast must be a vector of :ref:`floating-point <t_floating>` type.
The return type is the type to cast the value to. The return type must be
-vector of :ref:`integer <t_integer>` type. The second operand is the vector
+a vector of :ref:`integer <t_integer>` type. The second argument is the vector
mask. The return type, the value to cast, and the vector mask have the same
-number of elements. The third operand is the explicit vector length of the
+number of elements. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fptoui``' intrinsic converts its :ref:`floating-point
-<t_floating>` operand into the nearest (rounding towards zero) unsigned integer
+<t_floating>` argument into the nearest (rounding towards zero) unsigned integer
value where the lane position is below the explicit vector length and the
vector mask is true. Masked-off lanes are ``poison``. On enabled lanes where
conversion takes place and the value cannot fit in the return type, the result
@@ -23536,26 +23539,26 @@ Overview:
"""""""""
The '``llvm.vp.fptosi``' intrinsic converts the :ref:`floating-point
-<t_floating>` operand to the signed integer return type.
+<t_floating>` argument to the signed integer return type.
The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.fptosi``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.fptosi``' intrinsic takes a value to cast as its first argument.
The value to cast must be a vector of :ref:`floating-point <t_floating>` type.
The return type is the type to cast the value to. The return type must be
-vector of :ref:`integer <t_integer>` type. The second operand is the vector
+a vector of :ref:`integer <t_integer>` type. The second argument is the vector
mask. The return type, the value to cast, and the vector mask have the same
-number of elements. The third operand is the explicit vector length of the
+number of elements. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fptosi``' intrinsic converts its :ref:`floating-point
-<t_floating>` operand into the nearest (rounding towards zero) signed integer
+<t_floating>` argument into the nearest (rounding towards zero) signed integer
value where the lane position is below the explicit vector length and the
vector mask is true. Masked-off lanes are ``poison``. On enabled lanes where
conversion takes place and the value cannot fit in the return type, the result
@@ -23591,7 +23594,7 @@ This is an overloaded intrinsic.
Overview:
"""""""""
-The '``llvm.vp.uitofp``' intrinsic converts its unsigned integer operand to the
+The '``llvm.vp.uitofp``' intrinsic converts its unsigned integer argument to the
:ref:`floating-point <t_floating>` return type. The operation has a mask and
an explicit vector length parameter.
@@ -23599,18 +23602,18 @@ an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.uitofp``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.uitofp``' intrinsic takes a value to cast as its first argument.
The value to cast must be a vector of :ref:`integer <t_integer>` type. The
return type is the type to cast the value to. The return type must be a vector
-of :ref:`floating-point <t_floating>` type. The second operand is the vector
+of :ref:`floating-point <t_floating>` type. The second argument is the vector
mask. The return type, the value to cast, and the vector mask have the same
-number of elements. The third operand is the explicit vector length of the
+number of elements. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
-The '``llvm.vp.uitofp``' intrinsic interprets its first operand as an unsigned
+The '``llvm.vp.uitofp``' intrinsic interprets its first argument as an unsigned
integer quantity and converts it to the corresponding floating-point value. If
the value cannot be exactly represented, it is rounded using the default
rounding mode. The conversion is performed on lane positions below the
@@ -23647,7 +23650,7 @@ This is an overloaded intrinsic.
Overview:
"""""""""
-The '``llvm.vp.sitofp``' intrinsic converts its signed integer operand to the
+The '``llvm.vp.sitofp``' intrinsic converts its signed integer argument to the
:ref:`floating-point <t_floating>` return type. The operation has a mask and
an explicit vector length parameter.
@@ -23655,18 +23658,18 @@ an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.sitofp``' intrinsic takes a value to cast as its first operand.
+The '``llvm.vp.sitofp``' intrinsic takes a value to cast as its first argument.
The value to cast must be a vector of :ref:`integer <t_integer>` type. The
return type is the type to cast the value to. The return type must be a vector
-of :ref:`floating-point <t_floating>` type. The second operand is the vector
+of :ref:`floating-point <t_floating>` type. The second argument is the vector
mask. The return type, the value to cast, and the vector mask have the same
-number of elements. The third operand is the explicit vector length of the
+number of elements. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
-The '``llvm.vp.sitofp``' intrinsic interprets its first operand as a signed
+The '``llvm.vp.sitofp``' intrinsic interprets its first argument as a signed
integer quantity and converts it to the corresponding floating-point value. If
the value cannot be exactly represented, it is rounded using the default
rounding mode. The conversion is performed on lane positions below the
@@ -23710,12 +23713,12 @@ type. The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.ptrtoint``' intrinsic takes a value to cast as its first operand
+The '``llvm.vp.ptrtoint``' intrinsic takes a value to cast as its first argument,
which must be a vector of pointers, and a type to cast it to, the return type,
which must be a vector of :ref:`integer <t_integer>` type.
-The second operand is the vector mask. The return type, the value to cast, and
+The second argument is the vector mask. The return type, the value to cast, and
the vector mask have the same number of elements.
-The third operand is the explicit vector length of the operation.
+The third argument is the explicit vector length of the operation.
Semantics:
""""""""""
@@ -23767,12 +23770,12 @@ return type. The operation has a mask and an explicit vector length parameter.
Arguments:
""""""""""
-The '``llvm.vp.inttoptr``' intrinsic takes a value to cast as its first operand
+The '``llvm.vp.inttoptr``' intrinsic takes a value to cast as its first argument,
which must be a vector of :ref:`integer <t_integer>` type, and a type to cast
it to, the return type, which must be a vector of pointers.
-The second operand is the vector mask. The return type, the value to cast, and
+The second argument is the vector mask. The return type, the value to cast, and
the vector mask have the same number of elements.
-The third operand is the explicit vector length of the operation.
+The third argument is the explicit vector length of the operation.
Semantics:
""""""""""
@@ -23816,7 +23819,7 @@ Overview:
"""""""""
The '``llvm.vp.fcmp``' intrinsic returns a vector of boolean values based on
-the comparison of its operands. The operation has a mask and an explicit vector
+the comparison of its arguments. The operation has a mask and an explicit vector
length parameter.
@@ -23824,21 +23827,21 @@ Arguments:
""""""""""
The '``llvm.vp.fcmp``' intrinsic takes the two values to compare as its first
-and second operands. These two values must be vectors of :ref:`floating-point
+and second arguments. These two values must be vectors of :ref:`floating-point
<t_floating>` types.
The return type is the result of the comparison. The return type must be a
-vector of :ref:`i1 <t_integer>` type. The fourth operand is the vector mask.
+vector of :ref:`i1 <t_integer>` type. The fourth argument is the vector mask.
The return type, the values to compare, and the vector mask have the same
-number of elements. The third operand is the condition code indicating the kind
+number of elements. The third argument is the condition code indicating the kind
of comparison to perform. It must be a metadata string with :ref:`one of the
-supported floating-point condition code values <fcmp_md_cc>`. The fifth operand
+supported floating-point condition code values <fcmp_md_cc>`. The fifth argument
is the explicit vector length of the operation.
Semantics:
""""""""""
-The '``llvm.vp.fcmp``' compares its first two operands according to the
-condition code given as the third operand. The operands are compared element by
+The '``llvm.vp.fcmp``' compares its first two arguments according to the
+condition code given as the third argument. The arguments are compared element by
element on each enabled lane, where the semantics of the comparison are
defined :ref:`according to the condition code <fcmp_md_cc_sem>`. Masked-off
lanes are ``poison``.
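A sketch with placeholder value names (the condition code ``oeq`` is chosen
purely for illustration):

.. code-block:: llvm

      %r = call <4 x i1> @llvm.vp.fcmp.v4f32(<4 x float> %a, <4 x float> %b, metadata !"oeq", <4 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %t = fcmp oeq <4 x float> %a, %b
      %also.r = select <4 x i1> %mask, <4 x i1> %t, <4 x i1> poison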
@@ -23874,7 +23877,7 @@ Overview:
"""""""""
The '``llvm.vp.icmp``' intrinsic returns a vector of boolean values based on
-the comparison of its operands. The operation has a mask and an explicit vector
+the comparison of its arguments. The operation has a mask and an explicit vector
length parameter.
@@ -23882,21 +23885,21 @@ Arguments:
""""""""""
The '``llvm.vp.icmp``' intrinsic takes the two values to compare as its first
-and second operands. These two values must be vectors of :ref:`integer
+and second arguments. These two values must be vectors of :ref:`integer
<t_integer>` types.
The return type is the result of the comparison. The return type must be a
-vector of :ref:`i1 <t_integer>` type. The fourth operand is the vector mask.
+vector of :ref:`i1 <t_integer>` type. The fourth argument is the vector mask.
The return type, the values to compare, and the vector mask have the same
-number of elements. The third operand is the condition code indicating the kind
+number of elements. The third argument is the condition code indicating the kind
of comparison to perform. It must be a metadata string with :ref:`one of the
-supported integer condition code values <icmp_md_cc>`. The fifth operand is the
+supported integer condition code values <icmp_md_cc>`. The fifth argument is the
explicit vector length of the operation.
Semantics:
""""""""""
-The '``llvm.vp.icmp``' compares its first two operands according to the
-condition code given as the third operand. The operands are compared element by
+The '``llvm.vp.icmp``' compares its first two arguments according to the
+condition code given as the third argument. The arguments are compared element by
element on each enabled lane, where the semantics of the comparison are
defined :ref:`according to the condition code <icmp_md_cc_sem>`. Masked-off
lanes are ``poison``.
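A sketch with placeholder value names (the condition code ``ne`` is chosen
purely for illustration):

.. code-block:: llvm

      %r = call <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32> %a, <4 x i32> %b, metadata !"ne", <4 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %t = icmp ne <4 x i32> %a, %b
      %also.r = select <4 x i1> %mask, <4 x i1> %t, <4 x i1> poison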
@@ -23936,16 +23939,16 @@ Predicated floating-point ceiling of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.ceil``' intrinsic performs floating-point ceiling
-(:ref:`ceil <int_ceil>`) of the first vector operand on each enabled lane. The
+(:ref:`ceil <int_ceil>`) of the first vector argument on each enabled lane. The
result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -23983,16 +23986,16 @@ Predicated floating-point floor of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.floor``' intrinsic performs floating-point floor
-(:ref:`floor <int_floor>`) of the first vector operand on each enabled lane.
+(:ref:`floor <int_floor>`) of the first vector argument on each enabled lane.
The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24030,16 +24033,16 @@ Predicated floating-point rint of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.rint``' intrinsic performs floating-point rint
-(:ref:`rint <int_rint>`) of the first vector operand on each enabled lane.
+(:ref:`rint <int_rint>`) of the first vector argument on each enabled lane.
The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24077,16 +24080,16 @@ Predicated floating-point nearbyint of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.nearbyint``' intrinsic performs floating-point nearbyint
-(:ref:`nearbyint <int_nearbyint>`) of the first vector operand on each enabled lane.
+(:ref:`nearbyint <int_nearbyint>`) of the first vector argument on each enabled lane.
The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24124,16 +24127,16 @@ Predicated floating-point round of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.round``' intrinsic performs floating-point round
-(:ref:`round <int_round>`) of the first vector operand on each enabled lane.
+(:ref:`round <int_round>`) of the first vector argument on each enabled lane.
The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24171,16 +24174,16 @@ Predicated floating-point roundeven of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.roundeven``' intrinsic performs floating-point roundeven
-(:ref:`roundeven <int_roundeven>`) of the first vector operand on each enabled
+(:ref:`roundeven <int_roundeven>`) of the first vector argument on each enabled
lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24218,16 +24221,16 @@ Predicated floating-point round-to-zero of a vector of floating-point values.
Arguments:
""""""""""
-The first operand and the result have the same vector of floating-point type.
-The second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of floating-point type.
+The second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.roundtozero``' intrinsic performs floating-point round-to-zero
-(:ref:`llvm.trunc <int_llvm_trunc>`) of the first vector operand on each enabled lane. The
+(:ref:`llvm.trunc <int_llvm_trunc>`) of the first vector argument on each enabled lane. The
result on disabled lanes is a :ref:`poison value <poisonvalues>`.
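A sketch in terms of the unpredicated intrinsic (value names are placeholders):

.. code-block:: llvm

      %r = call <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float> %a, <4 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
      %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a)
      %also.r = select <4 x i1> %mask, <4 x float> %t, <4 x float> poison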
Examples:
@@ -24265,16 +24268,16 @@ Predicated lrint of a vector of floating-point values.
Arguments:
""""""""""
-The result is an integer vector and the first operand is a vector of :ref:`floating-point <t_floating>`
+The result is an integer vector and the first argument is a vector of :ref:`floating-point <t_floating>`
type with the same number of elements as the result vector type. The second
-operand is the vector mask and has the same number of elements as the result
-vector type. The third operand is the explicit vector length of the operation.
+argument is the vector mask and has the same number of elements as the result
+vector type. The third argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.lrint``' intrinsic performs lrint (:ref:`lrint <int_lrint>`) of
-the first vector operand on each enabled lane. The result on disabled lanes is a
+the first vector argument on each enabled lane. The result on disabled lanes is a
:ref:`poison value <poisonvalues>`.
Examples:
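As a sketch (types chosen for the example; the intrinsic is overloaded on
both the result and source vector types):

.. code-block:: llvm

      ;; lrint of each enabled lane of %a, producing an integer vector;
      ;; disabled lanes yield poison.
      %v = call <4 x i32> @llvm.vp.lrint.v4i32.v4f32(<4 x float> %a, <4 x i1> %mask, i32 %evl)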
@@ -24311,16 +24314,16 @@ Predicated llrint of a vector of floating-point values.
Arguments:
""""""""""
-The result is an integer vector and the first operand is a vector of :ref:`floating-point <t_floating>`
+The result is an integer vector and the first argument is a vector of :ref:`floating-point <t_floating>`
type with the same number of elements as the result vector type. The second
-operand is the vector mask and has the same number of elements as the result
-vector type. The third operand is the explicit vector length of the operation.
+argument is the vector mask and has the same number of elements as the result
+vector type. The third argument is the explicit vector length of the operation.
Semantics:
""""""""""
The '``llvm.vp.llrint``' intrinsic performs llrint (:ref:`llrint <int_llrint>`) of
-the first vector operand on each enabled lane. The result on disabled lanes is a
+the first vector argument on each enabled lane. The result on disabled lanes is a
:ref:`poison value <poisonvalues>`.
Examples:
@@ -24359,15 +24362,15 @@ Predicated bitreverse of a vector of integers.
Arguments:
""""""""""
-The first operand and the result have the same vector of integer type. The
-second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of integer type. The
+second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
-The '``llvm.vp.bitreverse``' intrinsic performs bitreverse (:ref:`bitreverse <int_bitreverse>`) of the first operand on each
+The '``llvm.vp.bitreverse``' intrinsic performs bitreverse (:ref:`bitreverse <int_bitreverse>`) of the first argument on each
enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24406,15 +24409,15 @@ Predicated bswap of a vector of integers.
Arguments:
""""""""""
-The first operand and the result have the same vector of integer type. The
-second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of integer type. The
+second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
-The '``llvm.vp.bswap``' intrinsic performs bswap (:ref:`bswap <int_bswap>`) of the first operand on each
+The '``llvm.vp.bswap``' intrinsic performs bswap (:ref:`bswap <int_bswap>`) of the first argument on each
enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24453,15 +24456,15 @@ Predicated ctpop of a vector of integers.
Arguments:
""""""""""
-The first operand and the result have the same vector of integer type. The
-second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
+The first argument and the result have the same vector of integer type. The
+second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
operation.
Semantics:
""""""""""
-The '``llvm.vp.ctpop``' intrinsic performs ctpop (:ref:`ctpop <int_ctpop>`) of the first operand on each
+The '``llvm.vp.ctpop``' intrinsic performs ctpop (:ref:`ctpop <int_ctpop>`) of the first argument on each
enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24500,17 +24503,17 @@ Predicated ctlz of a vector of integers.
Arguments:
""""""""""
-The first operand and the result have the same vector of integer type. The
-second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
-operation. The fourth operand is a constant flag that indicates whether the
-intrinsic returns a valid result if the first operand is zero. If the first
-operand is zero and the fourth operand is true, the result is poison.
+The first argument and the result have the same vector of integer type. The
+second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
+operation. The fourth argument is a constant flag that indicates whether the
+intrinsic returns a valid result if the first argument is zero. If the first
+argument is zero and the fourth argument is true, the result is poison.
Semantics:
""""""""""
-The '``llvm.vp.ctlz``' intrinsic performs ctlz (:ref:`ctlz <int_ctlz>`) of the first operand on each
+The '``llvm.vp.ctlz``' intrinsic performs ctlz (:ref:`ctlz <int_ctlz>`) of the first argument on each
enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
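A hypothetical call following the argument order described above (the vector
shape is chosen for the example):

.. code-block:: llvm

      ;; Count leading zeros on each enabled lane; the trailing i1 false
      ;; requests a defined result even when a lane's input is zero.
      %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl, i1 false)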
@@ -24549,17 +24552,17 @@ Predicated cttz of a vector of integers.
Arguments:
""""""""""
-The first operand and the result have the same vector of integer type. The
-second operand is the vector mask and has the same number of elements as the
-result vector type. The third operand is the explicit vector length of the
-operation. The fourth operand is a constant flag that indicates whether the
-intrinsic returns a valid result if the first operand is zero. If the first
-operand is zero and the fourth operand is true, the result is poison.
+The first argument and the result have the same vector of integer type. The
+second argument is the vector mask and has the same number of elements as the
+result vector type. The third argument is the explicit vector length of the
+operation. The fourth argument is a constant flag that indicates whether the
+intrinsic returns a valid result if the first argument is zero. If the first
+argument is zero and the fourth argument is true, the result is poison.
Semantics:
""""""""""
-The '``llvm.vp.cttz``' intrinsic performs cttz (:ref:`cttz <int_cttz>`) of the first operand on each
+The '``llvm.vp.cttz``' intrinsic performs cttz (:ref:`cttz <int_cttz>`) of the first argument on each
enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24609,18 +24612,18 @@ for the number of elements in the input vector.
The second argument is a constant flag that indicates whether the intrinsic
returns a valid result if the first argument is all zero.
-The third operand is the vector mask and has the same number of elements as the
-input vector type. The fourth operand is the explicit vector length of the
+The third argument is the vector mask and has the same number of elements as the
+input vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.cttz.elts``' intrinsic counts the trailing (least
-significant / lowest-numbered) zero elements in the first operand on each
+significant / lowest-numbered) zero elements in the first argument on each
enabled lane. If the first argument is all zero and the second argument is true,
the result is poison. Otherwise, it returns the explicit vector length (i.e. the
-fourth operand).
+fourth argument).
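As an illustrative sketch (the ``i32``/``v4i32`` overloads are assumptions
made for this example):

.. code-block:: llvm

      ;; Count the trailing all-zero elements of %src; i1 false requests a
      ;; defined result even when every element is zero.
      %r = call i32 @llvm.vp.cttz.elts.i32.v4i32(<4 x i32> %src, i1 false, <4 x i1> %mask, i32 %evl)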
.. _int_vp_sadd_sat:
@@ -24646,16 +24649,16 @@ Predicated signed saturating addition of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.sadd.sat``' intrinsic performs sadd.sat (:ref:`sadd.sat <int_sadd_sat>`)
-of the first and second vector operands on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
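A minimal sketch of a call, with placeholder types and values:

.. code-block:: llvm

      ;; Signed saturating add of %a and %b on each enabled lane; sums that
      ;; overflow clamp to the extremes of i32.
      %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)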
@@ -24695,16 +24698,16 @@ Predicated unsigned saturating addition of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.uadd.sat``' intrinsic performs uadd.sat (:ref:`uadd.sat <int_uadd_sat>`)
-of the first and second vector operands on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
@@ -24744,16 +24747,16 @@ Predicated signed saturating subtraction of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.ssub.sat``' intrinsic performs ssub.sat (:ref:`ssub.sat <int_ssub_sat>`)
-of the first and second vector operands on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
@@ -24793,16 +24796,16 @@ Predicated unsigned saturating subtraction of two vectors of integers.
Arguments:
""""""""""
-The first two operands and the result have the same vector of integer type. The
-third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+The first two arguments and the result have the same vector of integer type. The
+third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.usub.sat``' intrinsic performs usub.sat (:ref:`usub.sat <int_usub_sat>`)
-of the first and second vector operands on each enabled lane. The result on
+of the first and second vector arguments on each enabled lane. The result on
disabled lanes is a :ref:`poison value <poisonvalues>`.
@@ -24842,16 +24845,16 @@ Predicated fshl of three vectors of integers.
Arguments:
""""""""""
-The first three operand and the result have the same vector of integer type. The
-fourth operand is the vector mask and has the same number of elements as the
-result vector type. The fifth operand is the explicit vector length of the
+The first three arguments and the result have the same vector of integer type. The
+fourth argument is the vector mask and has the same number of elements as the
+result vector type. The fifth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fshl``' intrinsic performs fshl (:ref:`fshl <int_fshl>`) of the first, second, and third
-vector operand on each enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+vector arguments on each enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
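For illustration (placeholder vector shape), a funnel-shift-left call might
look like:

.. code-block:: llvm

      ;; Concatenate %a (most significant) with %b (least significant),
      ;; shift left by %c modulo the bit width, and keep the upper half,
      ;; per enabled lane.
      %v = call <4 x i32> @llvm.vp.fshl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %mask, i32 %evl)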
@@ -24888,16 +24891,16 @@ Predicated fshr of three vectors of integers.
Arguments:
""""""""""
-The first three operand and the result have the same vector of integer type. The
-fourth operand is the vector mask and has the same number of elements as the
-result vector type. The fifth operand is the explicit vector length of the
+The first three arguments and the result have the same vector of integer type. The
+fourth argument is the vector mask and has the same number of elements as the
+result vector type. The fifth argument is the explicit vector length of the
operation.
Semantics:
""""""""""
The '``llvm.vp.fshr``' intrinsic performs fshr (:ref:`fshr <int_fshr>`) of the first, second, and third
-vector operand on each enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+vector arguments on each enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
Examples:
@@ -24931,11 +24934,11 @@ Predicated llvm.is.fpclass :ref:`llvm.is.fpclass <llvm.is.fpclass>`
Arguments:
""""""""""
-The first operand is a floating-point vector, the result type is a vector of
+The first argument is a floating-point vector; the result type is a vector of
boolean with the same number of elements as the first argument. The second
-operand specifies, which tests to perform :ref:`llvm.is.fpclass <llvm.is.fpclass>`.
-The third operand is the vector mask and has the same number of elements as the
-result vector type. The fourth operand is the explicit vector length of the
+argument specifies which tests to perform, as for :ref:`llvm.is.fpclass <llvm.is.fpclass>`.
+The third argument is the vector mask and has the same number of elements as the
+result vector type. The fourth argument is the explicit vector length of the
operation.
Semantics:
>From 717d839be4e36ab5330e60d61e0e1265c233b2ea Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 2 Jul 2024 11:59:16 -0700
Subject: [PATCH 031/246] [LangRef] Rename 'operand' to 'argument' in
descriptions for masked load/store/gather/scatter. NFC (#97440)
Following up on the renaming started in #97437.
---
llvm/docs/LangRef.rst | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 1dc97d45c0953..3efe47d35b2e0 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24960,7 +24960,7 @@ Examples:
Masked Vector Load and Store Intrinsics
---------------------------------------
-LLVM provides intrinsics for predicated vector load and store operations. The predicate is specified by a mask operand, which holds one bit per vector element, switching the associated vector lane on or off. The memory addresses corresponding to the "off" lanes are not accessed. When all bits of the mask are on, the intrinsic is identical to a regular vector load or store. When all bits are off, no memory is accessed.
+LLVM provides intrinsics for predicated vector load and store operations. The predicate is specified by a mask argument, which holds one bit per vector element, switching the associated vector lane on or off. The memory addresses corresponding to the "off" lanes are not accessed. When all bits of the mask are on, the intrinsic is identical to a regular vector load or store. When all bits are off, no memory is accessed.
.. _int_mload:
@@ -24981,13 +24981,13 @@ This is an overloaded intrinsic. The loaded data is a vector of any integer, flo
Overview:
"""""""""
-Reads a vector from memory according to the provided mask. The mask holds a bit for each vector lane, and is used to prevent memory accesses to the masked-off lanes. The masked-off lanes in the result vector are taken from the corresponding lanes of the '``passthru``' operand.
+Reads a vector from memory according to the provided mask. The mask holds a bit for each vector lane, and is used to prevent memory accesses to the masked-off lanes. The masked-off lanes in the result vector are taken from the corresponding lanes of the '``passthru``' argument.
Arguments:
""""""""""
-The first operand is the base pointer for the load. The second operand is the alignment of the source location. It must be a power of two constant integer value. The third operand, mask, is a vector of boolean values with the same number of elements as the return type. The fourth is a pass-through value that is used to fill the masked-off lanes of the result. The return type, underlying type of the base pointer and the type of the '``passthru``' operand are the same vector types.
+The first argument is the base pointer for the load. The second argument is the alignment of the source location. It must be a power of two constant integer value. The third argument, mask, is a vector of boolean values with the same number of elements as the return type. The fourth is a pass-through value that is used to fill the masked-off lanes of the result. The return type, underlying type of the base pointer and the type of the '``passthru``' argument are the same vector types.
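For example, a call matching this signature (the element type and the
alignment value are chosen for the sketch):

.. code-block:: llvm

      ;; Load 4 floats from %p (16-byte aligned); masked-off lanes take the
      ;; corresponding lanes of %passthru.
      %v = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 16, <4 x i1> %mask, <4 x float> %passthru)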
Semantics:
""""""""""
@@ -25028,7 +25028,7 @@ Writes a vector to memory according to the provided mask. The mask holds a bit f
Arguments:
""""""""""
-The first operand is the vector value to be written to memory. The second operand is the base pointer for the store, it has the same underlying type as the value operand. The third operand is the alignment of the destination location. It must be a power of two constant integer value. The fourth operand, mask, is a vector of boolean values. The types of the mask and the value operand must have the same number of vector elements.
+The first argument is the vector value to be written to memory. The second argument is the base pointer for the store; it has the same underlying type as the value argument. The third argument is the alignment of the destination location. It must be a power of two constant integer value. The fourth argument, mask, is a vector of boolean values. The types of the mask and the value argument must have the same number of vector elements.
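A sketch with placeholder types:

.. code-block:: llvm

      ;; Store the enabled lanes of %v to %p (16-byte aligned); memory
      ;; behind masked-off lanes is left untouched.
      call void @llvm.masked.store.v4f32.p0(<4 x float> %v, ptr %p, i32 16, <4 x i1> %mask)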
Semantics:
@@ -25050,7 +25050,7 @@ The result of this operation is equivalent to a load-modify-store sequence. Howe
Masked Vector Gather and Scatter Intrinsics
-------------------------------------------
-LLVM provides intrinsics for vector gather and scatter operations. They are similar to :ref:`Masked Vector Load and Store <int_mload_mstore>`, except they are designed for arbitrary memory accesses, rather than sequential memory accesses. Gather and scatter also employ a mask operand, which holds one bit per vector element, switching the associated vector lane on or off. The memory addresses corresponding to the "off" lanes are not accessed. When all bits are off, no memory is accessed.
+LLVM provides intrinsics for vector gather and scatter operations. They are similar to :ref:`Masked Vector Load and Store <int_mload_mstore>`, except they are designed for arbitrary memory accesses, rather than sequential memory accesses. Gather and scatter also employ a mask argument, which holds one bit per vector element, switching the associated vector lane on or off. The memory addresses corresponding to the "off" lanes are not accessed. When all bits are off, no memory is accessed.
.. _int_mgather:
@@ -25070,13 +25070,13 @@ This is an overloaded intrinsic. The loaded data are multiple scalar values of a
Overview:
"""""""""
-Reads scalar values from arbitrary memory locations and gathers them into one vector. The memory locations are provided in the vector of pointers '``ptrs``'. The memory is accessed according to the provided mask. The mask holds a bit for each vector lane, and is used to prevent memory accesses to the masked-off lanes. The masked-off lanes in the result vector are taken from the corresponding lanes of the '``passthru``' operand.
+Reads scalar values from arbitrary memory locations and gathers them into one vector. The memory locations are provided in the vector of pointers '``ptrs``'. The memory is accessed according to the provided mask. The mask holds a bit for each vector lane, and is used to prevent memory accesses to the masked-off lanes. The masked-off lanes in the result vector are taken from the corresponding lanes of the '``passthru``' argument.
Arguments:
""""""""""
-The first operand is a vector of pointers which holds all memory addresses to read. The second operand is an alignment of the source addresses. It must be 0 or a power of two constant integer value. The third operand, mask, is a vector of boolean values with the same number of elements as the return type. The fourth is a pass-through value that is used to fill the masked-off lanes of the result. The return type, underlying type of the vector of pointers and the type of the '``passthru``' operand are the same vector types.
+The first argument is a vector of pointers which holds all memory addresses to read. The second argument is an alignment of the source addresses. It must be 0 or a power of two constant integer value. The third argument, mask, is a vector of boolean values with the same number of elements as the return type. The fourth is a pass-through value that is used to fill the masked-off lanes of the result. The return type, underlying type of the vector of pointers and the type of the '``passthru``' argument are the same vector types.
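An illustrative call (the ``v4f64`` shape and the alignment are assumptions
of this sketch):

.. code-block:: llvm

      ;; Gather 4 doubles from the arbitrary addresses in %ptrs; disabled
      ;; lanes come from %passthru.
      %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x double> %passthru)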
Semantics:
""""""""""
@@ -25128,7 +25128,7 @@ Writes each element from the value vector to the corresponding memory address. T
Arguments:
""""""""""
-The first operand is a vector value to be written to memory. The second operand is a vector of pointers, pointing to where the value elements should be stored. It has the same underlying type as the value operand. The third operand is an alignment of the destination addresses. It must be 0 or a power of two constant integer value. The fourth operand, mask, is a vector of boolean values. The types of the mask and the value operand must have the same number of vector elements.
+The first argument is a vector value to be written to memory. The second argument is a vector of pointers, pointing to where the value elements should be stored. It has the same underlying type as the value argument. The third argument is an alignment of the destination addresses. It must be 0 or a power of two constant integer value. The fourth argument, mask, is a vector of boolean values. The types of the mask and the value argument must have the same number of vector elements.
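For illustration, the store-side counterpart with placeholder types:

.. code-block:: llvm

      ;; Scatter the enabled lanes of %v to the corresponding addresses in
      ;; %ptrs; masked-off addresses are not accessed.
      call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %v, <4 x ptr> %ptrs, i32 8, <4 x i1> %mask)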
Semantics:
""""""""""
@@ -25178,16 +25178,16 @@ This is an overloaded intrinsic. Several values of integer, floating point or po
Overview:
"""""""""
-Reads a number of scalar values sequentially from memory location provided in '``ptr``' and spreads them in a vector. The '``mask``' holds a bit for each vector lane. The number of elements read from memory is equal to the number of '1' bits in the mask. The loaded elements are positioned in the destination vector according to the sequence of '1' and '0' bits in the mask. E.g., if the mask vector is '10010001', "expandload" reads 3 values from memory addresses ptr, ptr+1, ptr+2 and places them in lanes 0, 3 and 7 accordingly. The masked-off lanes are filled by elements from the corresponding lanes of the '``passthru``' operand.
+Reads a number of scalar values sequentially from the memory location provided in '``ptr``' and spreads them into a vector. The '``mask``' holds a bit for each vector lane. The number of elements read from memory is equal to the number of '1' bits in the mask. The loaded elements are positioned in the destination vector according to the sequence of '1' and '0' bits in the mask. E.g., if the mask vector is '10010001', "expandload" reads 3 values from memory addresses ptr, ptr+1, ptr+2 and places them in lanes 0, 3 and 7 accordingly. The masked-off lanes are filled by elements from the corresponding lanes of the '``passthru``' argument.
Arguments:
""""""""""
-The first operand is the base pointer for the load. It has the same underlying type as the element of the returned vector. The second operand, mask, is a vector of boolean values with the same number of elements as the return type. The third is a pass-through value that is used to fill the masked-off lanes of the result. The return type and the type of the '``passthru``' operand have the same vector type.
+The first argument is the base pointer for the load. It has the same underlying type as the element of the returned vector. The second argument, mask, is a vector of boolean values with the same number of elements as the return type. The third is a pass-through value that is used to fill the masked-off lanes of the result. The return type and the type of the '``passthru``' argument have the same vector type.
The :ref:`align <attr_align>` parameter attribute can be provided for the first
-operand. The pointer alignment defaults to 1.
+argument. The pointer alignment defaults to 1.
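A minimal sketch, with the alignment expressed through the ``align``
parameter attribute as described above (types chosen for the example):

.. code-block:: llvm

      ;; Read one float per set mask bit, sequentially from %p, and expand
      ;; them into the enabled lanes of the result.
      %v = call <8 x float> @llvm.masked.expandload.v8f32(ptr align 4 %p, <8 x i1> %mask, <8 x float> %passthru)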
Semantics:
""""""""""
@@ -25244,10 +25244,10 @@ Selects elements from input vector '``value``' according to the '``mask``'. All
Arguments:
""""""""""
-The first operand is the input vector, from which elements are collected and written to memory. The second operand is the base pointer for the store, it has the same underlying type as the element of the input vector operand. The third operand is the mask, a vector of boolean values. The mask and the input vector must have the same number of vector elements.
+The first argument is the input vector, from which elements are collected and written to memory. The second argument is the base pointer for the store, it has the same underlying type as the element of the input vector argument. The third argument is the mask, a vector of boolean values. The mask and the input vector must have the same number of vector elements.
The :ref:`align <attr_align>` parameter attribute can be provided for the second
-operand. The pointer alignment defaults to 1.
+argument. The pointer alignment defaults to 1.
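And a matching sketch for the store direction:

.. code-block:: llvm

      ;; Pack the enabled lanes of %v and store them contiguously starting
      ;; at %p.
      call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr align 4 %p, <8 x i1> %mask)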
Semantics:
""""""""""
>From 139508eb87bda18c8adbfb87bba6acac8ce29a36 Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Tue, 2 Jul 2024 12:03:06 -0700
Subject: [PATCH 032/246] [SLP][NFC]Add a test with inefficient reordering of
operands, NFC.
---
.../X86/reordering-single-phi.ll | 80 +++++++++++++++++++
1 file changed, 80 insertions(+)
create mode 100644 llvm/test/Transforms/SLPVectorizer/X86/reordering-single-phi.ll
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reordering-single-phi.ll b/llvm/test/Transforms/SLPVectorizer/X86/reordering-single-phi.ll
new file mode 100644
index 0000000000000..156ab54dbf237
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reordering-single-phi.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux < %s | FileCheck %s
+
+ at a = external global [32000 x float], align 64
+
+define void @test() {
+; CHECK-LABEL: define void @test() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP16:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
+; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x float>, ptr [[ARRAYIDX11]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
+; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX31]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[TMP7]], float [[TMP2]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> [[TMP9]], <4 x i32> <i32 0, i32 1, i32 5, i32 poison>
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x float> [[TMP10]], float [[TMP6]], i32 3
+; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x float> poison, float [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <4 x float> [[TMP12]], <4 x float> [[TMP9]], <4 x i32> <i32 0, i32 4, i32 poison, i32 6>
+; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x float> [[TMP13]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 3>
+; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <4 x float> [[TMP11]], [[TMP14]]
+; CHECK-NEXT: store <4 x float> [[TMP15]], ptr [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 5
+; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[TMP16]] = load float, ptr [[ARRAYIDX41]], align 4
+; CHECK-NEXT: [[MUL45:%.*]] = fmul fast float [[TMP16]], [[TMP6]]
+; CHECK-NEXT: store float [[MUL45]], ptr [[ARRAYIDX31]], align 4
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[INDVARS_IV]], 31990
+; CHECK-NEXT: br i1 [[CMP2]], label %[[FOR_BODY]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %0 = phi float [ 0.000000e+00, %entry ], [ %9, %for.body ]
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %1 = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %1
+ %2 = load float, ptr %arrayidx, align 4
+ %arrayidx6 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv
+ %mul = fmul fast float %0, %2
+ store float %mul, ptr %arrayidx6, align 4
+ %3 = add nuw nsw i64 %indvars.iv, 2
+ %arrayidx11 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %3
+ %4 = load float, ptr %arrayidx11, align 4
+ %mul15 = fmul fast float %4, %2
+ store float %mul15, ptr %arrayidx, align 4
+ %5 = add nuw nsw i64 %indvars.iv, 3
+ %arrayidx21 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %5
+ %6 = load float, ptr %arrayidx21, align 4
+ %mul25 = fmul fast float %6, %4
+ store float %mul25, ptr %arrayidx11, align 4
+ %7 = add nuw nsw i64 %indvars.iv, 4
+ %arrayidx31 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %7
+ %8 = load float, ptr %arrayidx31, align 4
+ %mul35 = fmul fast float %8, %6
+ store float %mul35, ptr %arrayidx21, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 5
+ %arrayidx41 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv.next
+ %9 = load float, ptr %arrayidx41, align 4
+ %mul45 = fmul fast float %9, %8
+ store float %mul45, ptr %arrayidx31, align 4
+ %cmp2 = icmp ult i64 %indvars.iv, 31990
+ br i1 %cmp2, label %for.body, label %exit
+
+exit:
+ ret void
+}
>From b8bbc57b68454fda9811fd956a1d2caa61d4d323 Mon Sep 17 00:00:00 2001
From: Petr Hosek <phosek at google.com>
Date: Tue, 2 Jul 2024 12:34:06 -0700
Subject: [PATCH 033/246] [libc] Use -nostdlibinc in the full build mode
(#97461)
This avoids accidentally including system headers.
---
libc/cmake/modules/CheckCompilerFeatures.cmake | 3 +++
libc/cmake/modules/LLVMLibCCompileOptionRules.cmake | 13 ++++++++-----
2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/libc/cmake/modules/CheckCompilerFeatures.cmake b/libc/cmake/modules/CheckCompilerFeatures.cmake
index 17806588550eb..d84c07b35d2d7 100644
--- a/libc/cmake/modules/CheckCompilerFeatures.cmake
+++ b/libc/cmake/modules/CheckCompilerFeatures.cmake
@@ -73,3 +73,6 @@ check_cxx_compiler_flag("-ftrivial-auto-var-init=pattern" LIBC_CC_SUPPORTS_PATTE
# clang-6+, gcc-13+
check_cxx_compiler_flag("-nostdlib++" LIBC_CC_SUPPORTS_NOSTDLIBPP)
+
+# clang-3.0+
+check_cxx_compiler_flag("-nostdlibinc" LIBC_CC_SUPPORTS_NOSTDLIBINC)
diff --git a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
index 3bf429381d4af..d283e966bffcf 100644
--- a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
+++ b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
@@ -46,6 +46,14 @@ function(_get_common_compile_options output_var flags)
list(APPEND compile_options "-DLIBC_FULL_BUILD")
# Only add -ffreestanding flag in full build mode.
list(APPEND compile_options "-ffreestanding")
+ # Manually disable standard include paths to prevent system headers from
+ # being included.
+ if(LIBC_CC_SUPPORTS_NOSTDLIBINC)
+ list(APPEND compile_options "-nostdlibinc")
+ else()
+ list(APPEND compile_options "-isystem${COMPILER_RESOURCE_DIR}/include")
+ list(APPEND compile_options "-nostdinc")
+ endif()
endif()
if(LIBC_COMPILER_HAS_FIXED_POINT)
@@ -108,11 +116,6 @@ function(_get_common_compile_options output_var flags)
elseif(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
list(APPEND compile_options "SHELL:-Xclang -mcode-object-version=none")
endif()
-
- # Manually disable all standard include paths and include the resource
- # directory to prevent system headers from being included.
- list(APPEND compile_options "-isystem${COMPILER_RESOURCE_DIR}/include")
- list(APPEND compile_options "-nostdinc")
endif()
set(${output_var} ${compile_options} PARENT_SCOPE)
endfunction()
>From 3584a82333bf518e25c84d30e31ab5decd0f3fb6 Mon Sep 17 00:00:00 2001
From: Petr Hosek <phosek at google.com>
Date: Tue, 2 Jul 2024 12:48:48 -0700
Subject: [PATCH 034/246] [libc] Only use COMPILER_RESOURCE_DIR if available
(#97465)
This avoids a build error when COMPILER_RESOURCE_DIR is unset.
---
libc/cmake/modules/LLVMLibCCompileOptionRules.cmake | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
index d283e966bffcf..28379213029a3 100644
--- a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
+++ b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
@@ -50,7 +50,7 @@ function(_get_common_compile_options output_var flags)
# being included.
if(LIBC_CC_SUPPORTS_NOSTDLIBINC)
list(APPEND compile_options "-nostdlibinc")
- else()
+ elseif(COMPILER_RESOURCE_DIR)
list(APPEND compile_options "-isystem${COMPILER_RESOURCE_DIR}/include")
list(APPEND compile_options "-nostdinc")
endif()
>From 59f4267c8e0625c6583327be2db1608930f2d796 Mon Sep 17 00:00:00 2001
From: Med Ismail Bennani <ismail at bennani.ma>
Date: Tue, 2 Jul 2024 12:52:50 -0700
Subject: [PATCH 035/246] [lldb/docs] Add scripting extensions documentation to
the website (#97262)
This patch adds the documentation for a subset of scripting extensions
such as scripted process, scripted thread, operating system threads &
scripted thread plans to the lldb website.
Signed-off-by: Med Ismail Bennani <ismail at bennani.ma>
---
lldb/docs/CMakeLists.txt | 7 +++-
lldb/docs/index.rst | 1 +
.../python/templates/operating_system.py | 20 ++++-----
.../python/templates/scripted_platform.py | 25 ++++++-----
.../python/templates/scripted_process.py | 42 +++++++++----------
5 files changed, 48 insertions(+), 47 deletions(-)
diff --git a/lldb/docs/CMakeLists.txt b/lldb/docs/CMakeLists.txt
index f482e91d1b10c..ed4296bbf03a4 100644
--- a/lldb/docs/CMakeLists.txt
+++ b/lldb/docs/CMakeLists.txt
@@ -27,8 +27,13 @@ if (LLDB_ENABLE_PYTHON AND SPHINX_FOUND)
get_target_property(lldb_bindings_dir swig_wrapper_python BINARY_DIR)
add_custom_target(lldb-python-doc-package
COMMAND "${CMAKE_COMMAND}" -E copy "${lldb_bindings_dir}/lldb.py" "${CMAKE_CURRENT_BINARY_DIR}/lldb/__init__.py"
+ COMMAND "${CMAKE_COMMAND}" -E make_directory "${CMAKE_CURRENT_BINARY_DIR}/lldb/plugins"
+ COMMAND "${CMAKE_COMMAND}" -E copy "${LLDB_SOURCE_DIR}/examples/python/templates/scripted_process.py" "${CMAKE_CURRENT_BINARY_DIR}/lldb/plugins/"
+ COMMAND "${CMAKE_COMMAND}" -E copy "${LLDB_SOURCE_DIR}/examples/python/templates/scripted_platform.py" "${CMAKE_CURRENT_BINARY_DIR}/lldb/plugins/"
+ COMMAND "${CMAKE_COMMAND}" -E copy "${LLDB_SOURCE_DIR}/examples/python/templates/operating_system.py" "${CMAKE_CURRENT_BINARY_DIR}/lldb/plugins/"
COMMENT "Copying lldb.py to pretend its a Python package.")
- add_dependencies(lldb-python-doc-package swig_wrapper_python)
+
+ add_dependencies(lldb-python-doc-package swig_wrapper_python lldb-python)
# FIXME: Don't treat Sphinx warnings as errors. The files generated by
# automodapi are full of warnings (partly caused by SWIG, our documentation
diff --git a/lldb/docs/index.rst b/lldb/docs/index.rst
index 1e7d69002dd3e..3ce23beec2a5e 100644
--- a/lldb/docs/index.rst
+++ b/lldb/docs/index.rst
@@ -141,6 +141,7 @@ interesting areas to contribute to lldb.
use/python
use/python-reference
Python API <python_api>
+ Python Extensions <python_extensions>
.. toctree::
diff --git a/lldb/examples/python/templates/operating_system.py b/lldb/examples/python/templates/operating_system.py
index a8053bcaa21af..d83019079ee90 100644
--- a/lldb/examples/python/templates/operating_system.py
+++ b/lldb/examples/python/templates/operating_system.py
@@ -10,16 +10,16 @@ class OperatingSystem(ScriptedThread):
"""
Class that provides data for an instance of a LLDB 'OperatingSystemPython' plug-in class.
- ```
- thread_info = {
- "tid": tid,
- "name": "four",
- "queue": "queue4",
- "state": "stopped",
- "stop_reason": "none",
- "core" : 2
- }
- ```
+ .. code-block:: python
+
+ thread_info = {
+ "tid": tid,
+ "name": "four",
+ "queue": "queue4",
+ "state": "stopped",
+ "stop_reason": "none",
+ "core" : 2
+ }
- tid : thread ID (mandatory)
- name : thread name (optional key/value pair)
diff --git a/lldb/examples/python/templates/scripted_platform.py b/lldb/examples/python/templates/scripted_platform.py
index fb1bde8fd4cb7..5805f99dea4ca 100644
--- a/lldb/examples/python/templates/scripted_platform.py
+++ b/lldb/examples/python/templates/scripted_platform.py
@@ -10,9 +10,6 @@ class ScriptedPlatform(metaclass=ABCMeta):
Most of the base class methods are `@abstractmethod` that need to be
overwritten by the inheriting class.
-
- DISCLAIMER: THIS INTERFACE IS STILL UNDER DEVELOPMENT AND NOT STABLE.
- THE METHODS EXPOSED MIGHT CHANGE IN THE FUTURE.
"""
processes = None
@@ -32,16 +29,18 @@ def __init__(self, exe_ctx, args):
def list_processes(self):
"""Get a list of processes that are running or that can be attached to on the platform.
- processes = {
- 420: {
- name: a.out,
- arch: aarch64,
- pid: 420,
- parent_pid: 42 (optional),
- uid: 0 (optional),
- gid: 0 (optional),
- },
- }
+ .. code-block:: python
+
+ processes = {
+ 420: {
+ name: a.out,
+ arch: aarch64,
+ pid: 420,
+ parent_pid: 42 (optional),
+ uid: 0 (optional),
+ gid: 0 (optional),
+ },
+ }
Returns:
Dict: The processes represented as a dictionary, with at least the
diff --git a/lldb/examples/python/templates/scripted_process.py b/lldb/examples/python/templates/scripted_process.py
index 3ddcebd128eaa..b7b6499580e50 100644
--- a/lldb/examples/python/templates/scripted_process.py
+++ b/lldb/examples/python/templates/scripted_process.py
@@ -11,9 +11,6 @@ class ScriptedProcess(metaclass=ABCMeta):
Most of the base class methods are `@abstractmethod` that need to be
overwritten by the inheriting class.
-
- DISCLAIMER: THIS INTERFACE IS STILL UNDER DEVELOPMENT AND NOT STABLE.
- THE METHODS EXPOSED MIGHT CHANGE IN THE FUTURE.
"""
capabilities = None
@@ -106,8 +103,8 @@ def write_memory_at_address(self, addr, data, error):
Args:
addr (int): Address from which we should start reading.
- data (lldb.SBData): An `lldb.SBData` buffer to write to the
- process memory.
+ data (lldb.SBData): An `lldb.SBData` buffer to write to the process
+ memory.
error (lldb.SBError): Error object.
Returns:
@@ -121,13 +118,13 @@ def write_memory_at_address(self, addr, data, error):
def get_loaded_images(self):
"""Get the list of loaded images for the scripted process.
- ```
- scripted_image = {
- uuid = "c6ea2b64-f77c-3d27-9528-74f507b9078b",
- path = "/usr/lib/dyld"
- load_addr = 0xbadc0ffee
- }
- ```
+ .. code-block:: python
+
+ scripted_image = {
+ uuid = "c6ea2b64-f77c-3d27-9528-74f507b9078b",
+ path = "/usr/lib/dyld"
+ load_addr = 0xbadc0ffee
+ }
Returns:
List[scripted_image]: A list of `scripted_image` dictionaries
@@ -238,9 +235,6 @@ class ScriptedThread(metaclass=ABCMeta):
Most of the base class methods are `@abstractmethod` that need to be
overwritten by the inheriting class.
-
- DISCLAIMER: THIS INTERFACE IS STILL UNDER DEVELOPMENT AND NOT STABLE.
- THE METHODS EXPOSED MIGHT CHANGE IN THE FUTURE.
"""
@abstractmethod
@@ -305,10 +299,12 @@ def get_name(self):
def get_state(self):
"""Get the scripted thread state type.
+ .. code-block:: python
+
eStateStopped, ///< Process or thread is stopped and can be examined.
eStateRunning, ///< Process or thread is running and can't be examined.
- eStateStepping, ///< Process or thread is in the process of stepping and can
- /// not be examined.
+ eStateStepping, ///< Process or thread is in the process of stepping and
+ /// can not be examined.
eStateCrashed, ///< Process or thread has crashed and can be examined.
Returns:
@@ -340,12 +336,12 @@ def get_stop_reason(self):
def get_stackframes(self):
"""Get the list of stack frames for the scripted thread.
- ```
- scripted_frame = {
- idx = 0,
- pc = 0xbadc0ffee
- }
- ```
+ .. code-block:: python
+
+ scripted_frame = {
+ idx = 0,
+ pc = 0xbadc0ffee
+ }
Returns:
List[scripted_frame]: A list of `scripted_frame` dictionaries
>From 93e0ffa790c5f738171ed90004ab2b9e46f8d387 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Tue, 2 Jul 2024 15:35:51 -0500
Subject: [PATCH 036/246] [lld] Add `--lto-emit-llvm` and alias
`--plugin-opt=emit-llvm` to it (#97469)
Summary:
The LTO pass currently supports emitting LLVM-IR via the
`--plugin-opt=emit-llvm` option. However, there is a very similar option
called `--lto-emit-asm`. This patch makes the usage more consistent and
makes it more obvious that emitting LLVM-IR is supported.
---
lld/ELF/Driver.cpp | 2 +-
lld/ELF/Options.td | 5 ++++-
lld/test/ELF/lto/emit-llvm.ll | 1 +
3 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 73e260073da0c..a4863d6717efb 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -1269,7 +1269,7 @@ static void readConfigs(opt::InputArgList &args) {
config->dynamicLinker = getDynamicLinker(args);
config->ehFrameHdr =
args.hasFlag(OPT_eh_frame_hdr, OPT_no_eh_frame_hdr, false);
- config->emitLLVM = args.hasArg(OPT_plugin_opt_emit_llvm, false);
+ config->emitLLVM = args.hasArg(OPT_lto_emit_llvm);
config->emitRelocs = args.hasArg(OPT_emit_relocs);
config->enableNewDtags =
args.hasFlag(OPT_enable_new_dtags, OPT_disable_new_dtags, true);
diff --git a/lld/ELF/Options.td b/lld/ELF/Options.td
index 7d91b02b51079..74733efb28ff5 100644
--- a/lld/ELF/Options.td
+++ b/lld/ELF/Options.td
@@ -614,6 +614,8 @@ def lto_debug_pass_manager: FF<"lto-debug-pass-manager">,
HelpText<"Debug new pass manager">;
def lto_emit_asm: FF<"lto-emit-asm">,
HelpText<"Emit assembly code">;
+def lto_emit_llvm: FF<"lto-emit-llvm">,
+ HelpText<"Emit LLVM-IR bitcode">;
def lto_newpm_passes: JJ<"lto-newpm-passes=">,
HelpText<"Passes to run during LTO">;
def lto_O: JJ<"lto-O">, MetaVarName<"<opt-level>">,
@@ -695,7 +697,8 @@ def plugin_opt_dwo_dir_eq: J<"plugin-opt=dwo_dir=">,
HelpText<"Directory to store .dwo files when LTO and debug fission are used">;
def plugin_opt_emit_asm: F<"plugin-opt=emit-asm">,
Alias<lto_emit_asm>, HelpText<"Alias for --lto-emit-asm">;
-def plugin_opt_emit_llvm: F<"plugin-opt=emit-llvm">;
+def plugin_opt_emit_llvm: F<"plugin-opt=emit-llvm">,
+ Alias<lto_emit_llvm>, HelpText<"Alias for --lto-emit-llvm">;
def: J<"plugin-opt=jobs=">, Alias<thinlto_jobs_eq>, HelpText<"Alias for --thinlto-jobs=">;
def: J<"plugin-opt=lto-partitions=">, Alias<lto_partitions>, HelpText<"Alias for --lto-partitions">;
def plugin_opt_mcpu_eq: J<"plugin-opt=mcpu=">;
diff --git a/lld/test/ELF/lto/emit-llvm.ll b/lld/test/ELF/lto/emit-llvm.ll
index e80ef570b4e81..01f5a056e0c0d 100644
--- a/lld/test/ELF/lto/emit-llvm.ll
+++ b/lld/test/ELF/lto/emit-llvm.ll
@@ -7,6 +7,7 @@
;; Regression test for D112297: bitcode writer used to crash when
;; --plugin-opt=emit-llvm is enabled and the output is /dev/null.
; RUN: ld.lld --plugin-opt=emit-llvm -mllvm -bitcode-flush-threshold=0 -o /dev/null %t.o
+; RUN: ld.lld --lto-emit-llvm -mllvm -bitcode-flush-threshold=0 -o /dev/null %t.o
; CHECK: define internal void @main()
>From 2ee86a1ebb9be7ff7be893b411a4af0a1dcee420 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tgymnich at icloud.com>
Date: Tue, 2 Jul 2024 23:01:58 +0200
Subject: [PATCH 037/246] [AArch64][GlobalISel] Improve non-SVE popcount for
32bit and 64 bit using udot (#96409)
Follow-up to #95881.
Use udot instead of a sequence of uaddlp instructions when summing up
lanes for popcount.
---
.../AArch64/GISel/AArch64LegalizerInfo.cpp | 25 ++
llvm/test/CodeGen/AArch64/popcount.ll | 239 ++++++++++++++++++
2 files changed, 264 insertions(+)
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 82d5a56d1ff25..c6eb4d2b3ec78 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1908,6 +1908,31 @@ bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
auto CTPOP = MIRBuilder.buildCTPOP(VTy, Val);
// Sum across lanes.
+
+ if (ST->hasDotProd() && Ty.isVector() && Ty.getNumElements() >= 2 &&
+ Ty.getScalarSizeInBits() != 16) {
+ LLT Dt = Ty == LLT::fixed_vector(2, 64) ? LLT::fixed_vector(4, 32) : Ty;
+ auto Zeros = MIRBuilder.buildConstant(Dt, 0);
+ auto Ones = MIRBuilder.buildConstant(VTy, 1);
+ MachineInstrBuilder Sum;
+
+ if (Ty == LLT::fixed_vector(2, 64)) {
+ auto UDOT =
+ MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
+ Sum = MIRBuilder.buildInstr(AArch64::G_UADDLP, {Ty}, {UDOT});
+ } else if (Ty == LLT::fixed_vector(4, 32)) {
+ Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
+ } else if (Ty == LLT::fixed_vector(2, 32)) {
+ Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
+ } else {
+ llvm_unreachable("unexpected vector shape");
+ }
+
+ Sum->getOperand(0).setReg(Dst);
+ MI.eraseFromParent();
+ return true;
+ }
+
Register HSum = CTPOP.getReg(0);
unsigned Opc;
SmallVector<LLT> HAddTys;
diff --git a/llvm/test/CodeGen/AArch64/popcount.ll b/llvm/test/CodeGen/AArch64/popcount.ll
index f60d0db43c4a9..1fc4de1c48b7d 100644
--- a/llvm/test/CodeGen/AArch64/popcount.ll
+++ b/llvm/test/CodeGen/AArch64/popcount.ll
@@ -3,6 +3,12 @@
; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mattr=+neon | FileCheck %s --check-prefixes=CHECK,NEON
; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mattr=+neon,+dotprod | FileCheck %s --check-prefixes=CHECK,DOT
; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mattr=+sve | FileCheck %s --check-prefixes=CHECK,SVE
+; RUN: llc < %s -global-isel -mtriple=aarch64-unknown-unknown | FileCheck %s --check-prefix=GISEL
+; RUN: llc < %s -O0 -global-isel -mtriple=aarch64-unknown-unknown | FileCheck %s --check-prefix=GISELO0
+; RUN: llc < %s -global-isel -mtriple=aarch64-unknown-unknown -mattr=+neon | FileCheck %s --check-prefixes=GISEL,NEON-GISEL
+; RUN: llc < %s -global-isel -mtriple=aarch64-unknown-unknown -mattr=+neon,+dotprod | FileCheck %s --check-prefixes=GISEL,DOT-GISEL
+; RUN: llc < %s -global-isel -mtriple=aarch64-unknown-unknown -mattr=+sve | FileCheck %s --check-prefixes=GISEL,SVE-GISEL
+
; Function Attrs: nobuiltin nounwind readonly
define i8 @popcount128(ptr nocapture nonnull readonly %0) {
@@ -25,6 +31,24 @@ define i8 @popcount128(ptr nocapture nonnull readonly %0) {
; CHECK-NEXT: uaddlv h0, v0.16b
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
+;
+; GISEL-LABEL: popcount128:
+; GISEL: // %bb.0: // %Entry
+; GISEL-NEXT: ldr q0, [x0]
+; GISEL-NEXT: cnt v0.16b, v0.16b
+; GISEL-NEXT: uaddlv h0, v0.16b
+; GISEL-NEXT: fmov w0, s0
+; GISEL-NEXT: ret
+;
+; GISELO0-LABEL: popcount128:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: ldr q0, [x0]
+; GISELO0-NEXT: cnt v0.16b, v0.16b
+; GISELO0-NEXT: uaddlv h0, v0.16b
+; GISELO0-NEXT: // kill: def $q0 killed $h0
+; GISELO0-NEXT: // kill: def $s0 killed $s0 killed $q0
+; GISELO0-NEXT: fmov w0, s0
+; GISELO0-NEXT: ret
Entry:
%1 = load i128, ptr %0, align 16
%2 = tail call i128 @llvm.ctpop.i128(i128 %1)
@@ -86,6 +110,57 @@ define i16 @popcount256(ptr nocapture nonnull readonly %0) {
; CHECK-NEXT: fmov w9, s1
; CHECK-NEXT: add w0, w9, w8
; CHECK-NEXT: ret
+;
+; GISEL-LABEL: popcount256:
+; GISEL: // %bb.0: // %Entry
+; GISEL-NEXT: ldp x8, x9, [x0, #16]
+; GISEL-NEXT: mov v0.d[0], x8
+; GISEL-NEXT: ldp x8, x10, [x0]
+; GISEL-NEXT: mov v1.d[0], x8
+; GISEL-NEXT: mov v0.d[1], x9
+; GISEL-NEXT: mov v1.d[1], x10
+; GISEL-NEXT: cnt v0.16b, v0.16b
+; GISEL-NEXT: cnt v1.16b, v1.16b
+; GISEL-NEXT: uaddlv h0, v0.16b
+; GISEL-NEXT: uaddlv h1, v1.16b
+; GISEL-NEXT: mov w8, v0.s[0]
+; GISEL-NEXT: fmov w9, s1
+; GISEL-NEXT: add x0, x8, w9, uxtw
+; GISEL-NEXT: // kill: def $w0 killed $w0 killed $x0
+; GISEL-NEXT: ret
+;
+; GISELO0-LABEL: popcount256:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: ldr x11, [x0]
+; GISELO0-NEXT: ldr x10, [x0, #8]
+; GISELO0-NEXT: ldr x9, [x0, #16]
+; GISELO0-NEXT: ldr x8, [x0, #24]
+; GISELO0-NEXT: // implicit-def: $q1
+; GISELO0-NEXT: mov v1.d[0], x11
+; GISELO0-NEXT: mov v1.d[1], x10
+; GISELO0-NEXT: // implicit-def: $q0
+; GISELO0-NEXT: mov v0.d[0], x9
+; GISELO0-NEXT: mov v0.d[1], x8
+; GISELO0-NEXT: cnt v1.16b, v1.16b
+; GISELO0-NEXT: uaddlv h1, v1.16b
+; GISELO0-NEXT: // kill: def $q1 killed $h1
+; GISELO0-NEXT: // kill: def $s1 killed $s1 killed $q1
+; GISELO0-NEXT: fmov w0, s1
+; GISELO0-NEXT: mov w10, wzr
+; GISELO0-NEXT: mov w9, w0
+; GISELO0-NEXT: mov w8, w10
+; GISELO0-NEXT: bfi x9, x8, #32, #32
+; GISELO0-NEXT: cnt v0.16b, v0.16b
+; GISELO0-NEXT: uaddlv h0, v0.16b
+; GISELO0-NEXT: // kill: def $q0 killed $h0
+; GISELO0-NEXT: // kill: def $s0 killed $s0 killed $q0
+; GISELO0-NEXT: fmov w0, s0
+; GISELO0-NEXT: mov w8, w0
+; GISELO0-NEXT: // kill: def $x10 killed $w10
+; GISELO0-NEXT: bfi x8, x10, #32, #32
+; GISELO0-NEXT: adds x8, x8, x9
+; GISELO0-NEXT: mov w0, w8
+; GISELO0-NEXT: ret
Entry:
%1 = load i256, ptr %0, align 16
%2 = tail call i256 @llvm.ctpop.i256(i256 %1)
@@ -125,6 +200,33 @@ define <1 x i128> @popcount1x128(<1 x i128> %0) {
; CHECK-NEXT: mov x1, v0.d[1]
; CHECK-NEXT: fmov x0, d0
; CHECK-NEXT: ret
+;
+; GISEL-LABEL: popcount1x128:
+; GISEL: // %bb.0: // %Entry
+; GISEL-NEXT: mov v0.d[0], x0
+; GISEL-NEXT: mov v0.d[1], x1
+; GISEL-NEXT: mov x1, xzr
+; GISEL-NEXT: cnt v0.16b, v0.16b
+; GISEL-NEXT: uaddlv h0, v0.16b
+; GISEL-NEXT: mov w0, v0.s[0]
+; GISEL-NEXT: ret
+;
+; GISELO0-LABEL: popcount1x128:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: // implicit-def: $q0
+; GISELO0-NEXT: mov v0.d[0], x0
+; GISELO0-NEXT: mov v0.d[1], x1
+; GISELO0-NEXT: cnt v0.16b, v0.16b
+; GISELO0-NEXT: uaddlv h0, v0.16b
+; GISELO0-NEXT: // kill: def $q0 killed $h0
+; GISELO0-NEXT: mov x1, xzr
+; GISELO0-NEXT: // kill: def $s0 killed $s0 killed $q0
+; GISELO0-NEXT: fmov w0, s0
+; GISELO0-NEXT: mov w8, wzr
+; GISELO0-NEXT: // kill: def $x0 killed $w0
+; GISELO0-NEXT: // kill: def $x8 killed $w8
+; GISELO0-NEXT: bfi x0, x8, #32, #32
+; GISELO0-NEXT: ret
Entry:
%1 = tail call <1 x i128> @llvm.ctpop.v1i128(<1 x i128> %0)
ret <1 x i128> %1
@@ -165,6 +267,39 @@ define <2 x i64> @popcount2x64(<2 x i64> %0) {
; SVE-NEXT: uaddlp v0.4s, v0.8h
; SVE-NEXT: uaddlp v0.2d, v0.4s
; SVE-NEXT: ret
+;
+; GISELO0-LABEL: popcount2x64:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: cnt v0.16b, v0.16b
+; GISELO0-NEXT: uaddlp v0.8h, v0.16b
+; GISELO0-NEXT: uaddlp v0.4s, v0.8h
+; GISELO0-NEXT: uaddlp v0.2d, v0.4s
+; GISELO0-NEXT: ret
+;
+; NEON-GISEL-LABEL: popcount2x64:
+; NEON-GISEL: // %bb.0: // %Entry
+; NEON-GISEL-NEXT: cnt v0.16b, v0.16b
+; NEON-GISEL-NEXT: uaddlp v0.8h, v0.16b
+; NEON-GISEL-NEXT: uaddlp v0.4s, v0.8h
+; NEON-GISEL-NEXT: uaddlp v0.2d, v0.4s
+; NEON-GISEL-NEXT: ret
+;
+; DOT-GISEL-LABEL: popcount2x64:
+; DOT-GISEL: // %bb.0: // %Entry
+; DOT-GISEL-NEXT: movi v1.2d, #0000000000000000
+; DOT-GISEL-NEXT: cnt v0.16b, v0.16b
+; DOT-GISEL-NEXT: movi v2.16b, #1
+; DOT-GISEL-NEXT: udot v1.4s, v2.16b, v0.16b
+; DOT-GISEL-NEXT: uaddlp v0.2d, v1.4s
+; DOT-GISEL-NEXT: ret
+;
+; SVE-GISEL-LABEL: popcount2x64:
+; SVE-GISEL: // %bb.0: // %Entry
+; SVE-GISEL-NEXT: cnt v0.16b, v0.16b
+; SVE-GISEL-NEXT: uaddlp v0.8h, v0.16b
+; SVE-GISEL-NEXT: uaddlp v0.4s, v0.8h
+; SVE-GISEL-NEXT: uaddlp v0.2d, v0.4s
+; SVE-GISEL-NEXT: ret
Entry:
%1 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
ret <2 x i64> %1
@@ -192,6 +327,26 @@ define <1 x i64> @popcount1x64(<1 x i64> %0) {
; CHECK-NEXT: uaddlp v0.2s, v0.4h
; CHECK-NEXT: uaddlp v0.1d, v0.2s
; CHECK-NEXT: ret
+;
+; GISEL-LABEL: popcount1x64:
+; GISEL: // %bb.0: // %Entry
+; GISEL-NEXT: cnt v0.8b, v0.8b
+; GISEL-NEXT: uaddlv h0, v0.8b
+; GISEL-NEXT: mov w8, v0.s[0]
+; GISEL-NEXT: fmov d0, x8
+; GISEL-NEXT: ret
+;
+; GISELO0-LABEL: popcount1x64:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: fmov x0, d0
+; GISELO0-NEXT: fmov d0, x0
+; GISELO0-NEXT: cnt v0.8b, v0.8b
+; GISELO0-NEXT: uaddlv h0, v0.8b
+; GISELO0-NEXT: // kill: def $q0 killed $h0
+; GISELO0-NEXT: mov w8, v0.s[0]
+; GISELO0-NEXT: // kill: def $x8 killed $w8
+; GISELO0-NEXT: fmov d0, x8
+; GISELO0-NEXT: ret
Entry:
%1 = tail call <1 x i64> @llvm.ctpop.v1i64(<1 x i64> %0)
ret <1 x i64> %1
@@ -228,6 +383,36 @@ define <4 x i32> @popcount4x32(<4 x i32> %0) {
; SVE-NEXT: uaddlp v0.8h, v0.16b
; SVE-NEXT: uaddlp v0.4s, v0.8h
; SVE-NEXT: ret
+;
+; GISELO0-LABEL: popcount4x32:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: cnt v0.16b, v0.16b
+; GISELO0-NEXT: uaddlp v0.8h, v0.16b
+; GISELO0-NEXT: uaddlp v0.4s, v0.8h
+; GISELO0-NEXT: ret
+;
+; NEON-GISEL-LABEL: popcount4x32:
+; NEON-GISEL: // %bb.0: // %Entry
+; NEON-GISEL-NEXT: cnt v0.16b, v0.16b
+; NEON-GISEL-NEXT: uaddlp v0.8h, v0.16b
+; NEON-GISEL-NEXT: uaddlp v0.4s, v0.8h
+; NEON-GISEL-NEXT: ret
+;
+; DOT-GISEL-LABEL: popcount4x32:
+; DOT-GISEL: // %bb.0: // %Entry
+; DOT-GISEL-NEXT: movi v1.2d, #0000000000000000
+; DOT-GISEL-NEXT: cnt v0.16b, v0.16b
+; DOT-GISEL-NEXT: movi v2.16b, #1
+; DOT-GISEL-NEXT: udot v1.4s, v2.16b, v0.16b
+; DOT-GISEL-NEXT: mov v0.16b, v1.16b
+; DOT-GISEL-NEXT: ret
+;
+; SVE-GISEL-LABEL: popcount4x32:
+; SVE-GISEL: // %bb.0: // %Entry
+; SVE-GISEL-NEXT: cnt v0.16b, v0.16b
+; SVE-GISEL-NEXT: uaddlp v0.8h, v0.16b
+; SVE-GISEL-NEXT: uaddlp v0.4s, v0.8h
+; SVE-GISEL-NEXT: ret
Entry:
%1 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
ret <4 x i32> %1
@@ -265,6 +450,36 @@ define <2 x i32> @popcount2x32(<2 x i32> %0) {
; SVE-NEXT: uaddlp v0.4h, v0.8b
; SVE-NEXT: uaddlp v0.2s, v0.4h
; SVE-NEXT: ret
+;
+; GISELO0-LABEL: popcount2x32:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: cnt v0.8b, v0.8b
+; GISELO0-NEXT: uaddlp v0.4h, v0.8b
+; GISELO0-NEXT: uaddlp v0.2s, v0.4h
+; GISELO0-NEXT: ret
+;
+; NEON-GISEL-LABEL: popcount2x32:
+; NEON-GISEL: // %bb.0: // %Entry
+; NEON-GISEL-NEXT: cnt v0.8b, v0.8b
+; NEON-GISEL-NEXT: uaddlp v0.4h, v0.8b
+; NEON-GISEL-NEXT: uaddlp v0.2s, v0.4h
+; NEON-GISEL-NEXT: ret
+;
+; DOT-GISEL-LABEL: popcount2x32:
+; DOT-GISEL: // %bb.0: // %Entry
+; DOT-GISEL-NEXT: movi v1.2d, #0000000000000000
+; DOT-GISEL-NEXT: cnt v0.8b, v0.8b
+; DOT-GISEL-NEXT: movi v2.8b, #1
+; DOT-GISEL-NEXT: udot v1.2s, v2.8b, v0.8b
+; DOT-GISEL-NEXT: fmov d0, d1
+; DOT-GISEL-NEXT: ret
+;
+; SVE-GISEL-LABEL: popcount2x32:
+; SVE-GISEL: // %bb.0: // %Entry
+; SVE-GISEL-NEXT: cnt v0.8b, v0.8b
+; SVE-GISEL-NEXT: uaddlp v0.4h, v0.8b
+; SVE-GISEL-NEXT: uaddlp v0.2s, v0.4h
+; SVE-GISEL-NEXT: ret
Entry:
%1 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %0)
ret <2 x i32> %1
@@ -284,6 +499,18 @@ define <8 x i16> @popcount8x16(<8 x i16> %0) {
; CHECK-NEXT: cnt v0.16b, v0.16b
; CHECK-NEXT: uaddlp v0.8h, v0.16b
; CHECK-NEXT: ret
+;
+; GISEL-LABEL: popcount8x16:
+; GISEL: // %bb.0: // %Entry
+; GISEL-NEXT: cnt v0.16b, v0.16b
+; GISEL-NEXT: uaddlp v0.8h, v0.16b
+; GISEL-NEXT: ret
+;
+; GISELO0-LABEL: popcount8x16:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: cnt v0.16b, v0.16b
+; GISELO0-NEXT: uaddlp v0.8h, v0.16b
+; GISELO0-NEXT: ret
Entry:
%1 = tail call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %0)
ret <8 x i16> %1
@@ -303,6 +530,18 @@ define <4 x i16> @popcount4x16(<4 x i16> %0) {
; CHECK-NEXT: cnt v0.8b, v0.8b
; CHECK-NEXT: uaddlp v0.4h, v0.8b
; CHECK-NEXT: ret
+;
+; GISEL-LABEL: popcount4x16:
+; GISEL: // %bb.0: // %Entry
+; GISEL-NEXT: cnt v0.8b, v0.8b
+; GISEL-NEXT: uaddlp v0.4h, v0.8b
+; GISEL-NEXT: ret
+;
+; GISELO0-LABEL: popcount4x16:
+; GISELO0: // %bb.0: // %Entry
+; GISELO0-NEXT: cnt v0.8b, v0.8b
+; GISELO0-NEXT: uaddlp v0.4h, v0.8b
+; GISELO0-NEXT: ret
Entry:
%1 = tail call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %0)
ret <4 x i16> %1
>From 9fa7f401b2651663407562932529f72142bf8aaa Mon Sep 17 00:00:00 2001
From: alx32 <103613512+alx32 at users.noreply.github.com>
Date: Tue, 2 Jul 2024 14:11:13 -0700
Subject: [PATCH 038/246] [lld-macho] Category merger: handle addends when
getting symbol at offset (#91238)
Currently, `tryFindDefinedOnIsec` takes an `InputSection` and an
`offset` and is supposed to return the target symbol referenced on that
`InputSection` at the given offset.
However, it does not account for the reloc's `addend` and might return
the wrong symbol.
Here we add support for handling the reloc's `addend`.
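To make the mechanics concrete, here is a minimal standalone C++ model of
the containment search (toy types under assumed semantics, not the actual
lld classes): with `_A` covering [0, 16) and `_B` covering [16, 32) on one
section, a relocation whose referent is `_A` with addend 16 must resolve
to `_B`:

  #include <cassert>
  #include <cstdint>
  #include <cstring>
  #include <vector>

  struct Sym { const char *name; uint32_t value; uint32_t size; };

  // Same containment test as the new tryFindDefinedOnIsec in the diff:
  // return the symbol whose [value, value + size) range holds the offset.
  const Sym *findAtOffset(const std::vector<Sym> &syms, uint32_t offset) {
    for (const Sym &s : syms)
      if (s.value <= offset && s.value + s.size > offset)
        return &s;
    return nullptr;
  }

  int main() {
    std::vector<Sym> syms = {{"_A", 0, 16}, {"_B", 16, 16}};
    const Sym *s = findAtOffset(syms, /*_A.value=*/0 + /*addend=*/16);
    assert(s && std::strcmp(s->name, "_B") == 0);
    return 0;
  }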
---
lld/MachO/ObjC.cpp | 29 +-
lld/test/MachO/objc-category-merging-swift.s | 410 +++++++++++++++++++
2 files changed, 436 insertions(+), 3 deletions(-)
create mode 100644 lld/test/MachO/objc-category-merging-swift.s
diff --git a/lld/MachO/ObjC.cpp b/lld/MachO/ObjC.cpp
index 413fa0bb64390..3b3d6a7951f60 100644
--- a/lld/MachO/ObjC.cpp
+++ b/lld/MachO/ObjC.cpp
@@ -490,6 +490,7 @@ class ObjcCategoryMerger {
Defined *emitCategoryName(const std::string &name, ObjFile *objFile);
void createSymbolReference(Defined *refFrom, const Symbol *refTo,
uint32_t offset, const Reloc &relocTemplate);
+ Defined *tryFindDefinedOnIsec(const InputSection *isec, uint32_t offset);
Symbol *tryGetSymbolAtIsecOffset(const ConcatInputSection *isec,
uint32_t offset);
Defined *tryGetDefinedAtIsecOffset(const ConcatInputSection *isec,
@@ -566,7 +567,25 @@ ObjcCategoryMerger::tryGetSymbolAtIsecOffset(const ConcatInputSection *isec,
if (!reloc)
return nullptr;
- return reloc->referent.get<Symbol *>();
+ Symbol *sym = reloc->referent.get<Symbol *>();
+
+ if (reloc->addend) {
+ assert(isa<Defined>(sym) && "Expected defined for non-zero addend");
+ Defined *definedSym = cast<Defined>(sym);
+ sym = tryFindDefinedOnIsec(definedSym->isec(),
+ definedSym->value + reloc->addend);
+ }
+
+ return sym;
+}
+
+Defined *ObjcCategoryMerger::tryFindDefinedOnIsec(const InputSection *isec,
+ uint32_t offset) {
+ for (Defined *sym : isec->symbols)
+ if ((sym->value <= offset) && (sym->value + sym->size > offset))
+ return sym;
+
+ return nullptr;
}
Defined *
@@ -1288,8 +1307,12 @@ void ObjcCategoryMerger::eraseMergedCategories() {
continue;
eraseISec(catInfo.catBodyIsec);
-
- tryEraseDefinedAtIsecOffset(catInfo.catBodyIsec, catLayout.nameOffset);
+ // We can't erase 'catLayout.nameOffset' for Swift categories because the
+ // name will be referenced for generating relative offsets
+ // See usages of 'l_.str.11.SimpleClass' in objc-category-merging-swift.s
+ // TODO: handle the above in a smarter way
+ if (catInfo.sourceLanguage != SourceLanguage::Swift)
+ tryEraseDefinedAtIsecOffset(catInfo.catBodyIsec, catLayout.nameOffset);
tryEraseDefinedAtIsecOffset(catInfo.catBodyIsec,
catLayout.instanceMethodsOffset);
tryEraseDefinedAtIsecOffset(catInfo.catBodyIsec,
diff --git a/lld/test/MachO/objc-category-merging-swift.s b/lld/test/MachO/objc-category-merging-swift.s
new file mode 100644
index 0000000000000..28025b0bc577a
--- /dev/null
+++ b/lld/test/MachO/objc-category-merging-swift.s
@@ -0,0 +1,410 @@
+# REQUIRES: aarch64
+# RUN: rm -rf %t; mkdir %t && cd %t
+
+############ Test merging multiple categories into a single category ############
+## Apply category merging to swiftc code to make sure we can handle addends
+## and don't erase category names for swift -- in order to not crash
+# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o cat_swift.o %s
+# RUN: %lld -arch arm64 -dylib -o cat_swift.dylib cat_swift.o -objc_category_merging
+# RUN: llvm-objdump --objc-meta-data --macho cat_swift.dylib | FileCheck %s --check-prefixes=CHECK-MERGE
+
+; CHECK-MERGE: Contents of (__DATA_CONST,__objc_classlist) section
+; CHECK-MERGE-NEXT: _$s11SimpleClassAACN
+; CHECK-MERGE-NEXT: isa {{.+}} _OBJC_METACLASS_$__TtC11SimpleClass11SimpleClass
+; CHECK-MERGE-NEXT: superclass 0x0
+; CHECK-MERGE-NEXT: cache 0x0
+; CHECK-MERGE-NEXT: vtable 0x0
+; CHECK-MERGE-NEXT: data {{.+}} (struct class_ro_t *) Swift class
+; CHECK-MERGE-NEXT: flags 0x80
+; CHECK-MERGE-NEXT: instanceStart 8
+; CHECK-MERGE-NEXT: instanceSize 8
+; CHECK-MERGE-NEXT: reserved 0x0
+; CHECK-MERGE-NEXT: ivarLayout 0x0
+; CHECK-MERGE-NEXT: name {{.+}} _TtC11SimpleClass11SimpleClass
+; CHECK-MERGE-NEXT: baseMethods {{.+}} (struct method_list_t *)
+; CHECK-MERGE-NEXT: entsize 24
+; CHECK-MERGE-NEXT: count 3
+; CHECK-MERGE-NEXT: name {{.+}} categoryInstanceMethod
+; CHECK-MERGE-NEXT: types {{.+}} q16 at 0:8
+; CHECK-MERGE-NEXT: imp _$s11SimpleClassAAC22categoryInstanceMethodSiyFTo
+; CHECK-MERGE-NEXT: name {{.+}} baseClassInstanceMethod
+; CHECK-MERGE-NEXT: types {{.+}} i16 at 0:8
+; CHECK-MERGE-NEXT: imp _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyFTo
+; CHECK-MERGE-NEXT: name {{.+}} init
+; CHECK-MERGE-NEXT: types {{.+}} @16 at 0:8
+; CHECK-MERGE-NEXT: imp _$s11SimpleClassAACABycfcTo
+; CHECK-MERGE-NEXT: baseProtocols 0x0
+; CHECK-MERGE-NEXT: ivars 0x0
+; CHECK-MERGE-NEXT: weakIvarLayout 0x0
+; CHECK-MERGE-NEXT: baseProperties 0x0
+; CHECK-MERGE-NEXT: Meta Class
+; CHECK-MERGE-NEXT: isa 0x0
+; CHECK-MERGE-NEXT: superclass 0x0
+; CHECK-MERGE-NEXT: cache 0x0
+; CHECK-MERGE-NEXT: vtable 0x0
+; CHECK-MERGE-NEXT: data {{.+}} (struct class_ro_t *)
+; CHECK-MERGE-NEXT: flags 0x81 RO_META
+; CHECK-MERGE-NEXT: instanceStart 40
+; CHECK-MERGE-NEXT: instanceSize 40
+; CHECK-MERGE-NEXT: reserved 0x0
+; CHECK-MERGE-NEXT: ivarLayout 0x0
+; CHECK-MERGE-NEXT: name {{.+}} _TtC11SimpleClass11SimpleClass
+; CHECK-MERGE-NEXT: baseMethods 0x0 (struct method_list_t *)
+; CHECK-MERGE-NEXT: baseProtocols 0x0
+; CHECK-MERGE-NEXT: ivars 0x0
+; CHECK-MERGE-NEXT: weakIvarLayout 0x0
+; CHECK-MERGE-NEXT: baseProperties 0x0
+; CHECK-MERGE-NEXT: Contents of (__DATA_CONST,__objc_imageinfo) section
+; CHECK-MERGE-NEXT: version 0
+; CHECK-MERGE-NEXT: flags 0x740 OBJC_IMAGE_HAS_CATEGORY_CLASS_PROPERTIES Swift 5 or later
+
+; ================== Generated from Swift: ==================
+;; > xcrun swiftc --version
+;; swift-driver version: 1.109.2 Apple Swift version 6.0 (swiftlang-6.0.0.3.300 clang-1600.0.20.10)
+;; > xcrun swiftc -S SimpleClass.swift -o SimpleClass.s
+; import Foundation
+; @objc class SimpleClass: NSObject {
+; @objc func baseClassInstanceMethod() -> Int32 {
+; return 2
+; }
+; }
+; extension SimpleClass {
+; @objc func categoryInstanceMethod() -> Int {
+; return 3
+; }
+; }
+
+; ================== Generated from Swift: ==================
+ .section __TEXT,__text,regular,pure_instructions
+ .build_version macos, 11, 0 sdk_version 12, 0
+ .globl _main
+ .p2align 2
+_main:
+ .cfi_startproc
+ mov w0, #0
+ ret
+ .cfi_endproc
+
+ .private_extern _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyF
+ .globl _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyF
+ .p2align 2
+_$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyF:
+ .cfi_startproc
+ ret
+ .cfi_endproc
+
+ .p2align 2
+_$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyFTo:
+ .cfi_startproc
+ ret
+ .cfi_endproc
+
+ .private_extern _$s11SimpleClassAACABycfC
+ .globl _$s11SimpleClassAACABycfC
+ .p2align 2
+_$s11SimpleClassAACABycfC:
+ .cfi_startproc
+ ret
+ .cfi_endproc
+
+ .private_extern _$s11SimpleClassAACABycfc
+ .globl _$s11SimpleClassAACABycfc
+ .p2align 2
+_$s11SimpleClassAACABycfc:
+ .cfi_startproc
+ ret
+ .cfi_endproc
+
+ .private_extern _$s11SimpleClassAACMa
+ .globl _$s11SimpleClassAACMa
+ .p2align 2
+_$s11SimpleClassAACMa:
+ ret
+
+ .p2align 2
+_$s11SimpleClassAACABycfcTo:
+ .cfi_startproc
+ ret
+ .cfi_endproc
+
+ .private_extern _$s11SimpleClassAACfD
+ .globl _$s11SimpleClassAACfD
+ .p2align 2
+_$s11SimpleClassAACfD:
+ .cfi_startproc
+ ret
+ .cfi_endproc
+
+ .private_extern _$s11SimpleClassAAC22categoryInstanceMethodSiyF
+ .globl _$s11SimpleClassAAC22categoryInstanceMethodSiyF
+ .p2align 2
+_$s11SimpleClassAAC22categoryInstanceMethodSiyF:
+ .cfi_startproc
+ ret
+ .cfi_endproc
+
+ .p2align 2
+_$s11SimpleClassAAC22categoryInstanceMethodSiyFTo:
+ .cfi_startproc
+ ret
+ .cfi_endproc
+
+ .section __TEXT,__objc_methname,cstring_literals
+"L_selector_data(init)":
+ .asciz "init"
+
+ .section __DATA,__objc_selrefs,literal_pointers,no_dead_strip
+ .p2align 3, 0x0
+"L_selector(init)":
+ .quad "L_selector_data(init)"
+
+ .section __TEXT,__objc_methname,cstring_literals
+"L_selector_data(dealloc)":
+ .asciz "dealloc"
+
+ .section __DATA,__objc_selrefs,literal_pointers,no_dead_strip
+ .p2align 3, 0x0
+"L_selector(dealloc)":
+ .quad "L_selector_data(dealloc)"
+
+ .section __TEXT,__swift5_entry,regular,no_dead_strip
+ .p2align 2, 0x0
+l_entry_point:
+ .long _main-l_entry_point
+ .long 0
+
+ .private_extern _OBJC_METACLASS_$__TtC11SimpleClass11SimpleClass
+ .section __DATA,__data
+ .globl _OBJC_METACLASS_$__TtC11SimpleClass11SimpleClass
+ .p2align 3, 0x0
+_OBJC_METACLASS_$__TtC11SimpleClass11SimpleClass:
+ .quad _OBJC_METACLASS_$_NSObject
+ .quad _OBJC_METACLASS_$_NSObject
+ .quad __objc_empty_cache
+ .quad 0
+ .quad __METACLASS_DATA__TtC11SimpleClass11SimpleClass
+
+ .section __TEXT,__cstring,cstring_literals
+ .p2align 4, 0x0
+l_.str.30._TtC11SimpleClass11SimpleClass:
+ .asciz "_TtC11SimpleClass11SimpleClass"
+
+ .section __DATA,__objc_const
+ .p2align 3, 0x0
+__METACLASS_DATA__TtC11SimpleClass11SimpleClass:
+ .long 129
+ .long 40
+ .long 40
+ .long 0
+ .quad 0
+ .quad l_.str.30._TtC11SimpleClass11SimpleClass
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .section __TEXT,__objc_methname,cstring_literals
+"L_selector_data(baseClassInstanceMethod)":
+ .asciz "baseClassInstanceMethod"
+
+ .section __TEXT,__cstring,cstring_literals
+"l_.str.7.i16 at 0:8":
+ .asciz "i16 at 0:8"
+
+"l_.str.7. at 16@0:8":
+ .asciz "@16 at 0:8"
+
+ .section __DATA,__objc_data
+ .p2align 3, 0x0
+__INSTANCE_METHODS__TtC11SimpleClass11SimpleClass:
+ .long 24
+ .long 2
+ .quad "L_selector_data(baseClassInstanceMethod)"
+ .quad "l_.str.7.i16 at 0:8"
+ .quad _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyFTo
+ .quad "L_selector_data(init)"
+ .quad "l_.str.7. at 16@0:8"
+ .quad _$s11SimpleClassAACABycfcTo
+
+ .p2align 3, 0x0
+__DATA__TtC11SimpleClass11SimpleClass:
+ .long 128
+ .long 8
+ .long 8
+ .long 0
+ .quad 0
+ .quad l_.str.30._TtC11SimpleClass11SimpleClass
+ .quad __INSTANCE_METHODS__TtC11SimpleClass11SimpleClass
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .section __TEXT,__const
+l_.str.11.SimpleClass:
+ .asciz "SimpleClass"
+
+ .private_extern _$s11SimpleClassMXM
+ .section __TEXT,__constg_swiftt
+ .globl _$s11SimpleClassMXM
+ .weak_definition _$s11SimpleClassMXM
+ .p2align 2, 0x0
+_$s11SimpleClassMXM:
+ .long 0
+ .long 0
+ .long (l_.str.11.SimpleClass-_$s11SimpleClassMXM)-8
+
+ .private_extern "_symbolic So8NSObjectC"
+ .section __TEXT,__swift5_typeref
+ .globl "_symbolic So8NSObjectC"
+ .weak_definition "_symbolic So8NSObjectC"
+ .p2align 1, 0x0
+"_symbolic So8NSObjectC":
+ .ascii "So8NSObjectC"
+ .byte 0
+
+ .private_extern _$s11SimpleClassAACMn
+ .section __TEXT,__constg_swiftt
+ .globl _$s11SimpleClassAACMn
+ .p2align 2, 0x0
+_$s11SimpleClassAACMn:
+ .long 2147483728
+ .long (_$s11SimpleClassMXM-_$s11SimpleClassAACMn)-4
+ .long (l_.str.11.SimpleClass-_$s11SimpleClassAACMn)-8
+ .long (_$s11SimpleClassAACMa-_$s11SimpleClassAACMn)-12
+ .long (_$s11SimpleClassAACMF-_$s11SimpleClassAACMn)-16
+ .long ("_symbolic So8NSObjectC"-_$s11SimpleClassAACMn)-20
+ .long 3
+ .long 11
+ .long 1
+ .long 0
+ .long 10
+ .long 10
+ .long 1
+ .long 16
+ .long (_$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyF-_$s11SimpleClassAACMn)-56
+
+ .section __DATA,__objc_data
+ .p2align 3, 0x0
+_$s11SimpleClassAACMf:
+ .quad 0
+ .quad _$s11SimpleClassAACfD
+ .quad _$sBOWV
+ .quad _OBJC_METACLASS_$__TtC11SimpleClass11SimpleClass
+ .quad _OBJC_CLASS_$_NSObject
+ .quad __objc_empty_cache
+ .quad 0
+ .quad __DATA__TtC11SimpleClass11SimpleClass+2
+ .long 0
+ .long 0
+ .long 8
+ .short 7
+ .short 0
+ .long 112
+ .long 24
+ .quad _$s11SimpleClassAACMn
+ .quad 0
+ .quad _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyF
+
+ .private_extern "_symbolic _____ 11SimpleClassAAC"
+ .section __TEXT,__swift5_typeref
+ .globl "_symbolic _____ 11SimpleClassAAC"
+ .weak_definition "_symbolic _____ 11SimpleClassAAC"
+ .p2align 1, 0x0
+"_symbolic _____ 11SimpleClassAAC":
+ .byte 1
+ .long (_$s11SimpleClassAACMn-"_symbolic _____ 11SimpleClassAAC")-1
+ .byte 0
+
+ .section __TEXT,__swift5_fieldmd
+ .p2align 2, 0x0
+_$s11SimpleClassAACMF:
+ .long "_symbolic _____ 11SimpleClassAAC"-_$s11SimpleClassAACMF
+ .long ("_symbolic So8NSObjectC"-_$s11SimpleClassAACMF)-4
+ .short 7
+ .short 12
+ .long 0
+
+ .section __TEXT,__objc_methname,cstring_literals
+"L_selector_data(categoryInstanceMethod)":
+ .asciz "categoryInstanceMethod"
+
+ .section __TEXT,__cstring,cstring_literals
+"l_.str.7.q16 at 0:8":
+ .asciz "q16 at 0:8"
+
+ .section __DATA,__objc_data
+ .p2align 3, 0x0
+__CATEGORY_INSTANCE_METHODS__TtC11SimpleClass11SimpleClass_$_SimpleClass:
+ .long 24
+ .long 1
+ .quad "L_selector_data(categoryInstanceMethod)"
+ .quad "l_.str.7.q16 at 0:8"
+ .quad _$s11SimpleClassAAC22categoryInstanceMethodSiyFTo
+
+ .section __DATA,__objc_const
+ .p2align 3, 0x0
+__CATEGORY__TtC11SimpleClass11SimpleClass_$_SimpleClass:
+ .quad l_.str.11.SimpleClass
+ .quad _$s11SimpleClassAACMf+24
+ .quad __CATEGORY_INSTANCE_METHODS__TtC11SimpleClass11SimpleClass_$_SimpleClass
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .long 60
+ .space 4
+
+ .section __TEXT,__swift5_types
+ .p2align 2, 0x0
+l_$s11SimpleClassAACHn:
+ .long _$s11SimpleClassAACMn-l_$s11SimpleClassAACHn
+
+ .private_extern ___swift_reflection_version
+ .section __TEXT,__const
+ .globl ___swift_reflection_version
+ .weak_definition ___swift_reflection_version
+ .p2align 1, 0x0
+___swift_reflection_version:
+ .short 3
+
+ .section __DATA,__objc_classlist,regular,no_dead_strip
+ .p2align 3, 0x0
+_objc_classes_$s11SimpleClassAACN:
+ .quad _$s11SimpleClassAACN
+
+ .section __DATA,__objc_catlist,regular,no_dead_strip
+ .p2align 3, 0x0
+_objc_categories:
+ .quad __CATEGORY__TtC11SimpleClass11SimpleClass_$_SimpleClass
+
+ .no_dead_strip _main
+ .no_dead_strip l_entry_point
+ .no_dead_strip _$s11SimpleClassAACMF
+ .no_dead_strip l_$s11SimpleClassAACHn
+ .no_dead_strip ___swift_reflection_version
+ .no_dead_strip _objc_classes_$s11SimpleClassAACN
+ .no_dead_strip _objc_categories
+ .section __DATA,__objc_imageinfo,regular,no_dead_strip
+L_OBJC_IMAGE_INFO:
+ .long 0
+ .long 100665152
+
+ .globl _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyFTq
+ .private_extern _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyFTq
+ .alt_entry _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyFTq
+.set _$s11SimpleClassAAC04baseB14InstanceMethods5Int32VyFTq, _$s11SimpleClassAACMn+52
+ .globl _$s11SimpleClassAACN
+ .private_extern _$s11SimpleClassAACN
+ .alt_entry _$s11SimpleClassAACN
+.set _$s11SimpleClassAACN, _$s11SimpleClassAACMf+24
+ .globl _OBJC_CLASS_$__TtC11SimpleClass11SimpleClass
+ .private_extern _OBJC_CLASS_$__TtC11SimpleClass11SimpleClass
+.subsections_via_symbols
+
+_OBJC_CLASS_$_NSObject:
+_OBJC_METACLASS_$_NSObject:
+__objc_empty_cache:
+_$sBOWV:
+ .quad 0
>From 1490141145db1f9136a16bbce0f020e576613a72 Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Tue, 2 Jul 2024 14:11:17 -0700
Subject: [PATCH 039/246] Move MCSection::LayoutOrder to MCSectionMachO
This variable is similar to `Ordinal` but only used for Mach-O to place
zerofill sections ("virtual sections" in MC term) after non-zerofill ones.
Follow-up to 7840c0066837797cdeb62aab63044b964aa7f372.
Pull Request: https://github.com/llvm/llvm-project/pull/97474
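The ordering itself is a stable two-pass numbering, non-zerofill sections
first and zerofill ("virtual") ones after; a hedged standalone C++ model of
the two loops in computeSectionAddresses (toy types, not the MC classes):

  #include <iostream>
  #include <string>
  #include <vector>

  struct Section { std::string name; bool isVirtual; unsigned layoutOrder = 0; };

  int main() {
    std::vector<Section> sections = {
        {"__text", false}, {"__bss", true}, {"__data", false}};
    std::vector<Section *> sectionOrder;
    unsigned i = 0;
    for (Section &sec : sections) // pass 1: non-virtual (has contents)
      if (!sec.isVirtual) { sectionOrder.push_back(&sec); sec.layoutOrder = i++; }
    for (Section &sec : sections) // pass 2: virtual (zerofill)
      if (sec.isVirtual) { sectionOrder.push_back(&sec); sec.layoutOrder = i++; }
    for (Section *sec : sectionOrder) // prints __text, __data, then __bss
      std::cout << sec->layoutOrder << " " << sec->name << "\n";
    return 0;
  }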
---
llvm/include/llvm/MC/MCSection.h | 5 -----
llvm/include/llvm/MC/MCSectionMachO.h | 7 +++++++
llvm/lib/MC/MachObjectWriter.cpp | 6 +++---
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 58ba34c07262b..54f7eb1d0fcfc 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -86,8 +86,6 @@ class MCSection {
Align Alignment;
/// The section index in the assemblers section list.
unsigned Ordinal = 0;
- /// The index of this section in the layout order.
- unsigned LayoutOrder = 0;
/// Keeping track of bundle-locked state.
BundleLockStateType BundleLockState = NotBundleLocked;
@@ -167,9 +165,6 @@ class MCSection {
unsigned getOrdinal() const { return Ordinal; }
void setOrdinal(unsigned Value) { Ordinal = Value; }
- unsigned getLayoutOrder() const { return LayoutOrder; }
- void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
-
BundleLockStateType getBundleLockState() const { return BundleLockState; }
void setBundleLockState(BundleLockStateType NewState);
bool isBundleLocked() const { return BundleLockState != NotBundleLocked; }
diff --git a/llvm/include/llvm/MC/MCSectionMachO.h b/llvm/include/llvm/MC/MCSectionMachO.h
index 3b7623fd450e5..1f38d24a20d2e 100644
--- a/llvm/include/llvm/MC/MCSectionMachO.h
+++ b/llvm/include/llvm/MC/MCSectionMachO.h
@@ -32,6 +32,10 @@ class MCSectionMachO final : public MCSection {
/// for example.
unsigned Reserved2;
+ // The index of this section in MachObjectWriter::SectionOrder, which is
+ // different from MCSection::Ordinal.
+ unsigned LayoutOrder = 0;
+
// The defining non-temporary symbol for each fragment.
SmallVector<const MCSymbol *, 0> Atoms;
@@ -80,6 +84,9 @@ class MCSectionMachO final : public MCSection {
const MCSymbol *getAtom(size_t I) const;
void setAtom(size_t I, const MCSymbol *Sym);
+ unsigned getLayoutOrder() const { return LayoutOrder; }
+ void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
+
static bool classof(const MCSection *S) {
return S->getVariant() == SV_MachO;
}
diff --git a/llvm/lib/MC/MachObjectWriter.cpp b/llvm/lib/MC/MachObjectWriter.cpp
index 3f5bdd88574ac..12048e2e53442 100644
--- a/llvm/lib/MC/MachObjectWriter.cpp
+++ b/llvm/lib/MC/MachObjectWriter.cpp
@@ -125,7 +125,7 @@ uint64_t MachObjectWriter::getSymbolAddress(const MCSymbol &S,
uint64_t MachObjectWriter::getPaddingSize(const MCAssembler &Asm,
const MCSection *Sec) const {
uint64_t EndAddr = getSectionAddress(Sec) + Asm.getSectionAddressSize(*Sec);
- unsigned Next = Sec->getLayoutOrder() + 1;
+ unsigned Next = cast<MCSectionMachO>(Sec)->getLayoutOrder() + 1;
if (Next >= SectionOrder.size())
return 0;
@@ -676,13 +676,13 @@ void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm) {
for (MCSection &Sec : Asm) {
if (!Sec.isVirtualSection()) {
SectionOrder.push_back(&Sec);
- Sec.setLayoutOrder(i++);
+ cast<MCSectionMachO>(Sec).setLayoutOrder(i++);
}
}
for (MCSection &Sec : Asm) {
if (Sec.isVirtualSection()) {
SectionOrder.push_back(&Sec);
- Sec.setLayoutOrder(i++);
+ cast<MCSectionMachO>(Sec).setLayoutOrder(i++);
}
}
>From 45507166a1b38ce2831bd1e32f43977f647ccf47 Mon Sep 17 00:00:00 2001
From: Igor Kudrin <ikudrin at accesssoftek.com>
Date: Wed, 3 Jul 2024 04:17:34 +0700
Subject: [PATCH 040/246] [AArch64][MachineOutliner][NFC] Re-enable some tests
(#96376)
Some of the tests did not run and some checks were missing due to
incorrect prefixes. The patch also updates the check lines to catch up
with recent changes.
The problematic revisions were:
* [D70635](https://reviews.llvm.org/D70635)
* [D71658](https://reviews.llvm.org/D71658)
* [D111780](https://reviews.llvm.org/D111780)
---
.../machine-outliner-retaddr-sign-cfi.ll | 32 +++++++++----------
...tliner-retaddr-sign-diff-scope-same-key.ll | 18 +++++------
.../machine-outliner-retaddr-sign-non-leaf.ll | 11 +++----
.../machine-outliner-retaddr-sign-regsave.mir | 4 +--
...tliner-retaddr-sign-same-scope-diff-key.ll | 14 ++++----
...iner-retaddr-sign-same-scope-same-key-a.ll | 19 ++++++-----
...iner-retaddr-sign-same-scope-same-key-b.ll | 19 ++++++-----
...machine-outliner-retaddr-sign-subtarget.ll | 4 +--
.../machine-outliner-retaddr-sign-thunk.ll | 24 +++++++-------
.../machine-outliner-retaddr-sign-v8-3.ll | 8 ++---
10 files changed, 74 insertions(+), 79 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll
index e9492d2a87e15..4bbbe40176313 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll
@@ -1,18 +1,17 @@
-; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN: aarch64 %s -o - | FileCheck %s --check-prefixes CHECK,V8A
-; RUN-V83A: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN-V83A: aarch64 -mattr=+v8.3a %s -o - > %t
-; RUN-V83A: FileCheck --check-prefixes CHECK,V83A < %t %s
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V8A
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 -mattr=+v8.3a %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V83A
-; Function a's outlining candidate contains a sp modifying add without a
-; corresponsing sub, so we shouldn't outline it.
+;; Function a's outlining candidate contains a sp modifying add without a
+;; corresponding sub, so we shouldn't outline it.
define void @a() "sign-return-address"="all" "sign-return-address-key"="b_key" {
; CHECK-LABEL: a: // @a
; CHECK: // %bb.0:
; CHECK-NEXT: .cfi_b_key_frame
; V8A-NEXT: hint #27
; V83A-NEXT: pacibsp
-; V8A-NEXT, V83A-NEXT: .cfi_negate_ra_state
+; CHECK-NEXT: .cfi_negate_ra_state
%1 = alloca i32, align 4
%2 = alloca i32, align 4
%3 = alloca i32, align 4
@@ -27,9 +26,8 @@ define void @a() "sign-return-address"="all" "sign-return-address-key"="b_key" {
store i32 6, ptr %6, align 4
; CHECK-NOT: bl OUTLINED_FUNCTION_{{[0-9]+}}
; V8A: hint #31
-; V83A: autibsp
-; V8A-NEXT, V83A-NEXT: .cfi_negate_ra_state
-; V8A-NEXT, V83A-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retab
ret void
}
@@ -52,8 +50,8 @@ define void @b() "sign-return-address"="all" "sign-return-address-key"="b_key" n
store i32 6, ptr %6, align 4
; CHECK: bl [[OUTLINED_FUNC:OUTLINED_FUNCTION_[0-9]+]]
; V8A: hint #31
-; V83A: autibsp
-; V8A-NEXT, V83A-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retab
ret void
}
@@ -76,8 +74,8 @@ define void @c() "sign-return-address"="all" "sign-return-address-key"="b_key" n
store i32 6, ptr %6, align 4
; CHECK: bl [[OUTLINED_FUNC]]
; V8A: hint #31
-; V83A: autibsp
-; V8A-NEXT, V83A-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retab
ret void
}
@@ -86,5 +84,5 @@ define void @c() "sign-return-address"="all" "sign-return-address-key"="b_key" n
; V8A-NEXT: hint #27
; V83A-NEXT: pacibsp
; V8A: hint #31
-; V83A: autibsp
-; V8A-NEXT, V83A-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retab
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
index a5757a70843a9..f4e9c0a4c2204 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
@@ -1,8 +1,7 @@
-; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN: aarch64 %s -o - | FileCheck %s --check-prefixes CHECK,V8A
-; RUN-V83A: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN-V83A: aarch64 -mattr=+v8.3a %s -o - > %t
-; RUN-V83A: FileCheck --check-prefixes CHECK,V83A < %t %s
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V8A
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 -mattr=+v8.3a %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V83A
define void @a() "sign-return-address"="all" {
; CHECK-LABEL: a: // @a
@@ -22,13 +21,13 @@ define void @a() "sign-return-address"="all" {
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #29
-; V83A: autiasp
+; V83A: retaa
ret void
; CHECK: .cfi_endproc
}
define void @b() "sign-return-address"="non-leaf" {
-; CHECK-LABE: b: // @b
+; CHECK-LABEL: b: // @b
; V8A-NOT: hint #25
; V83A-NOT: paciasp
; CHECK-NOT: .cfi_negate_ra_state
@@ -46,6 +45,7 @@ define void @b() "sign-return-address"="non-leaf" {
store i32 6, ptr %6, align 4
; V8A-NOT: hint #29
; V83A-NOT: autiasp
+; V83A-NOT: retaa
ret void
; CHECK: .cfi_endproc
}
@@ -54,7 +54,7 @@ define void @c() "sign-return-address"="all" {
; CHECK-LABEL: c: // @c
; V8A: hint #25
; V83A: paciasp
-; V8A-NEXT, V83A-NEXT: .cfi_negate_ra_state
+; CHECK-NEXT: .cfi_negate_ra_state
%1 = alloca i32, align 4
%2 = alloca i32, align 4
%3 = alloca i32, align 4
@@ -68,7 +68,7 @@ define void @c() "sign-return-address"="all" {
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #29
-; V83A: autiasp
+; V83A: retaa
ret void
; CHECK: .cfi_endproc
}
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll
index 0d948f379af1a..5dfdba21ed6a6 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll
@@ -1,8 +1,7 @@
-; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN: aarch64 %s -o - | FileCheck %s --check-prefixes CHECK,V8A
-; RUN-V83A: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN-V83A: aarch64 -mattr=+v8.3a %s -o - > %t
-; RUN-V83A: FileCheck --check-prefixes CHECK,V83A < %t %s
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V8A
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 -mattr=+v8.3a %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V83A
define i64 @a(i64 %x) "sign-return-address"="non-leaf" "sign-return-address-key"="b_key" {
; CHECK-LABEL: a: // @a
@@ -70,7 +69,7 @@ define i64 @c(i64 %x) "sign-return-address"="non-leaf" "sign-return-address-key"
ret i64 %x
}
-; Outlined function is leaf-function => don't sign it
+;; Outlined function is leaf-function => don't sign it
; CHECK-LABEL: OUTLINED_FUNCTION_0:
; CHECK-NOT: .cfi_b_key_frame
; CHECK-NOT: paci{{[a,b]}}sp
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-regsave.mir b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-regsave.mir
index 28986064e7946..9a983cbd6714e 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-regsave.mir
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-regsave.mir
@@ -126,6 +126,6 @@ body: |
# CHECK: body:
# CHECK-NEXT: bb.0:
# CHECK-NOT: frame-setup EMITBKEY
-# CHECK-NOT: frame-setup PACI{{[A,B]]}}SP
+# CHECK-NOT: frame-setup PACI{{[A,B]}}SP
# CHECK-NOT: frame-setup CFI_INSTRUCTION negate_ra_sign_state
-# CHECK-NOT: frame-destroy AUTI{{[A,B]]}}SP
+# CHECK-NOT: frame-destroy AUTI{{[A,B]}}SP
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll
index 1e4d6286cd22c..decf6a9bae23c 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll
@@ -1,8 +1,7 @@
-; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN: aarch64 %s -o - | FileCheck %s --check-prefixes CHECK,V8A
-; RUN-V83A: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN-V83A: aarch64 -mattr=+v8.3a %s -o - > %t
-; RUN-V83A: FileCheck --check-prefixes CHECK,V83A < %t %s
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V8A
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 -mattr=+v8.3a %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V83A
define void @a() "sign-return-address"="all" {
; CHECK-LABEL: a: // @a
@@ -22,7 +21,7 @@ define void @a() "sign-return-address"="all" {
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #29
-; V83A: autiasp
+; V83A: retaa
ret void
; CHECK: .cfi_endproc
}
@@ -47,6 +46,7 @@ define void @b() "sign-return-address"="all" "sign-return-address-key"="b_key" {
store i32 6, ptr %6, align 4
; V8A-NOT: hint #29
; V83A-NOT: autiasp
+; V83A-NOT: retaa
ret void
; CHECK: .cfi_endproc
}
@@ -69,7 +69,7 @@ define void @c() "sign-return-address"="all" {
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #29
-; V83A: autiasp
+; V83A: retaa
ret void
; CHECK: .cfi_endproc
}
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll
index 82a65cf06f602..88789a150ee74 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll
@@ -1,8 +1,7 @@
-; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN: aarch64 %s -o - | FileCheck %s --check-prefixes CHECK,V8A
-; RUN-V83A: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN-V83A: aarch64 -mattr=+v8.3a %s -o - > %t
-; RUN-V83A: FileCheck --check-prefixes CHECK,V83A < %t %s
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V8A
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 -mattr=+v8.3a %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V83A
define void @a() "sign-return-address"="all" "sign-return-address-key"="a_key" nounwind {
; CHECK-LABEL: a: // @a
@@ -21,7 +20,7 @@ define void @a() "sign-return-address"="all" "sign-return-address-key"="a_key" n
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #29
-; V83A: autiasp
+; V83A: retaa
ret void
}
@@ -42,7 +41,7 @@ define void @b() "sign-return-address"="all" nounwind {
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #29
-; V83A: autiasp
+; V83A: retaa
ret void
}
@@ -63,7 +62,7 @@ define void @c() "sign-return-address"="all" nounwind {
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #29
-; V83A: autiasp
+; V83A: retaa
ret void
}
@@ -71,5 +70,5 @@ define void @c() "sign-return-address"="all" nounwind {
; V8A: hint #25
; V83A: paciasp
; V8A: hint #29
-; V83A: autiasp
-; CHECK-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retaa
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll
index 690b803034343..f3c9adc41d6fb 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll
@@ -1,8 +1,7 @@
-; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN: aarch64 %s -o - | FileCheck %s --check-prefixes CHECK,V8A
-; RUN-V83A: llc -verify-machineinstrs -enable-machine-outliner -mtriple \
-; RUN-V83A: aarch64 -mattr=+v8.3a %s -o - > %t
-; RUN-V83A: FileCheck --check-prefixes CHECK,V83A < %t %s
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V8A
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple aarch64 -mattr=+v8.3a %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V83A
define void @a() "sign-return-address"="all" "sign-return-address-key"="b_key" nounwind {
; CHECK-LABEL: a: // @a
@@ -22,7 +21,7 @@ define void @a() "sign-return-address"="all" "sign-return-address-key"="b_key" n
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #31
-; V83A: autibsp
+; V83A: retab
ret void
}
@@ -44,7 +43,7 @@ define void @b() "sign-return-address"="all" "sign-return-address-key"="b_key" n
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #31
-; V83A: autibsp
+; V83A: retab
ret void
}
@@ -66,7 +65,7 @@ define void @c() "sign-return-address"="all" "sign-return-address-key"="b_key" n
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
; V8A: hint #31
-; V83A: autibsp
+; V83A: retab
ret void
}
@@ -75,5 +74,5 @@ define void @c() "sign-return-address"="all" "sign-return-address-key"="b_key" n
; V8A-NEXT: hint #27
; V83A-NEXT: pacibsp
; V8A: hint #31
-; V83A: autibsp
-; CHECK-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retab
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll
index fd61bc03a3430..a7ea32952f3b7 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll
@@ -24,8 +24,8 @@ define void @a() #0 {
store i32 4, ptr %4, align 4
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
+; CHECK-NOT: auti{{[a,b]}}sp
; CHECK: retab
-; CHECK-NOT: auti[a,b]sp
ret void
}
@@ -48,8 +48,8 @@ define void @b() #0 {
store i32 4, ptr %4, align 4
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
+; CHECK-NOT: auti{{[a,b]}}sp
; CHECK: retab
-; CHECK-NOT: auti[a,b]sp
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
index 618973b9368d1..e09b1e3104221 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
@@ -1,8 +1,8 @@
; RUN: llc -mtriple aarch64-arm-linux-gnu --enable-machine-outliner -outliner-leaf-descendants=false \
-; RUN: -verify-machineinstrs %s -o - | FileCheck --check-prefixes CHECK,V8A %s
-; RUN-V83A: llc -mtriple aarch64 -enable-machine-outliner -outliner-leaf-descendants=false \
-; RUN-V83A: -verify-machineinstrs -mattr=+v8.3a %s -o - > %t
-; RUN-V83A: FileCheck --check-prefixes CHECK,V83A < %t %s
+; RUN: -verify-machineinstrs %s -o - | FileCheck --check-prefixes CHECK,V8A %s
+; RUN: llc -mtriple aarch64 -enable-machine-outliner -outliner-leaf-descendants=false \
+; RUN: -verify-machineinstrs -mattr=+v8.3a %s -o - | \
+; RUN: FileCheck %s --check-prefixes CHECK,V83A
declare i32 @thunk_called_fn(i32, i32, i32, i32)
@@ -13,8 +13,8 @@ define i32 @a() #0 {
; V83A-NEXT: paciasp
; CHECK-NEXT: .cfi_negate_ra_state
; V8A: hint #29
-; V83A: autiasp
-; CHECK-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retaa
entry:
%call = tail call i32 @thunk_called_fn(i32 1, i32 2, i32 3, i32 4)
%cx = add i32 %call, 8
@@ -28,8 +28,8 @@ define i32 @b() #0 {
; V83A-NEXT: paciasp
; CHECK-NEXT: .cfi_negate_ra_state
; V8A: hint #29
-; V83A: autiasp
-; CHECK-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retaa
entry:
%call = tail call i32 @thunk_called_fn(i32 1, i32 2, i32 3, i32 4)
%cx = add i32 %call, 88
@@ -43,8 +43,8 @@ define hidden i32 @c(ptr %fptr) #0 {
; V83A-NEXT: paciasp
; CHECK-NEXT: .cfi_negate_ra_state
; V8A: hint #29
-; V83A: autiasp
-; CHECK-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retaa
entry:
%call = tail call i32 %fptr(i32 1, i32 2, i32 3, i32 4)
%add = add nsw i32 %call, 8
@@ -58,8 +58,8 @@ define hidden i32 @d(ptr %fptr) #0 {
; V83A-NEXT: paciasp
; CHECK-NEXT: .cfi_negate_ra_state
; V8A: hint #29
-; V83A: autiasp
-; CHECK-NEXT: ret
+; V8A-NEXT: ret
+; V83A: retaa
entry:
%call = tail call i32 %fptr(i32 1, i32 2, i32 3, i32 4)
%add = add nsw i32 %call, 88
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll
index fd92316594271..05236b0ec4a8d 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll
@@ -21,8 +21,8 @@ define void @a() #0 {
store i32 4, ptr %4, align 4
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
+; CHECK-NOT: auti{{[a,b]}}sp
; CHECK: retab
-; CHECK-NOT: auti[a,b]sp
ret void
}
@@ -43,8 +43,8 @@ define void @b() #0 {
store i32 4, ptr %4, align 4
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
+; CHECK-NOT: auti{{[a,b]}}sp
; CHECK: retab
-; CHECK-NOT: auti[a,b]sp
ret void
}
@@ -65,8 +65,8 @@ define void @c() #0 {
store i32 4, ptr %4, align 4
store i32 5, ptr %5, align 4
store i32 6, ptr %6, align 4
+; CHECK-NOT: auti{{[a,b]}}sp
; CHECK: retab
-; CHECK-NOT: auti[a,b]sp
ret void
}
@@ -78,5 +78,5 @@ attributes #0 = { "sign-return-address"="all"
; CHECK: OUTLINED_FUNC
; CHECK: // %bb.0:
; CHECK-NEXT: pacibsp
+; CHECK-NOT: auti{{[a,b]}}sp
; CHECK: retab
-; CHECK-NOT: auti[a,b]sp
>From 79516ddbee3a1d6c95cfbe6d14c790f741167165 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 2 Jul 2024 23:18:25 +0200
Subject: [PATCH 041/246] AMDGPU: Fix assert from wrong address space size
assumption (#97267)
This was assuming the source address space was at least as large
as the destination of the cast. I'm not sure why this was casting
to begin with; the assumption seems to be that the source
address space from the root addrspacecast matches the underlying
object, so directly check that.
Fixes #97457
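To see why the widths can disagree, a hedged standalone C++ model (the toy
width table stands in for the amdgcn DataLayout, where flat pointers are
64-bit and private/addrspace(5) pointers are 32-bit; the real code uses
KnownBits, which asserts when widths are mixed):

  #include <cassert>

  // Toy stand-in for DataLayout::getPointerSizeInBits on amdgcn.
  unsigned pointerSizeInBits(unsigned AS) { return AS == 5 ? 32 : 64; }

  struct Ptr { unsigned AS; };

  bool isPtrKnownNeverNull(const Ptr &underlying, unsigned AS) {
    // getUnderlyingObject may have looked through another addrspacecast,
    // so the underlying object's address space (and pointer width) can
    // differ from the queried one; bail out instead of mixing widths.
    if (AS != underlying.AS)
      return false;
    assert(pointerSizeInBits(underlying.AS) == pointerSizeInBits(AS));
    return false; // the real code continues with a KnownBits check here
  }

  int main() {
    // private (AS 5, 32-bit) seen through a flat (AS 0, 64-bit) cast:
    // previously this mismatch tripped an assert; now it conservatively
    // answers "not known never-null".
    assert(!isPtrKnownNeverNull(Ptr{5}, /*AS=*/0));
    return 0;
  }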
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 9 +-
.../codegen-prepare-addrspacecast-non-null.ll | 91 ++++++++++++++++++-
2 files changed, 98 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 6e7d34f5adaa3..052e1140533f3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2035,6 +2035,11 @@ static bool isPtrKnownNeverNull(const Value *V, const DataLayout &DL,
if (const auto *Arg = dyn_cast<Argument>(V); Arg && Arg->hasNonNullAttr())
return true;
+ // getUnderlyingObject may have looked through another addrspacecast, although
+ // the optimizable situations have most likely been folded out by now.
+ if (AS != cast<PointerType>(V->getType())->getAddressSpace())
+ return false;
+
// TODO: Calls that return nonnull?
// For all other things, use KnownBits.
@@ -2043,8 +2048,10 @@ static bool isPtrKnownNeverNull(const Value *V, const DataLayout &DL,
//
// TODO: Use ValueTracking's isKnownNeverNull if it becomes aware that some
// address spaces have non-zero null values.
- auto SrcPtrKB = computeKnownBits(V, DL).trunc(DL.getPointerSizeInBits(AS));
+ auto SrcPtrKB = computeKnownBits(V, DL);
const auto NullVal = TM.getNullPointerValue(AS);
+
+ assert(SrcPtrKB.getBitWidth() == DL.getPointerSizeInBits(AS));
assert((NullVal == 0 || NullVal == -1) &&
"don't know how to check for this null value!");
return NullVal ? !SrcPtrKB.getMaxValue().isAllOnes() : SrcPtrKB.isNonZero();
diff --git a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
index cddc3161038b0..e2b4865410db8 100644
--- a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
+++ b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn-- -amdgpu-codegenprepare -S < %s | FileCheck -check-prefix=OPT %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,DAGISEL-ASM
; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,GISEL-ASM
@@ -270,3 +270,92 @@ finally:
end:
ret void
}
+
+; This used to assert due to assuming the size of the source address
+; space was larger than the destination.
+
+define i32 @cast_private_to_flat_to_private(ptr addrspace(5) %private.ptr) {
+; OPT-LABEL: define i32 @cast_private_to_flat_to_private(
+; OPT-SAME: ptr addrspace(5) [[PRIVATE_PTR:%.*]]) {
+; OPT-NEXT: [[FLAT_PTR:%.*]] = addrspacecast ptr addrspace(5) [[PRIVATE_PTR]] to ptr
+; OPT-NEXT: [[CAST_BACK:%.*]] = addrspacecast ptr [[FLAT_PTR]] to ptr addrspace(5)
+; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, ptr addrspace(5) [[CAST_BACK]], align 4
+; OPT-NEXT: ret i32 [[LOAD]]
+;
+; ASM-LABEL: cast_private_to_flat_to_private:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen glc
+; ASM-NEXT: s_waitcnt vmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %flat.ptr = addrspacecast ptr addrspace(5) %private.ptr to ptr
+ %cast.back = addrspacecast ptr %flat.ptr to ptr addrspace(5)
+ %load = load volatile i32, ptr addrspace(5) %cast.back
+ ret i32 %load
+}
+
+; This is UB but shouldn't assert.
+define i32 @cast_private_to_flat_to_local(ptr addrspace(5) %private.ptr) {
+; OPT-LABEL: define i32 @cast_private_to_flat_to_local(
+; OPT-SAME: ptr addrspace(5) [[PRIVATE_PTR:%.*]]) {
+; OPT-NEXT: [[FLAT_PTR:%.*]] = addrspacecast ptr addrspace(5) [[PRIVATE_PTR]] to ptr
+; OPT-NEXT: [[CAST_BACK:%.*]] = addrspacecast ptr [[FLAT_PTR]] to ptr addrspace(3)
+; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, ptr addrspace(3) [[CAST_BACK]], align 4
+; OPT-NEXT: ret i32 [[LOAD]]
+;
+; DAGISEL-ASM-LABEL: cast_private_to_flat_to_local:
+; DAGISEL-ASM: ; %bb.0:
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; DAGISEL-ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
+; DAGISEL-ASM-NEXT: ds_read_b32 v0, v0
+; DAGISEL-ASM-NEXT: s_waitcnt lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-ASM-LABEL: cast_private_to_flat_to_local:
+; GISEL-ASM: ; %bb.0:
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GISEL-ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
+; GISEL-ASM-NEXT: ds_read_b32 v0, v0
+; GISEL-ASM-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+ %flat.ptr = addrspacecast ptr addrspace(5) %private.ptr to ptr
+ %cast.back = addrspacecast ptr %flat.ptr to ptr addrspace(3)
+ %load = load volatile i32, ptr addrspace(3) %cast.back
+ ret i32 %load
+}
+
+; This is UB but shouldn't assert.
+define i32 @cast_private_to_flat_to_global(ptr addrspace(6) %const32.ptr) {
+; OPT-LABEL: define i32 @cast_private_to_flat_to_global(
+; OPT-SAME: ptr addrspace(6) [[CONST32_PTR:%.*]]) {
+; OPT-NEXT: [[FLAT_PTR:%.*]] = addrspacecast ptr addrspace(6) [[CONST32_PTR]] to ptr
+; OPT-NEXT: [[LOCAL_PTR:%.*]] = addrspacecast ptr [[FLAT_PTR]] to ptr addrspace(3)
+; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, ptr addrspace(3) [[LOCAL_PTR]], align 4
+; OPT-NEXT: ret i32 [[LOAD]]
+;
+; ASM-LABEL: cast_private_to_flat_to_global:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_mov_b32_e32 v1, 0
+; ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
+; ASM-NEXT: ds_read_b32 v0, v0
+; ASM-NEXT: s_waitcnt lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %flat.ptr = addrspacecast ptr addrspace(6) %const32.ptr to ptr
+ %local.ptr = addrspacecast ptr %flat.ptr to ptr addrspace(3)
+ %load = load volatile i32, ptr addrspace(3) %local.ptr
+ ret i32 %load
+}
>From 57555c6a0a96790bf1408b056405abe07899ead4 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 2 Jul 2024 13:57:10 -0700
Subject: [PATCH 042/246] [RISCV] Don't custom lower f16 SCALAR_TO_VECTOR with
Zvfhmin.
This doesn't appear to be tested and our custom handler doesn't
support this right now.
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ee45f730dc450..7e38e14689fa0 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1074,7 +1074,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
VT, Custom);
setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
- ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
+ ISD::EXTRACT_SUBVECTOR},
VT, Custom);
if (Subtarget.hasStdExtZfhminOrZhinxmin())
setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
@@ -1317,7 +1317,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
VT, Custom);
setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
- ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
+ ISD::EXTRACT_SUBVECTOR},
VT, Custom);
setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
>From 9ed4b171e95e9704286a5406c41a9a14580e2c42 Mon Sep 17 00:00:00 2001
From: Tom Stellard <tstellar at redhat.com>
Date: Tue, 2 Jul 2024 15:12:15 -0700
Subject: [PATCH 043/246] llvm_gtest: Only install once (#96959)
llvm_gtest and its dependencies were being installed twice (and into two
different locations depending on the cmake options).
---
third-party/unittest/CMakeLists.txt | 2 --
1 file changed, 2 deletions(-)
diff --git a/third-party/unittest/CMakeLists.txt b/third-party/unittest/CMakeLists.txt
index bf6ef54555144..8b38791629a4e 100644
--- a/third-party/unittest/CMakeLists.txt
+++ b/third-party/unittest/CMakeLists.txt
@@ -85,8 +85,6 @@ target_include_directories(llvm_gtest
add_subdirectory(UnitTestMain)
if (LLVM_INSTALL_GTEST)
- install(TARGETS llvm_gtest llvm_gtest_main LLVMTestingSupport LLVMTestingAnnotations
- ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" COMPONENT llvm_gtest)
install(DIRECTORY googletest/include/gtest/ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/llvm-gtest/gtest/" COMPONENT llvm_gtest)
install(DIRECTORY googlemock/include/gmock/ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/llvm-gmock/gmock/" COMPONENT llvm_gtest)
endif()
>From 21276fd7beb640d5fb1a10c228c9f48f620a8eac Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Tue, 2 Jul 2024 15:19:28 -0700
Subject: [PATCH 044/246] [MC] Don't treat altentry symbols as atoms
The current `setAtom` is inaccurate: a `.alt_entry` label can also be
recognized as an atom. This is mostly benign, but might cause two
locations only separated by an `.alt_entry` to have different atoms.
https://reviews.llvm.org/D153167 changed an `evaluateKnownAbsolute` to
`evaluateAsAbsolute` and would not fold `A-B` even if they are only
separated by a `.alt_entry` label, leading to a spurious error
`invalid CFI advance_loc expression`.
The fix is similar to #82268: add a special case for `.alt_entry`.
Fix #97116
Pull Request: https://github.com/llvm/llvm-project/pull/97479
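Restated as a hedged sketch (toy struct; the condition mirrors the one-line
change in the diff below), a symbol now starts an atom only if it is
linker-visible, defined in a section, not a variable, and not an
`.alt_entry`, so `A-B` across only an `.alt_entry` label stays foldable:

  struct Symbol {
    bool linkerVisible, inSection, isVariable, isAltEntry;
  };

  // Mirrors the predicate in MCMachOStreamer::finishImpl after this patch.
  bool definesAtom(const Symbol &s) {
    return s.linkerVisible && s.inSection && !s.isVariable && !s.isAltEntry;
  }

  int main() {
    Symbol altEntryLabel{true, true, false, true};
    return definesAtom(altEntryLabel) ? 1 : 0; // exits 0: not an atom
  }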
---
llvm/lib/MC/MCMachOStreamer.cpp | 2 +-
llvm/test/MC/MachO/cfi-advance-loc-err.s | 5 +++++
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/MC/MCMachOStreamer.cpp b/llvm/lib/MC/MCMachOStreamer.cpp
index d67b95820a8a9..0b34d87033b7b 100644
--- a/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/llvm/lib/MC/MCMachOStreamer.cpp
@@ -508,7 +508,7 @@ void MCMachOStreamer::finishImpl() {
DenseMap<const MCFragment *, const MCSymbol *> DefiningSymbolMap;
for (const MCSymbol &Symbol : getAssembler().symbols()) {
if (getAssembler().isSymbolLinkerVisible(Symbol) && Symbol.isInSection() &&
- !Symbol.isVariable()) {
+ !Symbol.isVariable() && !cast<MCSymbolMachO>(Symbol).isAltEntry()) {
// An atom defining symbol should never be internal to a fragment.
assert(Symbol.getOffset() == 0 &&
"Invalid offset in atom defining symbol!");
diff --git a/llvm/test/MC/MachO/cfi-advance-loc-err.s b/llvm/test/MC/MachO/cfi-advance-loc-err.s
index 3143dd84efc63..77b6544cb12d8 100644
--- a/llvm/test/MC/MachO/cfi-advance-loc-err.s
+++ b/llvm/test/MC/MachO/cfi-advance-loc-err.s
@@ -9,6 +9,11 @@ _foo:
subq $8, %rsp
.cfi_adjust_cfa_offset 8
+ .alt_entry _bar
+_bar: # alt_entry label can appear here as it is not an atom
+ addq $8, %rsp
+ .cfi_adjust_cfa_offset -8
+
tmp0: # non-private label cannot appear here
addq $8, %rsp
# CHECK: :[[#@LINE+1]]:3: error: invalid CFI advance_loc expression
>From 622df0ee9226b90e924538909337d55333d5d2fa Mon Sep 17 00:00:00 2001
From: Med Ismail Bennani <ismail at bennani.ma>
Date: Tue, 2 Jul 2024 15:20:18 -0700
Subject: [PATCH 045/246] [lldb] Add scripted thread plan python base class to
lldb & website (#97481)
Following a feedback request in #97262, I took the scripted thread
plan Python base class out of that PR and made a separate one for it.
This patch adds the scripted thread plan Python base class to the lldb
Python module as well as the lldb documentation website.
Signed-off-by: Med Ismail Bennani <ismail at bennani.ma>
---
lldb/bindings/python/CMakeLists.txt | 4 +-
lldb/docs/CMakeLists.txt | 1 +
lldb/docs/python_extensions.rst | 39 +++++++++++
.../python/templates/scripted_thread_plan.py | 70 +++++++++++++++++++
4 files changed, 113 insertions(+), 1 deletion(-)
create mode 100644 lldb/docs/python_extensions.rst
create mode 100644 lldb/examples/python/templates/scripted_thread_plan.py
diff --git a/lldb/bindings/python/CMakeLists.txt b/lldb/bindings/python/CMakeLists.txt
index def6941e802bb..69306a384e0b1 100644
--- a/lldb/bindings/python/CMakeLists.txt
+++ b/lldb/bindings/python/CMakeLists.txt
@@ -108,7 +108,9 @@ function(finish_swig_python swig_target lldb_python_bindings_dir lldb_python_tar
"${LLDB_SOURCE_DIR}/examples/python/templates/parsed_cmd.py"
"${LLDB_SOURCE_DIR}/examples/python/templates/scripted_process.py"
"${LLDB_SOURCE_DIR}/examples/python/templates/scripted_platform.py"
- "${LLDB_SOURCE_DIR}/examples/python/templates/operating_system.py")
+ "${LLDB_SOURCE_DIR}/examples/python/templates/operating_system.py"
+ "${LLDB_SOURCE_DIR}/examples/python/templates/scripted_thread_plan.py"
+ )
if(APPLE)
create_python_package(
diff --git a/lldb/docs/CMakeLists.txt b/lldb/docs/CMakeLists.txt
index ed4296bbf03a4..f1664a6965332 100644
--- a/lldb/docs/CMakeLists.txt
+++ b/lldb/docs/CMakeLists.txt
@@ -31,6 +31,7 @@ if (LLDB_ENABLE_PYTHON AND SPHINX_FOUND)
COMMAND "${CMAKE_COMMAND}" -E copy "${LLDB_SOURCE_DIR}/examples/python/templates/scripted_process.py" "${CMAKE_CURRENT_BINARY_DIR}/lldb/plugins/"
COMMAND "${CMAKE_COMMAND}" -E copy "${LLDB_SOURCE_DIR}/examples/python/templates/scripted_platform.py" "${CMAKE_CURRENT_BINARY_DIR}/lldb/plugins/"
COMMAND "${CMAKE_COMMAND}" -E copy "${LLDB_SOURCE_DIR}/examples/python/templates/operating_system.py" "${CMAKE_CURRENT_BINARY_DIR}/lldb/plugins/"
+ COMMAND "${CMAKE_COMMAND}" -E copy "${LLDB_SOURCE_DIR}/examples/python/templates/scripted_thread_plan.py" "${CMAKE_CURRENT_BINARY_DIR}/lldb/plugins/"
COMMENT "Copying lldb.py to pretend its a Python package.")
add_dependencies(lldb-python-doc-package swig_wrapper_python lldb-python)
diff --git a/lldb/docs/python_extensions.rst b/lldb/docs/python_extensions.rst
new file mode 100644
index 0000000000000..7e5f1ba6879db
--- /dev/null
+++ b/lldb/docs/python_extensions.rst
@@ -0,0 +1,39 @@
+Python Extensions
+=================
+
+LLDB provides scriptable extensions to augment the debugger's capabilities.
+This gives users the ability to tailor their debugging experience to their own needs.
+
+This page describes some of these scripting extensions:
+
+Operating System Thread Plugins
+-------------------------------
+
+.. automodapi:: lldb.plugins.operating_system
+ :no-heading:
+ :skip: ScriptedThread
+ :no-inheritance-diagram:
+
+Scripted Process Plugins
+-------------------------------
+
+.. automodapi:: lldb.plugins.scripted_process
+ :no-heading:
+ :skip: ABCMeta
+ :no-inheritance-diagram:
+
+Scripted Platform Plugins
+-------------------------------
+
+.. automodapi:: lldb.plugins.scripted_platform
+ :no-heading:
+ :skip: ABCMeta
+ :no-inheritance-diagram:
+
+Scripted Thread Plan Plugins
+-------------------------------
+
+.. automodapi:: lldb.plugins.scripted_thread_plan
+ :no-heading:
+ :no-inheritance-diagram:
+
diff --git a/lldb/examples/python/templates/scripted_thread_plan.py b/lldb/examples/python/templates/scripted_thread_plan.py
new file mode 100644
index 0000000000000..67396cdfc53a2
--- /dev/null
+++ b/lldb/examples/python/templates/scripted_thread_plan.py
@@ -0,0 +1,70 @@
+from abc import abstractmethod
+
+import lldb
+
+
+class ScriptedThreadPlan:
+ """
+ Class that provides data for an instance of an LLDB 'ScriptedThreadPlan' plug-in class used to construct custom stepping logic.
+
+ """
+
+ def __init__(self, thread_plan: lldb.SBThreadPlan):
+ """Initialization needs a valid lldb.SBThreadPlan object. This plug-in will get created after a live process is valid and has stopped.
+
+ Args:
+ thread_plan (lldb.SBThreadPlan): The underlying `ThreadPlan` that is pushed onto the plan stack.
+ """
+ self.thread_plan = thread_plan
+
+ def explains_stop(self, event: lldb.SBEvent) -> bool:
+ """Each plan is asked from youngest to oldest if it "explains" the stop. The first plan to claim the stop wins.
+
+ Args:
+ event (lldb.SBEvent): The process stop event.
+
+ Returns:
+ bool: `True` if this stop could be claimed by this thread plan, `False` otherwise.
+ Defaults to `True`.
+ """
+ return True
+
+ def is_stale(self) -> bool:
+ """If your plan is no longer relevant (for instance, you were stepping in a particular stack frame, but some other operation pushed that frame off the stack) return True and your plan will get popped.
+
+ Returns:
+ bool: `True` if this thread plan is stale, `False` otherwise.
+ Defaults to `False`.
+ """
+ return False
+
+ def should_stop(self, event: lldb.SBEvent) -> bool:
+ """Whether this thread plan should stop and return control to the user.
+ If your plan is done at this point, call SetPlanComplete on your thread plan instance. Also, do any work you need here to set up the next stage of stepping.
+
+ Args:
+ event (lldb.SBEvent): The process stop event.
+
+ Returns:
+ bool: `True` if this plan wants to stop and return control to the user at this point, `False` otherwise.
+ Defaults to `False`.
+ """
+ self.thread_plan.SetPlanComplete(True)
+ return True
+
+ def should_step(self) -> bool:
+ """Whether this thread plan should instruction step one instruction, or continue till the next breakpoint is hit.
+
+ Returns:
+ bool: `True` if this plan will instruction step one instruction, `False` otherwise.
+ Defaults to `True`.
+ """
+ return True
+
+ def stop_description(self, stream: lldb.SBStream) -> None:
+ """Customize the thread plan stop reason when the thread plan is complete.
+
+ Args:
+ stream (lldb.SBStream): The stream containing the stop description.
+ """
+ pass
>From 5196a91b0827b895aba63ce150ebc8f10795316c Mon Sep 17 00:00:00 2001
From: Helena Kotas <hekotas at microsoft.com>
Date: Tue, 2 Jul 2024 15:21:11 -0700
Subject: [PATCH 046/246] [HLSL] Run availability diagnostic on exported
functions (#97352)
Implements availability diagnostic on `export` functions.
For shader libraries the HLSL availability diagnostic should run on all
entry points and export functions. Now that the `export` keyword is
implemented (llvm/llvm-project#96823), we can detect which functions are
exported and run the diagnostic on them.
Exported functions can be nested in namespaces and in export
declarations, so we need to scan not just the current translation unit
but also namespace and export declaration contexts.
Fixes #92073
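The scan is an iterative worklist walk over declaration contexts; a hedged
standalone C++ model with toy node kinds (not the clang AST) shows the
shape of it:

  #include <iostream>
  #include <string>
  #include <vector>

  struct Decl {
    enum Kind { Function, Namespace, Export } kind;
    std::string name;
    std::vector<Decl> children; // only used for Namespace/Export
  };

  int main() {
    Decl nested{Decl::Function, "nested", {}};
    Decl exported{Decl::Export, "export block", {nested}};
    Decl ns{Decl::Namespace, "ns", {exported}};
    Decl tu{Decl::Namespace, "TU",
            {Decl{Decl::Function, "topLevel", {}}, ns}};

    std::vector<const Decl *> worklist{&tu};
    while (!worklist.empty()) {
      const Decl *dc = worklist.back();
      worklist.pop_back();
      for (const Decl &d : dc->children) {
        if (d.kind == Decl::Function)
          std::cout << "scan " << d.name << "\n"; // run diagnostic here
        else
          worklist.push_back(&d); // namespace/export contexts scanned later
      }
    }
    return 0;
  }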
---
clang/lib/Sema/SemaHLSL.cpp | 69 +++++++++++++------
.../Availability/avail-diag-default-lib.hlsl | 50 ++++++++++++++
.../Availability/avail-diag-relaxed-lib.hlsl | 32 +++++++++
.../Availability/avail-diag-strict-lib.hlsl | 52 +++++++++++++-
4 files changed, 181 insertions(+), 22 deletions(-)
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index eebe17a5b4bf7..babb984995f13 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -671,30 +671,58 @@ void DiagnoseHLSLAvailability::HandleFunctionOrMethodRef(FunctionDecl *FD,
void DiagnoseHLSLAvailability::RunOnTranslationUnit(
const TranslationUnitDecl *TU) {
+
// Iterate over all shader entry functions and library exports, and for those
// that have a body (definiton), run diag scan on each, setting appropriate
// shader environment context based on whether it is a shader entry function
- // or an exported function.
- for (auto &D : TU->decls()) {
- const FunctionDecl *FD = llvm::dyn_cast<FunctionDecl>(D);
- if (!FD || !FD->isThisDeclarationADefinition())
- continue;
+ // or an exported function. Exported functions can be in namespaces and in
+ // export declarations, so we need to scan those declaration contexts as well.
+ llvm::SmallVector<const DeclContext *, 8> DeclContextsToScan;
+ DeclContextsToScan.push_back(TU);
+
+ while (!DeclContextsToScan.empty()) {
+ const DeclContext *DC = DeclContextsToScan.pop_back_val();
+ for (auto &D : DC->decls()) {
+ // do not scan implicit declarations generated by the implementation
+ if (D->isImplicit())
+ continue;
+
+ // for a namespace or export declaration, add the context to the list to be
+ // scanned later
+ if (llvm::dyn_cast<NamespaceDecl>(D) || llvm::dyn_cast<ExportDecl>(D)) {
+ DeclContextsToScan.push_back(llvm::dyn_cast<DeclContext>(D));
+ continue;
+ }
- // shader entry point
- auto ShaderAttr = FD->getAttr<HLSLShaderAttr>();
- if (ShaderAttr) {
- SetShaderStageContext(ShaderAttr->getType());
- RunOnFunction(FD);
- continue;
- }
- // exported library function with definition
- // FIXME: tracking issue #92073
-#if 0
- if (FD->getFormalLinkage() == Linkage::External) {
- SetUnknownShaderStageContext();
- RunOnFunction(FD);
+ // skip over other decls or function decls without body
+ const FunctionDecl *FD = llvm::dyn_cast<FunctionDecl>(D);
+ if (!FD || !FD->isThisDeclarationADefinition())
+ continue;
+
+ // shader entry point
+ if (HLSLShaderAttr *ShaderAttr = FD->getAttr<HLSLShaderAttr>()) {
+ SetShaderStageContext(ShaderAttr->getType());
+ RunOnFunction(FD);
+ continue;
+ }
+ // exported library function
+ // FIXME: replace this loop with external linkage check once issue #92071
+ // is resolved
+ bool isExport = FD->isInExportDeclContext();
+ if (!isExport) {
+ for (const auto *Redecl : FD->redecls()) {
+ if (Redecl->isInExportDeclContext()) {
+ isExport = true;
+ break;
+ }
+ }
+ }
+ if (isExport) {
+ SetUnknownShaderStageContext();
+ RunOnFunction(FD);
+ continue;
+ }
}
-#endif
}
}
@@ -707,8 +735,7 @@ void DiagnoseHLSLAvailability::RunOnFunction(const FunctionDecl *FD) {
// For any CallExpr found during the traversal add its callee to the top of
// the stack to be processed next. Functions already processed are stored in
// ScannedDecls.
- const FunctionDecl *FD = DeclsToScan.back();
- DeclsToScan.pop_back();
+ const FunctionDecl *FD = DeclsToScan.pop_back_val();
// Decl was already scanned
const unsigned ScannedStages = GetScannedStages(FD);
diff --git a/clang/test/SemaHLSL/Availability/avail-diag-default-lib.hlsl b/clang/test/SemaHLSL/Availability/avail-diag-default-lib.hlsl
index 515e4c5f9df03..6bfc8577670cc 100644
--- a/clang/test/SemaHLSL/Availability/avail-diag-default-lib.hlsl
+++ b/clang/test/SemaHLSL/Availability/avail-diag-default-lib.hlsl
@@ -110,6 +110,55 @@ class MyClass
}
};
+// Exported function without body, not used
+export void exportedFunctionUnused(float f);
+
+// Exported function with body, without export, not used
+void exportedFunctionUnused(float f) {
+ // expected-error@#exportedFunctionUnused_fx_call {{'fx' is only available on Shader Model 6.5 or newer}}
+ // expected-note@#fx {{'fx' has been marked as being introduced in Shader Model 6.5 here, but the deployment target is Shader Model 6.0}}
+ float A = fx(f); // #exportedFunctionUnused_fx_call
+
+ // API with shader-stage-specific availability in unused exported library function
+ // - no errors expected because the actual shader stage this function
+ // will be used in is not known at this time
+ float B = fy(f);
+ float C = fz(f);
+}
+
+// Exported function with body - called from main() which is a compute shader entry point
+export void exportedFunctionUsed(float f) {
+ // expected-error@#exportedFunctionUsed_fx_call {{'fx' is only available on Shader Model 6.5 or newer}}
+ // expected-note@#fx {{'fx' has been marked as being introduced in Shader Model 6.5 here, but the deployment target is Shader Model 6.0}}
+ float A = fx(f); // #exportedFunctionUsed_fx_call
+
+ // expected-error@#exportedFunctionUsed_fy_call {{'fy' is only available in compute environment on Shader Model 6.5 or newer}}
+ // expected-note@#fy {{'fy' has been marked as being introduced in Shader Model 6.5 in compute environment here, but the deployment target is Shader Model 6.0 compute environment}}
+ float B = fy(f); // #exportedFunctionUsed_fy_call
+
+ // expected-error@#exportedFunctionUsed_fz_call {{'fz' is unavailable}}
+ // expected-note@#fz {{'fz' has been marked as being introduced in Shader Model 6.5 in mesh environment here, but the deployment target is Shader Model 6.0 compute environment}}
+ float C = fz(f); // #exportedFunctionUsed_fz_call
+}
+
+namespace A {
+ namespace B {
+ export {
+ void exportedFunctionInNS(float x) {
+ // expected-error@#exportedFunctionInNS_fx_call {{'fx' is only available on Shader Model 6.5 or newer}}
+ // expected-note@#fx {{'fx' has been marked as being introduced in Shader Model 6.5 here, but the deployment target is Shader Model 6.0}}
+ float A = fx(x); // #exportedFunctionInNS_fx_call
+
+ // API with shader-stage-specific availability in exported library function
+ // - no errors expected because the actual shader stage this function
+ // will be used in is not known at this time
+ float B = fy(x);
+ float C = fz(x);
+ }
+ }
+ }
+}
+
// Shader entry point without body
[shader("compute")]
[numthreads(4,1,1)]
@@ -126,5 +175,6 @@ float main() {
float c = C.makeF();
float d = test((float)1.0);
float e = test((half)1.0);
+ exportedFunctionUsed(1.0f);
return a * b * c;
}
diff --git a/clang/test/SemaHLSL/Availability/avail-diag-relaxed-lib.hlsl b/clang/test/SemaHLSL/Availability/avail-diag-relaxed-lib.hlsl
index 6bd20450f8bfa..4c9783138f670 100644
--- a/clang/test/SemaHLSL/Availability/avail-diag-relaxed-lib.hlsl
+++ b/clang/test/SemaHLSL/Availability/avail-diag-relaxed-lib.hlsl
@@ -110,6 +110,37 @@ class MyClass
}
};
+// Exported function without body, not used
+export void exportedFunctionUnused(float f);
+
+// Exported function with body, without export, not used
+void exportedFunctionUnused(float f) {
+ // expected-warning@#exportedFunctionUnused_fx_call {{'fx' is only available on Shader Model 6.5 or newer}}
+ // expected-note@#fx {{'fx' has been marked as being introduced in Shader Model 6.5 here, but the deployment target is Shader Model 6.0}}
+ float A = fx(f); // #exportedFunctionUnused_fx_call
+
+ // API with shader-stage-specific availability in unused exported library function
+ // - no errors expected because the actual shader stage this function
+ // will be used in is not known at this time
+ float B = fy(f);
+ float C = fz(f);
+}
+
+// Exported function with body - called from main() which is a compute shader entry point
+export void exportedFunctionUsed(float f) {
+ // expected-warning@#exportedFunctionUsed_fx_call {{'fx' is only available on Shader Model 6.5 or newer}}
+ // expected-note@#fx {{'fx' has been marked as being introduced in Shader Model 6.5 here, but the deployment target is Shader Model 6.0}}
+ float A = fx(f); // #exportedFunctionUsed_fx_call
+
+ // expected-warning@#exportedFunctionUsed_fy_call {{'fy' is only available in compute environment on Shader Model 6.5 or newer}}
+ // expected-note@#fy {{'fy' has been marked as being introduced in Shader Model 6.5 in compute environment here, but the deployment target is Shader Model 6.0 compute environment}}
+ float B = fy(f); // #exportedFunctionUsed_fy_call
+
+ // expected-warning@#exportedFunctionUsed_fz_call {{'fz' is unavailable}}
+ // expected-note@#fz {{'fz' has been marked as being introduced in Shader Model 6.5 in mesh environment here, but the deployment target is Shader Model 6.0 compute environment}}
+ float C = fz(f); // #exportedFunctionUsed_fz_call
+}
+
// Shader entry point without body
[shader("compute")]
[numthreads(4,1,1)]
@@ -126,5 +157,6 @@ float main() {
float c = C.makeF();
float d = test((float)1.0);
float e = test((half)1.0);
+ exportedFunctionUsed(1.0f);
return a * b * c;
}
diff --git a/clang/test/SemaHLSL/Availability/avail-diag-strict-lib.hlsl b/clang/test/SemaHLSL/Availability/avail-diag-strict-lib.hlsl
index 4c9675051e570..c7be5afbc2d22 100644
--- a/clang/test/SemaHLSL/Availability/avail-diag-strict-lib.hlsl
+++ b/clang/test/SemaHLSL/Availability/avail-diag-strict-lib.hlsl
@@ -129,6 +129,55 @@ class MyClass
}
};
+// Exported function without body, not used
+export void exportedFunctionUnused(float f);
+
+// Exported function with body, without export, not used
+void exportedFunctionUnused(float f) {
+ // expected-error@#exportedFunctionUnused_fx_call {{'fx' is only available on Shader Model 6.5 or newer}}
+ // expected-note@#fx {{'fx' has been marked as being introduced in Shader Model 6.5 here, but the deployment target is Shader Model 6.0}}
+ float A = fx(f); // #exportedFunctionUnused_fx_call
+
+ // API with shader-stage-specific availability in unused exported library function
+ // - no errors expected because the actual shader stage this function
+ // will be used in is not known at this time
+ float B = fy(f);
+ float C = fz(f);
+}
+
+// Exported function with body - called from main() which is a compute shader entry point
+export void exportedFunctionUsed(float f) {
+ // expected-error@#exportedFunctionUsed_fx_call {{'fx' is only available on Shader Model 6.5 or newer}}
+ // expected-note@#fx {{'fx' has been marked as being introduced in Shader Model 6.5 here, but the deployment target is Shader Model 6.0}}
+ float A = fx(f); // #exportedFunctionUsed_fx_call
+
+ // expected-error@#exportedFunctionUsed_fy_call {{'fy' is only available in compute environment on Shader Model 6.5 or newer}}
+ // expected-note@#fy {{'fy' has been marked as being introduced in Shader Model 6.5 in compute environment here, but the deployment target is Shader Model 6.0 compute environment}}
+ float B = fy(f); // #exportedFunctionUsed_fy_call
+
+ // expected-error@#exportedFunctionUsed_fz_call {{'fz' is unavailable}}
+ // expected-note@#fz {{'fz' has been marked as being introduced in Shader Model 6.5 in mesh environment here, but the deployment target is Shader Model 6.0 compute environment}}
+ float C = fz(f); // #exportedFunctionUsed_fz_call
+}
+
+namespace A {
+ namespace B {
+ export {
+ void exportedFunctionInNS(float x) {
+ // expected-error@#exportedFunctionInNS_fx_call {{'fx' is only available on Shader Model 6.5 or newer}}
+ // expected-note@#fx {{'fx' has been marked as being introduced in Shader Model 6.5 here, but the deployment target is Shader Model 6.0}}
+ float A = fx(x); // #exportedFunctionInNS_fx_call
+
+ // API with shader-stage-specific availability in exported library function
+ // - no errors expected because the actual shader stage this function
+ // will be used in is not known at this time
+ float B = fy(x);
+ float C = fz(x);
+ }
+ }
+ }
+}
+
[shader("compute")]
[numthreads(4,1,1)]
float main() {
@@ -138,5 +187,6 @@ float main() {
float c = C.makeF();
float d = test((float)1.0);
float e = test((half)1.0);
+ exportedFunctionUsed(1.0f);
return a * b * c;
-}
\ No newline at end of file
+}
>From 60d4a3517610494e5b2ef6bf347cdc71a6a979e5 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 2 Jul 2024 15:29:57 -0700
Subject: [PATCH 047/246] [LangRef] Rename 'operand' to 'argument' in
description of constrained intrinsics. NFC (#97462)
Continues the renaming started in #97437.
---
llvm/docs/LangRef.rst | 150 +++++++++++++++++++++---------------------
1 file changed, 75 insertions(+), 75 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 3efe47d35b2e0..c98332d3a24fc 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -25637,7 +25637,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.fadd``' intrinsic returns the sum of its
-two operands.
+two arguments.
Arguments:
@@ -25653,8 +25653,8 @@ behavior as described above.
Semantics:
""""""""""
-The value produced is the floating-point sum of the two value operands and has
-the same type as the operands.
+The value produced is the floating-point sum of the two value arguments and has
+the same type as the arguments.
'``llvm.experimental.constrained.fsub``' Intrinsic
@@ -25674,7 +25674,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.fsub``' intrinsic returns the difference
-of its two operands.
+of its two arguments.
Arguments:
@@ -25690,8 +25690,8 @@ behavior as described above.
Semantics:
""""""""""
-The value produced is the floating-point difference of the two value operands
-and has the same type as the operands.
+The value produced is the floating-point difference of the two value arguments
+and has the same type as the arguments.
'``llvm.experimental.constrained.fmul``' Intrinsic
@@ -25711,7 +25711,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.fmul``' intrinsic returns the product of
-its two operands.
+its two arguments.
Arguments:
@@ -25727,8 +25727,8 @@ behavior as described above.
Semantics:
""""""""""
-The value produced is the floating-point product of the two value operands and
-has the same type as the operands.
+The value produced is the floating-point product of the two value arguments and
+has the same type as the arguments.
'``llvm.experimental.constrained.fdiv``' Intrinsic
@@ -25748,7 +25748,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.fdiv``' intrinsic returns the quotient of
-its two operands.
+its two arguments.
Arguments:
@@ -25764,8 +25764,8 @@ behavior as described above.
Semantics:
""""""""""
-The value produced is the floating-point quotient of the two value operands and
-has the same type as the operands.
+The value produced is the floating-point quotient of the two value arguments and
+has the same type as the arguments.
'``llvm.experimental.constrained.frem``' Intrinsic
@@ -25785,7 +25785,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.frem``' intrinsic returns the remainder
-from the division of its two operands.
+from the division of its two arguments.
Arguments:
@@ -25804,7 +25804,7 @@ Semantics:
""""""""""
The value produced is the floating-point remainder from the division of the two
-value operands and has the same type as the operands. The remainder has the
+value arguments and has the same type as the arguments. The remainder has the
same sign as the dividend.
'``llvm.experimental.constrained.fma``' Intrinsic
@@ -25824,7 +25824,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.fma``' intrinsic returns the result of a
-fused-multiply-add operation on its operands.
+fused-multiply-add operation on its arguments.
Arguments:
""""""""""
@@ -25839,8 +25839,8 @@ as described above.
Semantics:
""""""""""
-The result produced is the product of the first two operands added to the third
-operand computed with infinite precision, and then rounded to the target
+The result produced is the product of the first two arguments added to the third
+argument computed with infinite precision, and then rounded to the target
precision.
'``llvm.experimental.constrained.fptoui``' Intrinsic
@@ -25874,7 +25874,7 @@ Semantics:
""""""""""
The result produced is an unsigned integer converted from the floating
-point operand. The value is truncated, so it is rounded towards zero.
+point argument. The value is truncated, so it is rounded towards zero.
'``llvm.experimental.constrained.fptosi``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -25907,7 +25907,7 @@ Semantics:
""""""""""
The result produced is a signed integer converted from the floating
-point operand. The value is truncated, so it is rounded towards zero.
+point argument. The value is truncated, so it is rounded towards zero.
'``llvm.experimental.constrained.uitofp``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -25943,7 +25943,7 @@ Semantics:
An inexact floating-point exception will be raised if rounding is required.
Any result produced is a floating point value converted from the input
-integer operand.
+integer argument.
'``llvm.experimental.constrained.sitofp``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -25979,7 +25979,7 @@ Semantics:
An inexact floating-point exception will be raised if rounding is required.
Any result produced is a floating point value converted from the input
-integer operand.
+integer argument.
'``llvm.experimental.constrained.fptrunc``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -26015,7 +26015,7 @@ Semantics:
""""""""""
The result produced is a floating point value truncated to be smaller in size
-than the operand.
+than the argument.
'``llvm.experimental.constrained.fpext``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -26049,7 +26049,7 @@ Semantics:
""""""""""
The result produced is a floating point value extended to be larger in size
-than the operand. All restrictions that apply to the fpext instruction also
+than the argument. All restrictions that apply to the fpext instruction also
apply to this intrinsic.
'``llvm.experimental.constrained.fcmp``' and '``llvm.experimental.constrained.fcmps``' Intrinsics
@@ -26074,13 +26074,13 @@ Overview:
The '``llvm.experimental.constrained.fcmp``' and
'``llvm.experimental.constrained.fcmps``' intrinsics return a boolean
-value or vector of boolean values based on comparison of its operands.
+value or vector of boolean values based on comparison of its arguments.
-If the operands are floating-point scalars, then the result type is a
+If the arguments are floating-point scalars, then the result type is a
boolean (:ref:`i1 <t_integer>`).
-If the operands are floating-point vectors, then the result type is a
-vector of boolean with the same number of elements as the operands being
+If the arguments are floating-point vectors, then the result type is a
+vector of boolean with the same number of elements as the arguments being
compared.
The '``llvm.experimental.constrained.fcmp``' intrinsic performs a quiet
@@ -26115,8 +26115,8 @@ to perform. It must be a metadata string with one of the following values:
- "``une``": unordered or not equal
- "``uno``": unordered (either nans)
-*Ordered* means that neither operand is a NAN while *unordered* means
-that either operand may be a NAN.
+*Ordered* means that neither argument is a NAN while *unordered* means
+that either argument may be a NAN.
The fourth argument specifies the exception behavior as described above.
@@ -26124,44 +26124,44 @@ Semantics:
""""""""""
``op1`` and ``op2`` are compared according to the condition code given
-as the third argument. If the operands are vectors, then the
+as the third argument. If the arguments are vectors, then the
vectors are compared element by element. Each comparison performed
always yields an :ref:`i1 <t_integer>` result, as follows:
.. _fcmp_md_cc_sem:
-- "``oeq``": yields ``true`` if both operands are not a NAN and ``op1``
+- "``oeq``": yields ``true`` if both arguments are not a NAN and ``op1``
is equal to ``op2``.
-- "``ogt``": yields ``true`` if both operands are not a NAN and ``op1``
+- "``ogt``": yields ``true`` if both arguments are not a NAN and ``op1``
is greater than ``op2``.
-- "``oge``": yields ``true`` if both operands are not a NAN and ``op1``
+- "``oge``": yields ``true`` if both arguments are not a NAN and ``op1``
is greater than or equal to ``op2``.
-- "``olt``": yields ``true`` if both operands are not a NAN and ``op1``
+- "``olt``": yields ``true`` if both arguments are not a NAN and ``op1``
is less than ``op2``.
-- "``ole``": yields ``true`` if both operands are not a NAN and ``op1``
+- "``ole``": yields ``true`` if both arguments are not a NAN and ``op1``
is less than or equal to ``op2``.
-- "``one``": yields ``true`` if both operands are not a NAN and ``op1``
+- "``one``": yields ``true`` if both arguments are not a NAN and ``op1``
is not equal to ``op2``.
-- "``ord``": yields ``true`` if both operands are not a NAN.
-- "``ueq``": yields ``true`` if either operand is a NAN or ``op1`` is
+- "``ord``": yields ``true`` if both arguments are not a NAN.
+- "``ueq``": yields ``true`` if either argument is a NAN or ``op1`` is
equal to ``op2``.
-- "``ugt``": yields ``true`` if either operand is a NAN or ``op1`` is
+- "``ugt``": yields ``true`` if either argument is a NAN or ``op1`` is
greater than ``op2``.
-- "``uge``": yields ``true`` if either operand is a NAN or ``op1`` is
+- "``uge``": yields ``true`` if either argument is a NAN or ``op1`` is
greater than or equal to ``op2``.
-- "``ult``": yields ``true`` if either operand is a NAN or ``op1`` is
+- "``ult``": yields ``true`` if either argument is a NAN or ``op1`` is
less than ``op2``.
-- "``ule``": yields ``true`` if either operand is a NAN or ``op1`` is
+- "``ule``": yields ``true`` if either argument is a NAN or ``op1`` is
less than or equal to ``op2``.
-- "``une``": yields ``true`` if either operand is a NAN or ``op1`` is
+- "``une``": yields ``true`` if either argument is a NAN or ``op1`` is
not equal to ``op2``.
-- "``uno``": yields ``true`` if either operand is a NAN.
+- "``uno``": yields ``true`` if either argument is a NAN.
The quiet comparison operation performed by
'``llvm.experimental.constrained.fcmp``' will only raise an exception
-if either operand is a SNAN. The signaling comparison operation
+if either argument is a SNAN. The signaling comparison operation
performed by '``llvm.experimental.constrained.fcmps``' will raise an
-exception if either operand is a NAN (QNAN or SNAN). Such an exception
+exception if either argument is a NAN (QNAN or SNAN). Such an exception
does not preclude a result being produced (e.g. exception might only
set a flag), therefore the distinction between ordered and unordered
comparisons is also relevant for the
@@ -26297,8 +26297,8 @@ Syntax:
Overview:
"""""""""
-The '``llvm.experimental.constrained.pow``' intrinsic returns the first operand
-raised to the (positive or negative) power specified by the second operand.
+The '``llvm.experimental.constrained.pow``' intrinsic returns the first argument
+raised to the (positive or negative) power specified by the second argument.
Arguments:
""""""""""
@@ -26334,8 +26334,8 @@ Syntax:
Overview:
"""""""""
-The '``llvm.experimental.constrained.powi``' intrinsic returns the first operand
-raised to the (positive or negative) power specified by the second operand. The
+The '``llvm.experimental.constrained.powi``' intrinsic returns the first argument
+raised to the (positive or negative) power specified by the second argument. The
order of evaluation of multiplications is not defined. When a vector of
floating-point type is used, the second argument remains a scalar integer value.
@@ -26415,7 +26415,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.sin``' intrinsic returns the sine of the
-first operand.
+first argument.
Arguments:
""""""""""
@@ -26429,7 +26429,7 @@ behavior as described above.
Semantics:
""""""""""
-This function returns the sine of the specified operand, returning the
+This function returns the sine of the specified argument, returning the
same values as the libm ``sin`` functions would, and handles error
conditions in the same way.
@@ -26451,7 +26451,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.cos``' intrinsic returns the cosine of the
-first operand.
+first argument.
Arguments:
""""""""""
@@ -26465,7 +26465,7 @@ behavior as described above.
Semantics:
""""""""""
-This function returns the cosine of the specified operand, returning the
+This function returns the cosine of the specified argument, returning the
same values as the libm ``cos`` functions would, and handles error
conditions in the same way.
@@ -26487,7 +26487,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.tan``' intrinsic returns the tangent of the
-first operand.
+first argument.
Arguments:
""""""""""
@@ -26501,7 +26501,7 @@ behavior as described above.
Semantics:
""""""""""
-This function returns the tangent of the specified operand, returning the
+This function returns the tangent of the specified argument, returning the
same values as the libm ``tan`` functions would, and handles error
conditions in the same way.
@@ -26700,8 +26700,8 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.rint``' intrinsic returns the first
-operand rounded to the nearest integer. It may raise an inexact floating-point
-exception if the operand is not an integer.
+argument rounded to the nearest integer. It may raise an inexact floating-point
+exception if the argument is not an integer.
Arguments:
""""""""""
@@ -26739,8 +26739,8 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.lrint``' intrinsic returns the first
-operand rounded to the nearest integer. An inexact floating-point exception
-will be raised if the operand is not an integer. An invalid exception is
+argument rounded to the nearest integer. An inexact floating-point exception
+will be raised if the argument is not an integer. An invalid exception is
raised if the result is too large to fit into a supported integer type,
and in this case the result is undefined.
@@ -26787,8 +26787,8 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.llrint``' intrinsic returns the first
-operand rounded to the nearest integer. An inexact floating-point exception
-will be raised if the operand is not an integer. An invalid exception is
+argument rounded to the nearest integer. An inexact floating-point exception
+will be raised if the argument is not an integer. An invalid exception is
raised if the result is too large to fit into a supported integer type,
and in this case the result is undefined.
@@ -26835,8 +26835,8 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.nearbyint``' intrinsic returns the first
-operand rounded to the nearest integer. It will not raise an inexact
-floating-point exception if the operand is not an integer.
+argument rounded to the nearest integer. It will not raise an inexact
+floating-point exception if the argument is not an integer.
Arguments:
@@ -27002,7 +27002,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.ceil``' intrinsic returns the ceiling of the
-first operand.
+first argument.
Arguments:
""""""""""
@@ -27035,7 +27035,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.floor``' intrinsic returns the floor of the
-first operand.
+first argument.
Arguments:
""""""""""
@@ -27068,7 +27068,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.round``' intrinsic returns the first
-operand rounded to the nearest integer.
+argument rounded to the nearest integer.
Arguments:
""""""""""
@@ -27101,7 +27101,7 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.roundeven``' intrinsic returns the first
-operand rounded to the nearest integer in floating-point format, rounding
+argument rounded to the nearest integer in floating-point format, rounding
halfway cases to even (that is, to the nearest value that is an even integer),
regardless of the current rounding direction.
@@ -27118,7 +27118,7 @@ Semantics:
This function implements IEEE-754 operation ``roundToIntegralTiesToEven``. It
also behaves in the same way as C standard function ``roundeven`` and can signal
-the invalid operation exception for a SNAN operand.
+the invalid operation exception for a SNAN argument.
'``llvm.experimental.constrained.lround``' Intrinsic
@@ -27137,8 +27137,8 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.lround``' intrinsic returns the first
-operand rounded to the nearest integer with ties away from zero. It will
-raise an inexact floating-point exception if the operand is not an integer.
+argument rounded to the nearest integer with ties away from zero. It will
+raise an inexact floating-point exception if the argument is not an integer.
An invalid exception is raised if the result is too large to fit into a
supported integer type, and in this case the result is undefined.
@@ -27175,8 +27175,8 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.llround``' intrinsic returns the first
-operand rounded to the nearest integer with ties away from zero. It will
-raise an inexact floating-point exception if the operand is not an integer.
+argument rounded to the nearest integer with ties away from zero. It will
+raise an inexact floating-point exception if the argument is not an integer.
An invalid exception is raised if the result is too large to fit into a
supported integer type, and in this case the result is undefined.
@@ -27213,8 +27213,8 @@ Overview:
"""""""""
The '``llvm.experimental.constrained.trunc``' intrinsic returns the first
-operand rounded to the nearest integer not larger in magnitude than the
-operand.
+argument rounded to the nearest integer not larger in magnitude than the
+argument.
Arguments:
""""""""""
>From 23db37c51cd3dcdcf069345aa7fab7d84b6f6f6e Mon Sep 17 00:00:00 2001
From: Igor Kudrin <ikudrin at accesssoftek.com>
Date: Wed, 3 Jul 2024 05:36:02 +0700
Subject: [PATCH 048/246] [CodeGen] Do not emit TRAP for `unreachable` after
`@llvm.trap` (#94570)
With `--trap-unreachable`, `clang` can emit two consecutive `TRAP` instructions
for code that contains a call to `__builtin_trap()`:
```
> cat test.c
void test() { __builtin_trap(); }
> clang test.c --target=x86_64 -mllvm --trap-unreachable -O1 -S -o -
...
test:
...
ud2
ud2
...
```
`SimplifyCFGPass` inserts `unreachable` after a call to a `noreturn`
function, and later this instruction causes `TRAP/G_TRAP` to be emitted
in `SelectionDAGBuilder::visitUnreachable()` or
`IRTranslator::translateUnreachable()` if
`TargetOptions.TrapUnreachable` is set.
The patch checks the instruction before `unreachable` and avoids
inserting an additional trap.
---
llvm/include/llvm/IR/Instructions.h | 12 +++++++
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 18 +++++-----
.../SelectionDAG/SelectionDAGBuilder.cpp | 12 ++++---
llvm/test/CodeGen/X86/trap.ll | 30 +++++-----------
llvm/test/CodeGen/X86/unreachable-trap.ll | 36 ++++++++++++++-----
.../test/CodeGen/X86/unreachable-ubsantrap.ll | 25 +++++++++++++
llvm/test/LTO/X86/unified-cfi.ll | 1 -
7 files changed, 88 insertions(+), 46 deletions(-)
create mode 100644 llvm/test/CodeGen/X86/unreachable-ubsantrap.ll
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index ab58edd1bf78c..c07fee58e4bdb 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -29,6 +29,7 @@
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
@@ -1520,6 +1521,17 @@ class CallInst : public CallBase {
bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
+ /// Return true if the call is for a noreturn trap intrinsic.
+ bool isNonContinuableTrap() const {
+ switch (getIntrinsicID()) {
+ case Intrinsic::trap:
+ case Intrinsic::ubsantrap:
+ return !hasFnAttr("trap-func-name");
+ default:
+ return false;
+ }
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Call;
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 552d4c9bb3875..7b96f4589f5c4 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3082,17 +3082,15 @@ bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuil
return true;
auto &UI = cast<UnreachableInst>(U);
+
// We may be able to ignore unreachable behind a noreturn call.
- if (MF->getTarget().Options.NoTrapAfterNoreturn) {
- const BasicBlock &BB = *UI.getParent();
- if (&UI != &BB.front()) {
- BasicBlock::const_iterator PredI =
- std::prev(BasicBlock::const_iterator(UI));
- if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
- if (Call->doesNotReturn())
- return true;
- }
- }
+ if (const CallInst *Call = dyn_cast_or_null<CallInst>(UI.getPrevNode());
+ Call && Call->doesNotReturn()) {
+ if (MF->getTarget().Options.NoTrapAfterNoreturn)
+ return true;
+ // Do not emit an additional trap instruction.
+ if (Call->isNonContinuableTrap())
+ return true;
}
MIRBuilder.buildTrap();
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 465919d03d8ca..ad809f836e336 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3541,11 +3541,13 @@ void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
return;
// We may be able to ignore unreachable behind a noreturn call.
- if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
- if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode())) {
- if (Call->doesNotReturn())
- return;
- }
+ if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode());
+ Call && Call->doesNotReturn()) {
+ if (DAG.getTarget().Options.NoTrapAfterNoreturn)
+ return;
+ // Do not emit an additional trap instruction.
+ if (Call->isNonContinuableTrap())
+ return;
}
DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
diff --git a/llvm/test/CodeGen/X86/trap.ll b/llvm/test/CodeGen/X86/trap.ll
index 1b1837a7c5a7a..3d9a858beda85 100644
--- a/llvm/test/CodeGen/X86/trap.ll
+++ b/llvm/test/CodeGen/X86/trap.ll
@@ -1,33 +1,22 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin8 -mcpu=yonah | FileCheck %s -check-prefix=DARWIN
-; RUN: llc < %s -mtriple=i686-unknown-linux -mcpu=yonah | FileCheck %s -check-prefix=LINUX
-; RUN: llc < %s -mtriple=x86_64-scei-ps4 | FileCheck %s -check-prefix=PS4
-; RUN: llc < %s -mtriple=x86_64-sie-ps5 | FileCheck %s -check-prefix=PS4
-; RUN: llc < %s -mtriple=x86_64-windows-msvc | FileCheck %s -check-prefix=WIN64
+; RUN: llc < %s -mtriple=i686-apple-darwin8 -mcpu=yonah | FileCheck %s -check-prefixes=CHECK,DARWIN
+; RUN: llc < %s -mtriple=i686-unknown-linux -mcpu=yonah | FileCheck %s -check-prefixes=CHECK,LINUX
+; RUN: llc < %s -mtriple=x86_64-scei-ps4 | FileCheck %s -check-prefixes=CHECK,PS4
+; RUN: llc < %s -mtriple=x86_64-sie-ps5 | FileCheck %s -check-prefixes=CHECK,PS4
+; RUN: llc < %s -mtriple=x86_64-windows-msvc | FileCheck %s -check-prefixes=CHECK,WIN64
-; DARWIN-LABEL: test0:
-; DARWIN: ud2
-; LINUX-LABEL: test0:
-; LINUX: ud2
-; FIXME: PS4 probably doesn't want two ud2s.
-; PS4-LABEL: test0:
-; PS4: ud2
-; PS4: ud2
-; WIN64-LABEL: test0:
-; WIN64: ud2
-; WIN64-NOT: ud2
+; CHECK-LABEL: test0:
+; CHECK: ud2
+; CHECK-NOT: ud2
define i32 @test0() noreturn nounwind {
entry:
tail call void @llvm.trap( )
unreachable
}
-; DARWIN-LABEL: test1:
+; CHECK-LABEL: test1:
; DARWIN: int3
-; LINUX-LABEL: test1:
; LINUX: int3
-; PS4-LABEL: test1:
; PS4: int $65
-; WIN64-LABEL: test1:
; WIN64: int3
; WIN64-NOT: ud2
define i32 @test1() noreturn nounwind {
@@ -38,4 +27,3 @@ entry:
declare void @llvm.trap() nounwind
declare void @llvm.debugtrap() nounwind
-
diff --git a/llvm/test/CodeGen/X86/unreachable-trap.ll b/llvm/test/CodeGen/X86/unreachable-trap.ll
index ee1a11c767784..d2704bf7b4620 100644
--- a/llvm/test/CodeGen/X86/unreachable-trap.ll
+++ b/llvm/test/CodeGen/X86/unreachable-trap.ll
@@ -1,13 +1,15 @@
-; RUN: llc -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,NORMAL
-; RUN: llc -o - %s -mtriple=x86_64-windows-msvc | FileCheck %s --check-prefixes=CHECK,NORMAL
+; RUN: llc -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -o - %s -mtriple=x86_64-windows-msvc | FileCheck %s --check-prefixes=CHECK
; RUN: llc -o - %s -mtriple=x86_64-scei-ps4 | FileCheck %s --check-prefixes=CHECK,TRAP_AFTER_NORETURN
; RUN: llc -o - %s -mtriple=x86_64-apple-darwin | FileCheck %s --check-prefixes=CHECK,NO_TRAP_AFTER_NORETURN
+; RUN: llc --trap-unreachable -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,TRAP_AFTER_NORETURN
+; RUN: llc --trap-unreachable -global-isel -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,TRAP_AFTER_NORETURN
+; RUN: llc --trap-unreachable -fast-isel -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,TRAP_AFTER_NORETURN
; CHECK-LABEL: call_exit:
; CHECK: callq {{_?}}exit
; TRAP_AFTER_NORETURN: ud2
-; NO_TRAP_AFTER_NORETURN-NOT: ud2
-; NORMAL-NOT: ud2
+; CHECK-NOT: ud2
define i32 @call_exit() noreturn nounwind {
tail call void @exit(i32 0)
unreachable
@@ -15,19 +17,35 @@ define i32 @call_exit() noreturn nounwind {
; CHECK-LABEL: trap:
; CHECK: ud2
-; TRAP_AFTER_NORETURN: ud2
-; NO_TRAP_AFTER_NORETURN-NOT: ud2
-; NORMAL-NOT: ud2
+; CHECK-NOT: ud2
define i32 @trap() noreturn nounwind {
tail call void @llvm.trap()
unreachable
}
+; CHECK-LABEL: trap_fn_attr:
+; CHECK: callq {{_?}}trap_func
+; TRAP_AFTER_NORETURN: ud2
+; CHECK-NOT: ud2
+define i32 @trap_fn_attr() noreturn nounwind {
+ tail call void @llvm.trap() "trap-func-name"="trap_func"
+ unreachable
+}
+
+; CHECK-LABEL: noreturn_indirect:
+; CHECK: callq *%r{{.+}}
+; TRAP_AFTER_NORETURN: ud2
+; CHECK-NOT: ud2
+define i32 @noreturn_indirect(ptr %fptr) noreturn nounwind {
+ tail call void (...) %fptr() noreturn nounwind
+ unreachable
+}
+
; CHECK-LABEL: unreachable:
; TRAP_AFTER_NORETURN: ud2
; NO_TRAP_AFTER_NORETURN: ud2
-; NORMAL-NOT: ud2
-; NORMAL: # -- End function
+; CHECK-NOT: ud2
+; CHECK: # -- End function
define i32 @unreachable() noreturn nounwind {
unreachable
}
diff --git a/llvm/test/CodeGen/X86/unreachable-ubsantrap.ll b/llvm/test/CodeGen/X86/unreachable-ubsantrap.ll
new file mode 100644
index 0000000000000..d02a12b6c3af9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/unreachable-ubsantrap.ll
@@ -0,0 +1,25 @@
+; RUN: llc -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -global-isel -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -fast-isel -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK
+; RUN: llc --trap-unreachable -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,TRAP_UNREACHABLE
+; RUN: llc --trap-unreachable -global-isel -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,TRAP_UNREACHABLE
+; RUN: llc --trap-unreachable -fast-isel -o - %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,TRAP_UNREACHABLE
+
+; CHECK-LABEL: ubsantrap:
+; CHECK: ud1l 12(%eax), %eax
+; CHECK-NOT: ud2
+define i32 @ubsantrap() noreturn nounwind {
+ call void @llvm.ubsantrap(i8 12)
+ unreachable
+}
+
+; CHECK-LABEL: ubsantrap_fn_attr:
+; CHECK: callq {{_?}}ubsantrap_func
+; TRAP_UNREACHABLE: ud2
+; CHECK-NOT: ud2
+define i32 @ubsantrap_fn_attr() noreturn nounwind {
+ call void @llvm.ubsantrap(i8 12) "trap-func-name"="ubsantrap_func"
+ unreachable
+}
+
+declare void @llvm.ubsantrap(i8) cold noreturn nounwind
diff --git a/llvm/test/LTO/X86/unified-cfi.ll b/llvm/test/LTO/X86/unified-cfi.ll
index f404136ca35f1..47dbe2b9f292a 100644
--- a/llvm/test/LTO/X86/unified-cfi.ll
+++ b/llvm/test/LTO/X86/unified-cfi.ll
@@ -6,7 +6,6 @@
; CHECK: jbe
; CHECK-NEXT: ud2
-; CHECK-NEXT: ud2
; ModuleID = 'llvm/test/LTO/X86/unified-cfi.ll'
source_filename = "cfi.cpp"
>From b5b0a22ecc58c9950fb4b1cd3532e75f42155978 Mon Sep 17 00:00:00 2001
From: Evgenii Kudriashov <evgenii.kudriashov at intel.com>
Date: Wed, 3 Jul 2024 01:56:50 +0300
Subject: [PATCH 049/246] [X86][GlobalISel] Support StructRet arguments
(#96629)
We follow the SelectionDAG and FastISel approach: record a register during
formal argument lowering and use this register to insert a copy of the
StructRet argument into the RAX register during return lowering.
Also add the RAX register to the RET instruction to fix a difference
between GlobalISel and SelectionDAG, where the copy instruction could
otherwise be deleted.
---
llvm/lib/Target/X86/GISel/X86CallLowering.cpp | 20 +++++++++---
.../GlobalISel/irtranslator-callingconv.ll | 32 +++++++++++++++++--
llvm/test/CodeGen/X86/isel-buildvector-sse.ll | 26 ++++++++-------
.../test/CodeGen/X86/isel-buildvector-sse2.ll | 23 ++++++-------
4 files changed, 72 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Target/X86/GISel/X86CallLowering.cpp b/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
index 8e5a23d95683b..3487efbbbec09 100644
--- a/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
@@ -16,6 +16,7 @@
#include "X86CallingConv.h"
#include "X86ISelLowering.h"
#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
@@ -147,12 +148,17 @@ bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
"Return value without a vreg");
MachineFunction &MF = MIRBuilder.getMF();
auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0);
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- bool Is64Bit = STI.is64Bit();
+ auto FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ const auto &STI = MF.getSubtarget<X86Subtarget>();
+ Register RetReg = STI.is64Bit() ? X86::RAX : X86::EAX;
if (!FLI.CanLowerReturn) {
insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
- MIRBuilder.buildCopy(Is64Bit ? X86::RAX : X86::EAX, FLI.DemoteRegister);
+ MIRBuilder.buildCopy(RetReg, FLI.DemoteRegister);
+ MIB.addReg(RetReg);
+ } else if (Register Reg = FuncInfo->getSRetReturnReg()) {
+ MIRBuilder.buildCopy(RetReg, Reg);
+ MIB.addReg(RetReg);
} else if (!VRegs.empty()) {
const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -258,6 +264,7 @@ bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
MachineFunction &MF = MIRBuilder.getMF();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto DL = MF.getDataLayout();
+ auto FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
SmallVector<ArgInfo, 8> SplitArgs;
@@ -273,12 +280,17 @@ bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
// TODO: handle non-simple cases.
if (Arg.hasAttribute(Attribute::ByVal) ||
Arg.hasAttribute(Attribute::InReg) ||
- Arg.hasAttribute(Attribute::StructRet) ||
Arg.hasAttribute(Attribute::SwiftSelf) ||
Arg.hasAttribute(Attribute::SwiftError) ||
Arg.hasAttribute(Attribute::Nest) || VRegs[Idx].size() > 1)
return false;
+ if (Arg.hasAttribute(Attribute::StructRet)) {
+ assert(VRegs[Idx].size() == 1 &&
+ "Unexpected amount of registers for sret argument.");
+ FuncInfo->setSRetReturnReg(VRegs[Idx][0]);
+ }
+
ArgInfo OrigArg(VRegs[Idx], Arg.getType(), Idx);
setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
diff --git a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
index 55e73dc5d29ec..a797c235c46f4 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -5,6 +5,7 @@
@a1_8bit = external global i8
@a7_8bit = external global i8
@a8_8bit = external global i8
+%struct.all = type { i8, i16, i32, i8, i16, i32, i64, float, double }
define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4, i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
; X86-LABEL: name: test_i8_args_8
@@ -745,7 +746,7 @@ define <32 x float> @test_return_v32f32() {
; X86-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
; X86-NEXT: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[LOAD]](p0) :: (store (<32 x s32>))
; X86-NEXT: $eax = COPY [[LOAD]](p0)
- ; X86-NEXT: RET 0
+ ; X86-NEXT: RET 0, $eax
;
; X64-LABEL: name: test_return_v32f32
; X64: bb.1 (%ir-block.0):
@@ -756,7 +757,7 @@ define <32 x float> @test_return_v32f32() {
; X64-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
; X64-NEXT: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[COPY]](p0) :: (store (<32 x s32>))
; X64-NEXT: $rax = COPY [[COPY]](p0)
- ; X64-NEXT: RET 0
+ ; X64-NEXT: RET 0, $rax
ret <32 x float> zeroinitializer
}
@@ -793,3 +794,30 @@ define float @test_call_v32f32() {
%elt = extractelement <32 x float> %vect, i32 7
ret float %elt
}
+
+define void @test_sret(ptr sret(%struct.all) align 8 %result) #0 {
+ ; X86-LABEL: name: test_sret
+ ; X86: bb.1.entry:
+ ; X86-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; X86-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.0, align 16)
+ ; X86-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 104
+ ; X86-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[LOAD]](p0)
+ ; X86-NEXT: G_STORE [[C]](s8), [[COPY]](p0) :: (store (s8) into %ir.c, align 8)
+ ; X86-NEXT: $eax = COPY [[LOAD]](p0)
+ ; X86-NEXT: RET 0, $eax
+ ;
+ ; X64-LABEL: name: test_sret
+ ; X64: bb.1.entry:
+ ; X64-NEXT: liveins: $rdi
+ ; X64-NEXT: {{ $}}
+ ; X64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 104
+ ; X64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
+ ; X64-NEXT: G_STORE [[C]](s8), [[COPY1]](p0) :: (store (s8) into %ir.c, align 8)
+ ; X64-NEXT: $rax = COPY [[COPY]](p0)
+ ; X64-NEXT: RET 0, $rax
+entry:
+ %c = getelementptr inbounds %struct.all, ptr %result, i32 0, i32 0
+ store i8 104, ptr %c, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/isel-buildvector-sse.ll b/llvm/test/CodeGen/X86/isel-buildvector-sse.ll
index 5b96d57cf019b..7f580aad78764 100644
--- a/llvm/test/CodeGen/X86/isel-buildvector-sse.ll
+++ b/llvm/test/CodeGen/X86/isel-buildvector-sse.ll
@@ -22,22 +22,23 @@ define <8 x i32> @test_vector_v8i32() {
;
; SSE-X64-GISEL-LABEL: test_vector_v8i32:
; SSE-X64-GISEL: # %bb.0:
-; SSE-X64-GISEL-NEXT: movl $128100944, %eax # imm = 0x7A2AA50
-; SSE-X64-GISEL-NEXT: movl $-632258670, %ecx # imm = 0xDA507F92
-; SSE-X64-GISEL-NEXT: movl $-408980432, %edx # imm = 0xE79F7430
-; SSE-X64-GISEL-NEXT: movl $708630551, %esi # imm = 0x2A3CD817
+; SSE-X64-GISEL-NEXT: movq %rdi, %rax
+; SSE-X64-GISEL-NEXT: movl $128100944, %ecx # imm = 0x7A2AA50
+; SSE-X64-GISEL-NEXT: movl $-632258670, %edx # imm = 0xDA507F92
+; SSE-X64-GISEL-NEXT: movl $-408980432, %esi # imm = 0xE79F7430
+; SSE-X64-GISEL-NEXT: movl $708630551, %edi # imm = 0x2A3CD817
; SSE-X64-GISEL-NEXT: movl $-871899055, %r8d # imm = 0xCC07E051
; SSE-X64-GISEL-NEXT: movl $-633489957, %r9d # imm = 0xDA3DB5DB
; SSE-X64-GISEL-NEXT: movl $591019567, %r10d # imm = 0x233A3E2F
; SSE-X64-GISEL-NEXT: movl $708632899, %r11d # imm = 0x2A3CE143
-; SSE-X64-GISEL-NEXT: movl %eax, (%rdi)
-; SSE-X64-GISEL-NEXT: movl %ecx, 4(%rdi)
-; SSE-X64-GISEL-NEXT: movl %edx, 8(%rdi)
-; SSE-X64-GISEL-NEXT: movl %esi, 12(%rdi)
-; SSE-X64-GISEL-NEXT: movl %r8d, 16(%rdi)
-; SSE-X64-GISEL-NEXT: movl %r9d, 20(%rdi)
-; SSE-X64-GISEL-NEXT: movl %r10d, 24(%rdi)
-; SSE-X64-GISEL-NEXT: movl %r11d, 28(%rdi)
+; SSE-X64-GISEL-NEXT: movl %ecx, (%rax)
+; SSE-X64-GISEL-NEXT: movl %edx, 4(%rax)
+; SSE-X64-GISEL-NEXT: movl %esi, 8(%rax)
+; SSE-X64-GISEL-NEXT: movl %edi, 12(%rax)
+; SSE-X64-GISEL-NEXT: movl %r8d, 16(%rax)
+; SSE-X64-GISEL-NEXT: movl %r9d, 20(%rax)
+; SSE-X64-GISEL-NEXT: movl %r10d, 24(%rax)
+; SSE-X64-GISEL-NEXT: movl %r11d, 28(%rax)
; SSE-X64-GISEL-NEXT: retq
;
; SSE-X86-LABEL: test_vector_v8i32:
@@ -88,6 +89,7 @@ define <4 x i32> @test_vector_v4i32() {
;
; SSE-X64-GISEL-LABEL: test_vector_v4i32:
; SSE-X64-GISEL: # %bb.0:
+; SSE-X64-GISEL-NEXT: movq %rdi, %rax
; SSE-X64-GISEL-NEXT: movaps {{.*#+}} xmm0 = [128100944,3662708626,3885986864,708630551]
; SSE-X64-GISEL-NEXT: movaps %xmm0, (%rdi)
; SSE-X64-GISEL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/isel-buildvector-sse2.ll b/llvm/test/CodeGen/X86/isel-buildvector-sse2.ll
index 88e0ede0d4b6f..da089dda6d03d 100644
--- a/llvm/test/CodeGen/X86/isel-buildvector-sse2.ll
+++ b/llvm/test/CodeGen/X86/isel-buildvector-sse2.ll
@@ -19,20 +19,21 @@ define <7 x i8> @test_vector_v7i8() {
;
; SSE2-GISEL-LABEL: test_vector_v7i8:
; SSE2-GISEL: # %bb.0:
-; SSE2-GISEL-NEXT: movb $4, %al
-; SSE2-GISEL-NEXT: movb $8, %cl
-; SSE2-GISEL-NEXT: movb $15, %dl
-; SSE2-GISEL-NEXT: movb $16, %sil
+; SSE2-GISEL-NEXT: movq %rdi, %rax
+; SSE2-GISEL-NEXT: movb $4, %cl
+; SSE2-GISEL-NEXT: movb $8, %dl
+; SSE2-GISEL-NEXT: movb $15, %sil
+; SSE2-GISEL-NEXT: movb $16, %dil
; SSE2-GISEL-NEXT: movb $23, %r8b
; SSE2-GISEL-NEXT: movb $42, %r9b
; SSE2-GISEL-NEXT: movb $63, %r10b
-; SSE2-GISEL-NEXT: movb %al, (%rdi)
-; SSE2-GISEL-NEXT: movb %cl, 1(%rdi)
-; SSE2-GISEL-NEXT: movb %dl, 2(%rdi)
-; SSE2-GISEL-NEXT: movb %sil, 3(%rdi)
-; SSE2-GISEL-NEXT: movb %r8b, 4(%rdi)
-; SSE2-GISEL-NEXT: movb %r9b, 5(%rdi)
-; SSE2-GISEL-NEXT: movb %r10b, 6(%rdi)
+; SSE2-GISEL-NEXT: movb %cl, (%rax)
+; SSE2-GISEL-NEXT: movb %dl, 1(%rax)
+; SSE2-GISEL-NEXT: movb %sil, 2(%rax)
+; SSE2-GISEL-NEXT: movb %dil, 3(%rax)
+; SSE2-GISEL-NEXT: movb %r8b, 4(%rax)
+; SSE2-GISEL-NEXT: movb %r9b, 5(%rax)
+; SSE2-GISEL-NEXT: movb %r10b, 6(%rax)
; SSE2-GISEL-NEXT: retq
ret <7 x i8> <i8 4, i8 8, i8 15, i8 16, i8 23, i8 42, i8 63>
}
>From 594bc520a8a14617bdfc158f4c78fa70567dab19 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Tue, 2 Jul 2024 18:11:48 -0500
Subject: [PATCH 050/246] [lld] Change `--lto-emit-llvm` to use the pre-codegen
module (#97480)
Summary:
Currently the `--lto-emit-llvm` option writes out the
post-internalization bitcode. This is the bitcode before any
optimizations or other pipelines have been run on it. This patch changes
that to use the pre-codegen module, which is the state of the LLVM-IR
after the optimizations have been run.
I believe that this makes sense, as the `--lto-emit-llvm` option seems to
imply that we should emit the final output of the LLVM optimization
pipeline as if it were the desired output. This should include optimizations at the
requested optimization level. My main motivation for this change is to
be able to use this to link several LLVM-IR files into a single one that
I can then pass back to `ld.lld` later (for JIT purposes).
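As a usage note (not part of the patch, inputs hypothetical): based on the hook shown below, a link such as `ld.lld --lto-emit-llvm -o merged.bc a.o b.o` should now write the optimized, pre-codegen bitcode of the merged module to the linker's output file, which can then be fed back to `ld.lld` in a later step.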
---
lld/ELF/LTO.cpp | 2 +-
lld/test/ELF/lto/emit-llvm.ll | 6 ++++--
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/lld/ELF/LTO.cpp b/lld/ELF/LTO.cpp
index 3d92007469263..935d0a9eab9ee 100644
--- a/lld/ELF/LTO.cpp
+++ b/lld/ELF/LTO.cpp
@@ -147,7 +147,7 @@ static lto::Config createConfig() {
c.PGOWarnMismatch = config->ltoPGOWarnMismatch;
if (config->emitLLVM) {
- c.PostInternalizeModuleHook = [](size_t task, const Module &m) {
+ c.PreCodeGenModuleHook = [](size_t task, const Module &m) {
if (std::unique_ptr<raw_fd_ostream> os =
openLTOOutputFile(config->outputFile))
WriteBitcodeToFile(m, *os, false);
diff --git a/lld/test/ELF/lto/emit-llvm.ll b/lld/test/ELF/lto/emit-llvm.ll
index 01f5a056e0c0d..37488016a4bc2 100644
--- a/lld/test/ELF/lto/emit-llvm.ll
+++ b/lld/test/ELF/lto/emit-llvm.ll
@@ -9,11 +9,13 @@
; RUN: ld.lld --plugin-opt=emit-llvm -mllvm -bitcode-flush-threshold=0 -o /dev/null %t.o
; RUN: ld.lld --lto-emit-llvm -mllvm -bitcode-flush-threshold=0 -o /dev/null %t.o
-; CHECK: define internal void @main()
+; CHECK: define hidden void @main()
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define void @main() {
+@llvm.compiler.used = appending global [1 x ptr] [ptr @main], section "llvm.metadata"
+
+define hidden void @main() {
ret void
}
>From 4b28b3fae4eb0808a135a7ec73a2f4a7257a6652 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Tue, 2 Jul 2024 16:20:44 -0700
Subject: [PATCH 051/246] [Transforms] Use range-based for loops (NFC) (#97195)
---
llvm/lib/Transforms/IPO/Attributor.cpp | 9 ++++-----
.../Transforms/IPO/DeadArgumentElimination.cpp | 4 +---
.../Instrumentation/MemorySanitizer.cpp | 16 +++++-----------
llvm/lib/Transforms/Scalar/Scalarizer.cpp | 4 ++--
llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp | 4 +---
llvm/lib/Transforms/Utils/InlineFunction.cpp | 11 ++++-------
llvm/lib/Transforms/Utils/Local.cpp | 4 +---
llvm/lib/Transforms/Utils/LoopSimplify.cpp | 13 ++++++-------
8 files changed, 24 insertions(+), 41 deletions(-)
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index 5286228a97941..10660b9cb3ca1 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -2385,8 +2385,7 @@ void Attributor::identifyDeadInternalFunctions() {
bool FoundLiveInternal = true;
while (FoundLiveInternal) {
FoundLiveInternal = false;
- for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
- Function *F = InternalFns[u];
+ for (Function *&F : InternalFns) {
if (!F)
continue;
@@ -2403,13 +2402,13 @@ void Attributor::identifyDeadInternalFunctions() {
}
LiveInternalFns.insert(F);
- InternalFns[u] = nullptr;
+ F = nullptr;
FoundLiveInternal = true;
}
}
- for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
- if (Function *F = InternalFns[u])
+ for (Function *F : InternalFns)
+ if (F)
ToBeDeletedFunctions.insert(F);
}
diff --git a/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index f19031383f5cb..a164c82bdf75d 100644
--- a/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -319,9 +319,7 @@ bool DeadArgumentEliminationPass::removeDeadArgumentsFromCallers(Function &F) {
continue;
// Now go through all unused args and replace them with poison.
- for (unsigned I = 0, E = UnusedArgs.size(); I != E; ++I) {
- unsigned ArgNo = UnusedArgs[I];
-
+ for (unsigned ArgNo : UnusedArgs) {
Value *Arg = CB->getArgOperand(ArgNo);
CB->setArgOperand(ArgNo, PoisonValue::get(Arg->getType()));
CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 905d2671aa43e..d0dbb108b1eca 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -5114,8 +5114,7 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
- for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
- CallInst *OrigInst = VAStartInstrumentationList[i];
+ for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
@@ -5224,8 +5223,7 @@ struct VarArgMIPS64Helper : public VarArgHelperBase {
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
- for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
- CallInst *OrigInst = VAStartInstrumentationList[i];
+ for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
@@ -5399,8 +5397,7 @@ struct VarArgAArch64Helper : public VarArgHelperBase {
// Instrument va_start, copy va_list shadow from the backup copy of
// the TLS contents.
- for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
- CallInst *OrigInst = VAStartInstrumentationList[i];
+ for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
@@ -5610,8 +5607,7 @@ struct VarArgPowerPC64Helper : public VarArgHelperBase {
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
- for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
- CallInst *OrigInst = VAStartInstrumentationList[i];
+ for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
@@ -5907,9 +5903,7 @@ struct VarArgSystemZHelper : public VarArgHelperBase {
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
- for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
- VaStartNo < VaStartNum; VaStartNo++) {
- CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
+ for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
copyRegSaveArea(IRB, VAListTag);
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 9930c1a294440..2bed3480da1cd 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -523,8 +523,8 @@ void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
const ValueVector &CV) {
SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
Op->getAllMetadataOtherThanDebugLoc(MDs);
- for (unsigned I = 0, E = CV.size(); I != E; ++I) {
- if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
+ for (Value *V : CV) {
+ if (Instruction *New = dyn_cast<Instruction>(V)) {
for (const auto &MD : MDs)
if (canTransferMetadata(MD.first))
New->setMetadata(MD.first, MD.second);
diff --git a/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp b/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
index c4b24bc3d190e..be8264f1f42e5 100644
--- a/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
+++ b/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
@@ -409,9 +409,7 @@ callBufferedPrintfArgPush(IRBuilder<> &Builder, ArrayRef<Value *> Args,
WhatToStore.push_back(processNonStringArg(Args[i], Builder));
}
- for (unsigned I = 0, E = WhatToStore.size(); I != E; ++I) {
- Value *toStore = WhatToStore[I];
-
+ for (Value *toStore : WhatToStore) {
StoreInst *StBuff = Builder.CreateStore(toStore, PtrToStore);
LLVM_DEBUG(dbgs() << "inserting store to printf buffer:" << *StBuff
<< '\n');
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 036527c797e89..6cb6540d1a7b6 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -2631,8 +2631,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
!IFI.StaticAllocas.empty()) {
IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
- for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
- AllocaInst *AI = IFI.StaticAllocas[ai];
+ for (AllocaInst *AI : IFI.StaticAllocas) {
// Don't mark swifterror allocas. They can't have bitcast uses.
if (AI->isSwiftError())
continue;
@@ -2969,8 +2968,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// Loop over all of the return instructions adding entries to the PHI node
// as appropriate.
if (PHI) {
- for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
- ReturnInst *RI = Returns[i];
+ for (ReturnInst *RI : Returns) {
assert(RI->getReturnValue()->getType() == PHI->getType() &&
"Ret value not consistent in function!");
PHI->addIncoming(RI->getReturnValue(), RI->getParent());
@@ -2979,9 +2977,8 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// Add a branch to the merge points and remove return instructions.
DebugLoc Loc;
- for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
- ReturnInst *RI = Returns[i];
- BranchInst* BI = BranchInst::Create(AfterCallBB, RI->getIterator());
+ for (ReturnInst *RI : Returns) {
+ BranchInst *BI = BranchInst::Create(AfterCallBB, RI->getIterator());
Loc = RI->getDebugLoc();
BI->setDebugLoc(Loc);
RI->eraseFromParent();
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 6ebf9e104ac14..7192efe3f16b9 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1095,11 +1095,9 @@ static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);
} else {
- for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
+ for (BasicBlock *PredBB : BBPreds) {
// Update existing incoming values in PN for this
// predecessor of BB.
- BasicBlock *PredBB = BBPreds[i];
-
if (PredBB == CommonPred)
continue;
diff --git a/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index 66b59cdc784d3..a764fef574911 100644
--- a/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -83,8 +83,8 @@ static void placeSplitBlockCarefully(BasicBlock *NewBB,
Loop *L) {
// Check to see if NewBB is already well placed.
Function::iterator BBI = --NewBB->getIterator();
- for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
- if (&*BBI == SplitPreds[i])
+ for (BasicBlock *Pred : SplitPreds) {
+ if (&*BBI == Pred)
return;
}
@@ -95,10 +95,10 @@ static void placeSplitBlockCarefully(BasicBlock *NewBB,
// Figure out *which* outside block to put this after. Prefer an outside
// block that neighbors a BB actually in the loop.
BasicBlock *FoundBB = nullptr;
- for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
- Function::iterator BBI = SplitPreds[i]->getIterator();
+ for (BasicBlock *Pred : SplitPreds) {
+ Function::iterator BBI = Pred->getIterator();
if (++BBI != NewBB->getParent()->end() && L->contains(&*BBI)) {
- FoundBB = SplitPreds[i];
+ FoundBB = Pred;
break;
}
}
@@ -630,8 +630,7 @@ static bool simplifyOneLoop(Loop *L, SmallVectorImpl<Loop *> &Worklist,
return true;
};
if (HasUniqueExitBlock()) {
- for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
- BasicBlock *ExitingBlock = ExitingBlocks[i];
+ for (BasicBlock *ExitingBlock : ExitingBlocks) {
if (!ExitingBlock->getSinglePredecessor()) continue;
BranchInst *BI = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
if (!BI || !BI->isConditional()) continue;
>From 4e567242133678c88a6cb5aeb979c6148f6a7035 Mon Sep 17 00:00:00 2001
From: OverMighty <its.overmighty at gmail.com>
Date: Wed, 3 Jul 2024 01:27:09 +0200
Subject: [PATCH 052/246] [libc][math][c23] Add f16{add,sub}{,l,f128} C23 math
functions (#97072)
Part of #93566.
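
For readers unfamiliar with the C23 naming scheme: each of these functions
computes the sum or difference of two operands of a wider type and rounds
the exact result once to _Float16 (no double rounding). A minimal
caller-side sketch, purely illustrative and assuming a libc build with
LIBC_TYPES_HAS_FLOAT16 enabled, math.h exposing the new prototypes, and a
compiler with _Float16 support:

  #include <math.h>
  #include <cstdio>

  int main() {
    _Float16 s = f16add(0.1, 0.2);     // double operands, rounded once to _Float16
    _Float16 d = f16subl(1.0L, 0.25L); // long double operands
    std::printf("%f %f\n", (double)s, (double)d);
  }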
---
libc/config/linux/aarch64/entrypoints.txt | 2 +
libc/config/linux/x86_64/entrypoints.txt | 6 ++
libc/docs/math/index.rst | 4 +-
libc/spec/llvm_libc_ext.td | 4 +
libc/spec/stdc.td | 4 +
libc/src/math/CMakeLists.txt | 6 ++
libc/src/math/f16add.h | 20 +++++
libc/src/math/f16addf128.h | 20 +++++
libc/src/math/f16addl.h | 20 +++++
libc/src/math/f16sub.h | 20 +++++
libc/src/math/f16subf128.h | 20 +++++
libc/src/math/f16subl.h | 20 +++++
libc/src/math/generic/CMakeLists.txt | 78 ++++++++++++++++++
libc/src/math/generic/f16add.cpp | 19 +++++
libc/src/math/generic/f16addf128.cpp | 19 +++++
libc/src/math/generic/f16addl.cpp | 19 +++++
libc/src/math/generic/f16sub.cpp | 19 +++++
libc/src/math/generic/f16subf128.cpp | 19 +++++
libc/src/math/generic/f16subl.cpp | 19 +++++
libc/test/src/math/CMakeLists.txt | 52 ++++++++++++
libc/test/src/math/f16add_test.cpp | 13 +++
libc/test/src/math/f16addl_test.cpp | 13 +++
libc/test/src/math/f16sub_test.cpp | 13 +++
libc/test/src/math/f16subl_test.cpp | 13 +++
libc/test/src/math/smoke/CMakeLists.txt | 87 ++++++++++++++++++++
libc/test/src/math/smoke/f16add_test.cpp | 13 +++
libc/test/src/math/smoke/f16addf128_test.cpp | 13 +++
libc/test/src/math/smoke/f16addl_test.cpp | 13 +++
libc/test/src/math/smoke/f16sub_test.cpp | 13 +++
libc/test/src/math/smoke/f16subf128_test.cpp | 13 +++
libc/test/src/math/smoke/f16subl_test.cpp | 13 +++
31 files changed, 605 insertions(+), 2 deletions(-)
create mode 100644 libc/src/math/f16add.h
create mode 100644 libc/src/math/f16addf128.h
create mode 100644 libc/src/math/f16addl.h
create mode 100644 libc/src/math/f16sub.h
create mode 100644 libc/src/math/f16subf128.h
create mode 100644 libc/src/math/f16subl.h
create mode 100644 libc/src/math/generic/f16add.cpp
create mode 100644 libc/src/math/generic/f16addf128.cpp
create mode 100644 libc/src/math/generic/f16addl.cpp
create mode 100644 libc/src/math/generic/f16sub.cpp
create mode 100644 libc/src/math/generic/f16subf128.cpp
create mode 100644 libc/src/math/generic/f16subl.cpp
create mode 100644 libc/test/src/math/f16add_test.cpp
create mode 100644 libc/test/src/math/f16addl_test.cpp
create mode 100644 libc/test/src/math/f16sub_test.cpp
create mode 100644 libc/test/src/math/f16subl_test.cpp
create mode 100644 libc/test/src/math/smoke/f16add_test.cpp
create mode 100644 libc/test/src/math/smoke/f16addf128_test.cpp
create mode 100644 libc/test/src/math/smoke/f16addl_test.cpp
create mode 100644 libc/test/src/math/smoke/f16sub_test.cpp
create mode 100644 libc/test/src/math/smoke/f16subf128_test.cpp
create mode 100644 libc/test/src/math/smoke/f16subl_test.cpp
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index ff35e8fffec19..940df63e3912b 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -508,12 +508,14 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.canonicalizef16
libc.src.math.ceilf16
libc.src.math.copysignf16
+ libc.src.math.f16add
libc.src.math.f16addf
libc.src.math.f16div
libc.src.math.f16divf
libc.src.math.f16fmaf
libc.src.math.f16sqrt
libc.src.math.f16sqrtf
+ libc.src.math.f16sub
libc.src.math.f16subf
libc.src.math.fabsf16
libc.src.math.fdimf16
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 3eefa129c9758..09f04fb31dfd8 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -538,7 +538,9 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.canonicalizef16
libc.src.math.ceilf16
libc.src.math.copysignf16
+ libc.src.math.f16add
libc.src.math.f16addf
+ libc.src.math.f16addl
libc.src.math.f16div
libc.src.math.f16divf
libc.src.math.f16divl
@@ -548,7 +550,9 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.f16sqrt
libc.src.math.f16sqrtf
libc.src.math.f16sqrtl
+ libc.src.math.f16sub
libc.src.math.f16subf
+ libc.src.math.f16subl
libc.src.math.fabsf16
libc.src.math.fdimf16
libc.src.math.floorf16
@@ -601,9 +605,11 @@ if(LIBC_TYPES_HAS_FLOAT16)
if(LIBC_TYPES_HAS_FLOAT128)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# math.h C23 mixed _Float16 and _Float128 entrypoints
+ libc.src.math.f16addf128
libc.src.math.f16divf128
libc.src.math.f16fmaf128
libc.src.math.f16sqrtf128
+ libc.src.math.f16subf128
)
endif()
endif()
diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst
index e4da3d42baf7a..04f63d03778a8 100644
--- a/libc/docs/math/index.rst
+++ b/libc/docs/math/index.rst
@@ -124,13 +124,13 @@ Basic Operations
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| dsub | N/A | N/A | | N/A | | 7.12.14.2 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| f16add | |check|\* | | | N/A | | 7.12.14.1 | F.10.11 |
+| f16add | |check|\* | |check|\* | |check|\* | N/A | |check| | 7.12.14.1 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| f16div | |check|\* | |check|\* | |check|\* | N/A | |check| | 7.12.14.4 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| f16fma | |check| | |check| | |check| | N/A | |check| | 7.12.14.5 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| f16sub | |check|\* | | | N/A | | 7.12.14.2 | F.10.11 |
+| f16sub | |check|\* | |check|\* | |check|\* | N/A | |check| | 7.12.14.2 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| fabs | |check| | |check| | |check| | |check| | |check| | 7.12.7.3 | F.10.4.3 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
diff --git a/libc/spec/llvm_libc_ext.td b/libc/spec/llvm_libc_ext.td
index ff7831f8bce16..b994e7ca56a93 100644
--- a/libc/spec/llvm_libc_ext.td
+++ b/libc/spec/llvm_libc_ext.td
@@ -57,9 +57,13 @@ def LLVMLibcExt : StandardSpec<"llvm_libc_ext"> {
[], // Types
[], // Enumerations
[
+ GuardedFunctionSpec<"f16add", RetValSpec<Float16Type>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"f16addf", RetValSpec<Float16Type>, [ArgSpec<FloatType>, ArgSpec<FloatType>], "LIBC_TYPES_HAS_FLOAT16">,
+ GuardedFunctionSpec<"f16addl", RetValSpec<Float16Type>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>], "LIBC_TYPES_HAS_FLOAT16">,
+ GuardedFunctionSpec<"f16sub", RetValSpec<Float16Type>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"f16subf", RetValSpec<Float16Type>, [ArgSpec<FloatType>, ArgSpec<FloatType>], "LIBC_TYPES_HAS_FLOAT16">,
+ GuardedFunctionSpec<"f16subl", RetValSpec<Float16Type>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"f16div", RetValSpec<Float16Type>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"f16divf", RetValSpec<Float16Type>, [ArgSpec<FloatType>, ArgSpec<FloatType>], "LIBC_TYPES_HAS_FLOAT16">,
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index 6e59062dde727..9ff40bf76700c 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -729,6 +729,10 @@ def StdC : StandardSpec<"stdc"> {
GuardedFunctionSpec<"setpayloadsigf16", RetValSpec<IntType>, [ArgSpec<Float16Ptr>, ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
+ GuardedFunctionSpec<"f16addf128", RetValSpec<Float16Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128">,
+
+ GuardedFunctionSpec<"f16subf128", RetValSpec<Float16Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128">,
+
GuardedFunctionSpec<"f16divf128", RetValSpec<Float16Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128">,
GuardedFunctionSpec<"f16sqrtf128", RetValSpec<Float16Type>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128">,
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 5b20913134fdf..0983d268bd4b8 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -99,7 +99,10 @@ add_math_entrypoint_object(exp10f)
add_math_entrypoint_object(expm1)
add_math_entrypoint_object(expm1f)
+add_math_entrypoint_object(f16add)
add_math_entrypoint_object(f16addf)
+add_math_entrypoint_object(f16addl)
+add_math_entrypoint_object(f16addf128)
add_math_entrypoint_object(f16div)
add_math_entrypoint_object(f16divf)
@@ -116,7 +119,10 @@ add_math_entrypoint_object(f16sqrtf)
add_math_entrypoint_object(f16sqrtl)
add_math_entrypoint_object(f16sqrtf128)
+add_math_entrypoint_object(f16sub)
add_math_entrypoint_object(f16subf)
+add_math_entrypoint_object(f16subl)
+add_math_entrypoint_object(f16subf128)
add_math_entrypoint_object(fabs)
add_math_entrypoint_object(fabsf)
diff --git a/libc/src/math/f16add.h b/libc/src/math/f16add.h
new file mode 100644
index 0000000000000..763a0787d860d
--- /dev/null
+++ b/libc/src/math/f16add.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for f16add ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_F16ADD_H
+#define LLVM_LIBC_SRC_MATH_F16ADD_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float16 f16add(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_F16ADD_H
diff --git a/libc/src/math/f16addf128.h b/libc/src/math/f16addf128.h
new file mode 100644
index 0000000000000..284ce1d303775
--- /dev/null
+++ b/libc/src/math/f16addf128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for f16addf128 --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_F16ADDF128_H
+#define LLVM_LIBC_SRC_MATH_F16ADDF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float16 f16addf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_F16ADDF128_H
diff --git a/libc/src/math/f16addl.h b/libc/src/math/f16addl.h
new file mode 100644
index 0000000000000..6a7267a10d0c6
--- /dev/null
+++ b/libc/src/math/f16addl.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for f16addl -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_F16ADDL_H
+#define LLVM_LIBC_SRC_MATH_F16ADDL_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float16 f16addl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_F16ADDL_H
diff --git a/libc/src/math/f16sub.h b/libc/src/math/f16sub.h
new file mode 100644
index 0000000000000..66f82daada019
--- /dev/null
+++ b/libc/src/math/f16sub.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for f16sub ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_F16SUB_H
+#define LLVM_LIBC_SRC_MATH_F16SUB_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float16 f16sub(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_F16SUB_H
diff --git a/libc/src/math/f16subf128.h b/libc/src/math/f16subf128.h
new file mode 100644
index 0000000000000..eb674297ba266
--- /dev/null
+++ b/libc/src/math/f16subf128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for f16subf128 --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_F16SUBF128_H
+#define LLVM_LIBC_SRC_MATH_F16SUBF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float16 f16subf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_F16SUBF128_H
diff --git a/libc/src/math/f16subl.h b/libc/src/math/f16subl.h
new file mode 100644
index 0000000000000..43b44a57a604e
--- /dev/null
+++ b/libc/src/math/f16subl.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for f16subl -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_F16SUBL_H
+#define LLVM_LIBC_SRC_MATH_F16SUBL_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float16 f16subl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_F16SUBL_H
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index d6ea8c54174b6..2e4ed8f2961da 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -3795,6 +3795,19 @@ add_entrypoint_object(
-O3
)
+add_entrypoint_object(
+ f16add
+ SRCS
+ f16add.cpp
+ HDRS
+ ../f16add.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
add_entrypoint_object(
f16addf
SRCS
@@ -3808,6 +3821,45 @@ add_entrypoint_object(
-O3
)
+add_entrypoint_object(
+ f16addl
+ SRCS
+ f16addl.cpp
+ HDRS
+ ../f16addl.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ f16addf128
+ SRCS
+ f16addf128.cpp
+ HDRS
+ ../f16addf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ f16sub
+ SRCS
+ f16sub.cpp
+ HDRS
+ ../f16sub.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
add_entrypoint_object(
f16subf
SRCS
@@ -3821,6 +3873,32 @@ add_entrypoint_object(
-O3
)
+add_entrypoint_object(
+ f16subl
+ SRCS
+ f16subl.cpp
+ HDRS
+ ../f16subl.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ f16subf128
+ SRCS
+ f16subf128.cpp
+ HDRS
+ ../f16subf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
add_entrypoint_object(
f16div
SRCS
diff --git a/libc/src/math/generic/f16add.cpp b/libc/src/math/generic/f16add.cpp
new file mode 100644
index 0000000000000..ef9b43e9f46aa
--- /dev/null
+++ b/libc/src/math/generic/f16add.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of f16add function ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/f16add.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float16, f16add, (double x, double y)) {
+ return fputil::generic::add<float16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/f16addf128.cpp b/libc/src/math/generic/f16addf128.cpp
new file mode 100644
index 0000000000000..61c458f7d5de1
--- /dev/null
+++ b/libc/src/math/generic/f16addf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of f16addf128 function -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/f16addf128.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float16, f16addf128, (float128 x, float128 y)) {
+ return fputil::generic::add<float16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/f16addl.cpp b/libc/src/math/generic/f16addl.cpp
new file mode 100644
index 0000000000000..d32d09d0dbb83
--- /dev/null
+++ b/libc/src/math/generic/f16addl.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of f16addl function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/f16addl.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float16, f16addl, (long double x, long double y)) {
+ return fputil::generic::add<float16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/f16sub.cpp b/libc/src/math/generic/f16sub.cpp
new file mode 100644
index 0000000000000..114c8ad3155e1
--- /dev/null
+++ b/libc/src/math/generic/f16sub.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of f16sub function ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/f16sub.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float16, f16sub, (double x, double y)) {
+ return fputil::generic::sub<float16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/f16subf128.cpp b/libc/src/math/generic/f16subf128.cpp
new file mode 100644
index 0000000000000..1f9ff28abdf29
--- /dev/null
+++ b/libc/src/math/generic/f16subf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of f16subf128 function -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/f16subf128.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float16, f16subf128, (float128 x, float128 y)) {
+ return fputil::generic::sub<float16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/f16subl.cpp b/libc/src/math/generic/f16subl.cpp
new file mode 100644
index 0000000000000..31970af9a2366
--- /dev/null
+++ b/libc/src/math/generic/f16subl.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of f16subl function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/f16subl.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float16, f16subl, (long double x, long double y)) {
+ return fputil::generic::sub<float16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt
index 637e6720400ff..f919634ae108c 100644
--- a/libc/test/src/math/CMakeLists.txt
+++ b/libc/test/src/math/CMakeLists.txt
@@ -1996,6 +1996,19 @@ add_fp_unittest(
libc.src.__support.FPUtil.fp_bits
)
+add_fp_unittest(
+ f16add_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ f16add_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.src.math.f16add
+)
+
add_fp_unittest(
f16addf_test
NEED_MPFR
@@ -2009,6 +2022,32 @@ add_fp_unittest(
libc.src.math.f16addf
)
+add_fp_unittest(
+ f16addl_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ f16addl_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.src.math.f16addl
+)
+
+add_fp_unittest(
+ f16sub_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ f16sub_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.src.math.f16sub
+)
+
add_fp_unittest(
f16subf_test
NEED_MPFR
@@ -2022,6 +2061,19 @@ add_fp_unittest(
libc.src.math.f16subf
)
+add_fp_unittest(
+ f16subl_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ f16subl_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.src.math.f16subl
+)
+
add_fp_unittest(
f16div_test
NEED_MPFR
diff --git a/libc/test/src/math/f16add_test.cpp b/libc/test/src/math/f16add_test.cpp
new file mode 100644
index 0000000000000..c47ece2a92555
--- /dev/null
+++ b/libc/test/src/math/f16add_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16add ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/math/f16add.h"
+
+LIST_ADD_TESTS(float16, double, LIBC_NAMESPACE::f16add)
diff --git a/libc/test/src/math/f16addl_test.cpp b/libc/test/src/math/f16addl_test.cpp
new file mode 100644
index 0000000000000..f8e0d9ba6b4de
--- /dev/null
+++ b/libc/test/src/math/f16addl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16addl ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/math/f16addl.h"
+
+LIST_ADD_TESTS(float16, long double, LIBC_NAMESPACE::f16addl)
diff --git a/libc/test/src/math/f16sub_test.cpp b/libc/test/src/math/f16sub_test.cpp
new file mode 100644
index 0000000000000..37b970952fe5e
--- /dev/null
+++ b/libc/test/src/math/f16sub_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16sub ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/f16sub.h"
+
+LIST_SUB_TESTS(float16, double, LIBC_NAMESPACE::f16sub)
diff --git a/libc/test/src/math/f16subl_test.cpp b/libc/test/src/math/f16subl_test.cpp
new file mode 100644
index 0000000000000..c41e6e97bab09
--- /dev/null
+++ b/libc/test/src/math/f16subl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16subl ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/f16subl.h"
+
+LIST_SUB_TESTS(float16, long double, LIBC_NAMESPACE::f16subl)
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index 1b269edaa2477..f244e7addc151 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -3643,6 +3643,21 @@ add_fp_unittest(
libc.src.math.setpayloadsigf16
)
+add_fp_unittest(
+ f16add_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ f16add_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.math.f16add
+)
+
add_fp_unittest(
f16addf_test
SUITE
@@ -3658,6 +3673,50 @@ add_fp_unittest(
libc.src.math.f16addf
)
+add_fp_unittest(
+ f16addl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ f16addl_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.math.f16addl
+)
+
+add_fp_unittest(
+ f16addf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ f16addf128_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.math.f16addf128
+)
+
+add_fp_unittest(
+ f16sub_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ f16sub_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.fenv_macros
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.math.f16sub
+)
+
add_fp_unittest(
f16subf_test
SUITE
@@ -3672,6 +3731,34 @@ add_fp_unittest(
libc.src.math.f16subf
)
+add_fp_unittest(
+ f16subl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ f16subl_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.fenv_macros
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.math.f16subl
+)
+
+add_fp_unittest(
+ f16subf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ f16subf128_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.fenv_macros
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.math.f16subf128
+)
+
add_fp_unittest(
f16div_test
SUITE
diff --git a/libc/test/src/math/smoke/f16add_test.cpp b/libc/test/src/math/smoke/f16add_test.cpp
new file mode 100644
index 0000000000000..c47ece2a92555
--- /dev/null
+++ b/libc/test/src/math/smoke/f16add_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16add ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/math/f16add.h"
+
+LIST_ADD_TESTS(float16, double, LIBC_NAMESPACE::f16add)
diff --git a/libc/test/src/math/smoke/f16addf128_test.cpp b/libc/test/src/math/smoke/f16addf128_test.cpp
new file mode 100644
index 0000000000000..8ed123b4ff1e8
--- /dev/null
+++ b/libc/test/src/math/smoke/f16addf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16addf128 ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/math/f16addf128.h"
+
+LIST_ADD_TESTS(float16, float128, LIBC_NAMESPACE::f16addf128)
diff --git a/libc/test/src/math/smoke/f16addl_test.cpp b/libc/test/src/math/smoke/f16addl_test.cpp
new file mode 100644
index 0000000000000..f8e0d9ba6b4de
--- /dev/null
+++ b/libc/test/src/math/smoke/f16addl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16addl ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/math/f16addl.h"
+
+LIST_ADD_TESTS(float16, long double, LIBC_NAMESPACE::f16addl)
diff --git a/libc/test/src/math/smoke/f16sub_test.cpp b/libc/test/src/math/smoke/f16sub_test.cpp
new file mode 100644
index 0000000000000..4ab347ba614fb
--- /dev/null
+++ b/libc/test/src/math/smoke/f16sub_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16sub ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/f16sub.h"
+
+LIST_SUB_TESTS(float16, double, LIBC_NAMESPACE::f16sub)
diff --git a/libc/test/src/math/smoke/f16subf128_test.cpp b/libc/test/src/math/smoke/f16subf128_test.cpp
new file mode 100644
index 0000000000000..4936d89c1f904
--- /dev/null
+++ b/libc/test/src/math/smoke/f16subf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16subf128 ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/f16subf128.h"
+
+LIST_SUB_TESTS(float16, float128, LIBC_NAMESPACE::f16subf128)
diff --git a/libc/test/src/math/smoke/f16subl_test.cpp b/libc/test/src/math/smoke/f16subl_test.cpp
new file mode 100644
index 0000000000000..c41e6e97bab09
--- /dev/null
+++ b/libc/test/src/math/smoke/f16subl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for f16subl ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/f16subl.h"
+
+LIST_SUB_TESTS(float16, long double, LIBC_NAMESPACE::f16subl)
>From 58fd3bea6d759eb17722ad2e0135714a34efd7e0 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Tue, 2 Jul 2024 16:36:13 -0700
Subject: [PATCH 053/246] [CodeGen] Use range-based for loops (NFC) (#97467)
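No functional change intended. One detail worth calling out: where the loop
body writes back into the container element (CombinerHelper and
SelectionDAGBuilder below), the range variable is taken by reference so the
assignment updates the container itself. A standalone sketch of that idiom,
not part of the patch:

  #include <cstdio>
  #include <vector>

  int main() {
    std::vector<int> Ops = {1, 0, 3};
    // By-reference element: writing to the loop variable writes through
    // into the container, mirroring `for (Register &Reg : Ops)` below.
    for (int &Op : Ops)
      if (Op == 0)
        Op = 42;
    for (int Op : Ops)
      std::printf("%d ", Op); // prints: 1 42 3
  }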
---
llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp | 6 +++---
llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 6 +++---
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 8 ++++----
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index e91750afd2817..bccd9b04cd2c5 100644
--- a/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -124,13 +124,13 @@ AggressiveAntiDepBreaker::AggressiveAntiDepBreaker(
TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RCI) {
/* Collect a bitset of all registers that are only broken if they
are on the critical path. */
- for (unsigned i = 0, e = CriticalPathRCs.size(); i < e; ++i) {
- BitVector CPSet = TRI->getAllocatableSet(MF, CriticalPathRCs[i]);
+ for (const TargetRegisterClass *RC : CriticalPathRCs) {
+ BitVector CPSet = TRI->getAllocatableSet(MF, RC);
if (CriticalPathSet.none())
CriticalPathSet = CPSet;
else
CriticalPathSet |= CPSet;
- }
+ }
LLVM_DEBUG(dbgs() << "AntiDep Critical-Path Registers:");
LLVM_DEBUG(for (unsigned r
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index d57dd6fca0140..945bd8bab1648 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -438,11 +438,11 @@ void CombinerHelper::applyCombineShuffleConcat(MachineInstr &MI,
LLT SrcTy = MRI.getType(Ops[0]);
Register UndefReg = 0;
- for (unsigned i = 0; i < Ops.size(); i++) {
- if (Ops[i] == 0) {
+ for (Register &Reg : Ops) {
+ if (Reg == 0) {
if (UndefReg == 0)
UndefReg = Builder.buildUndef(SrcTy).getReg(0);
- Ops[i] = UndefReg;
+ Reg = UndefReg;
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ad809f836e336..8db2708d41a69 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -10238,8 +10238,8 @@ void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
return;
SmallVector<SDValue, 1> Ops;
- for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
- Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
+ for (const EVT &VT : ValueVTs)
+ Ops.push_back(DAG.getUNDEF(VT));
setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
}
@@ -12516,12 +12516,12 @@ void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
// getRegistersForValue may produce 1 to many registers based on whether
// the OpInfo.ConstraintVT is legal on the target or not.
- for (size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
+ for (unsigned &Reg : OpInfo.AssignedRegs.Regs) {
Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
if (Register::isPhysicalRegister(OriginalDef))
FuncInfo.MBB->addLiveIn(OriginalDef);
// Update the assigned registers to use the original defs.
- OpInfo.AssignedRegs.Regs[i] = OriginalDef;
+ Reg = OriginalDef;
}
SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
>From a1c4926dd0ffbc3be8b27b159d8b1978b2ee9411 Mon Sep 17 00:00:00 2001
From: Petr Hosek <phosek at google.com>
Date: Tue, 2 Jul 2024 16:55:15 -0700
Subject: [PATCH 054/246] [libc] Include Linux kernel headers in the full build
(#97486)
When doing a full build for Linux, as of #97461 we no longer include
system headers, but we need to include Linux kernel headers.
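
Usage note (illustrative): the new LIBC_KERNEL_HEADERS cache variable
defaults to /usr/include and can be overridden at configure time, e.g. with
-DLIBC_KERNEL_HEADERS=<path-to-installed-kernel-headers>; the headers are
appended with -idirafter so they never shadow libc's own headers.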
---
libc/CMakeLists.txt | 2 ++
libc/cmake/modules/LLVMLibCCompileOptionRules.cmake | 8 ++++++++
2 files changed, 10 insertions(+)
diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt
index 4ffcd55ba9500..013b17b03f570 100644
--- a/libc/CMakeLists.txt
+++ b/libc/CMakeLists.txt
@@ -39,6 +39,8 @@ set(LIBC_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(LIBC_ENABLE_USE_BY_CLANG OFF CACHE BOOL "Whether or not to place libc in a build directory findable by a just built clang")
+set(LIBC_KERNEL_HEADERS "/usr/include" CACHE STRING "Path to Linux kernel headers")
+
# Defining a global namespace to enclose all libc functions.
set(default_namespace "__llvm_libc")
if(LLVM_VERSION_MAJOR)
diff --git a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
index 28379213029a3..6d38bb491044e 100644
--- a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
+++ b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
@@ -51,9 +51,17 @@ function(_get_common_compile_options output_var flags)
if(LIBC_CC_SUPPORTS_NOSTDLIBINC)
list(APPEND compile_options "-nostdlibinc")
elseif(COMPILER_RESOURCE_DIR)
+ # TODO: We should require COMPILER_RESOURCE_DIR to be set.
list(APPEND compile_options "-isystem${COMPILER_RESOURCE_DIR}/include")
list(APPEND compile_options "-nostdinc")
endif()
+ # TODO: We should set this unconditionally on Linux.
+ if(LIBC_TARGET_OS_IS_LINUX AND
+ (LIBC_CC_SUPPORTS_NOSTDLIBINC OR COMPILER_RESOURCE_DIR))
+ # We use -idirafter to avoid preempting libc's own headers in case the
+ # directory (e.g. /usr/include) contains other headers.
+ list(APPEND compile_options "-idirafter${LIBC_KERNEL_HEADERS}")
+ endif()
endif()
if(LIBC_COMPILER_HAS_FIXED_POINT)
>From 35668e2c9cb1a09fac1773dfc62fcd892b358294 Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Tue, 2 Jul 2024 16:56:35 -0700
Subject: [PATCH 055/246] Remove llvm/MC/MCAsmLayout.h and the unused parameter
in MCAssembler::layout
This restores 63ec52f867ada8d841dd872acf3d0cb62e2a99e8 and
46f7929879a59ec72dc75679b4201e2d314efba9, NFC changes that were
unnecessarily reverted.
This completes the work that merges MCAsmLayout into MCAssembler.
Pull Request: https://github.com/llvm/llvm-project/pull/97449
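
Caller-side, the change collapses the old two-step pattern into a single
call; a sketch grounded in the bolt and dsymutil hunks below:

  #include "llvm/MC/MCAssembler.h"

  static void finishLayout(llvm::MCAssembler &Assembler) {
    // Previously callers had to construct a placeholder object first:
    //   MCAsmLayout Layout(Assembler);
    //   Assembler.layout(Layout);
    // With this patch the empty MCAsmLayout class is gone entirely:
    Assembler.layout();
  }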
---
bolt/lib/Core/BinaryContext.cpp | 4 +---
clang/docs/tools/clang-formatted-files.txt | 1 -
llvm/include/llvm/MC/MCAsmLayout.h | 22 ----------------------
llvm/include/llvm/MC/MCAssembler.h | 3 +--
llvm/lib/MC/MCAssembler.cpp | 7 ++-----
llvm/lib/MC/MCExpr.cpp | 1 -
llvm/tools/dsymutil/MachOUtils.cpp | 4 +---
7 files changed, 5 insertions(+), 37 deletions(-)
delete mode 100644 llvm/include/llvm/MC/MCAsmLayout.h
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index 3bd715d487e0f..f28a0cd6eb9c6 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -20,7 +20,6 @@
#include "llvm/DebugInfo/DWARF/DWARFCompileUnit.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
-#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
@@ -2416,8 +2415,7 @@ BinaryContext::calculateEmittedSize(BinaryFunction &BF, bool FixBranches) {
MCAssembler &Assembler =
static_cast<MCObjectStreamer *>(Streamer.get())->getAssembler();
- MCAsmLayout Layout(Assembler);
- Assembler.layout(Layout);
+ Assembler.layout();
// Obtain fragment sizes.
std::vector<uint64_t> FragmentSizes;
diff --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt
index 4866bd4aee634..a8ee8f1fcb87c 100644
--- a/clang/docs/tools/clang-formatted-files.txt
+++ b/clang/docs/tools/clang-formatted-files.txt
@@ -5357,7 +5357,6 @@ llvm/include/llvm/MC/MCAsmInfoELF.h
llvm/include/llvm/MC/MCAsmInfoGOFF.h
llvm/include/llvm/MC/MCAsmInfoWasm.h
llvm/include/llvm/MC/MCAsmInfoXCOFF.h
-llvm/include/llvm/MC/MCAsmLayout.h
llvm/include/llvm/MC/MCCodeView.h
llvm/include/llvm/MC/MCContext.h
llvm/include/llvm/MC/MCFixedLenDisassembler.h
diff --git a/llvm/include/llvm/MC/MCAsmLayout.h b/llvm/include/llvm/MC/MCAsmLayout.h
deleted file mode 100644
index 33fae0a0f9766..0000000000000
--- a/llvm/include/llvm/MC/MCAsmLayout.h
+++ /dev/null
@@ -1,22 +0,0 @@
-//===- MCAsmLayout.h - Assembly Layout Object -------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_MC_MCASMLAYOUT_H
-#define LLVM_MC_MCASMLAYOUT_H
-
-namespace llvm {
-class MCAssembler;
-
-class MCAsmLayout {
-public:
- MCAsmLayout(MCAssembler &) {}
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index df5ad0e7bdf4b..9cd65d388d247 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -46,7 +46,6 @@ class MCRelaxableFragment;
class MCSymbolRefExpr;
class raw_ostream;
class MCAsmBackend;
-class MCAsmLayout;
class MCContext;
class MCCodeEmitter;
class MCFragment;
@@ -341,7 +340,7 @@ class MCAssembler {
void Finish();
  // Layout all sections and prepare them for emission.
- void layout(MCAsmLayout &Layout);
+ void layout();
// FIXME: This does not belong here.
bool getSubsectionsViaSymbols() const { return SubsectionsViaSymbols; }
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 2fe4a3cbec9a6..14790f508323e 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -15,7 +15,6 @@
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
@@ -936,7 +935,7 @@ MCAssembler::handleFixup(MCFragment &F, const MCFixup &Fixup,
return std::make_tuple(Target, FixedValue, IsResolved);
}
-void MCAssembler::layout(MCAsmLayout &Layout) {
+void MCAssembler::layout() {
assert(getBackendPtr() && "Expected assembler backend");
DEBUG_WITH_TYPE("mc-dump", {
errs() << "assembler backend - pre-layout\n--\n";
@@ -1073,9 +1072,7 @@ void MCAssembler::layout(MCAsmLayout &Layout) {
}
void MCAssembler::Finish() {
- // Create the layout object.
- MCAsmLayout Layout(*this);
- layout(Layout);
+ layout();
// Write the object file.
stats::ObjectBytes += getWriter().writeObject(*this);
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 0a175ade68d78..b42a668bce23c 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -12,7 +12,6 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCObjectWriter.h"
diff --git a/llvm/tools/dsymutil/MachOUtils.cpp b/llvm/tools/dsymutil/MachOUtils.cpp
index fba66309ca20b..d2bdcf8542b84 100644
--- a/llvm/tools/dsymutil/MachOUtils.cpp
+++ b/llvm/tools/dsymutil/MachOUtils.cpp
@@ -12,7 +12,6 @@
#include "LinkUtils.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/NonRelocatableStringpool.h"
-#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectStreamer.h"
@@ -381,8 +380,7 @@ bool generateDsymCompanion(
auto &Writer = static_cast<MachObjectWriter &>(MCAsm.getWriter());
// Layout but don't emit.
- MCAsmLayout Layout(MCAsm);
- MCAsm.layout(Layout);
+ MCAsm.layout();
BinaryHolder InputBinaryHolder(VFS, false);
>From 3402a1a4d2d4c7ead69156c3d741fc9ae9c4d399 Mon Sep 17 00:00:00 2001
From: Alexander Shaposhnikov <ashaposhnikov at google.com>
Date: Tue, 2 Jul 2024 16:57:30 -0700
Subject: [PATCH 056/246] [Clang] Enable nsan instrumentation pass (#97359)
Enable nsan instrumentation pass
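
For context, the user-facing switch is -fsanitize=numerical, which this
change wires into the pass pipeline. A minimal sketch (the build line is an
assumption; the attribute name is taken from the new test below):

  // Build (illustrative): clang++ -fsanitize=numerical -O1 nsan_demo.cpp
  // NSan shadows each floating-point computation in higher precision and
  // reports where the original and shadow results diverge.
  #include <cstdio>

  float add(float x, float y) { return x + y; } // carries sanitize_numerical_stability

  int main() { std::printf("%f\n", add(0.1f, 0.2f)); }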
---
clang/lib/CodeGen/BackendUtil.cpp | 4 ++++
clang/test/CodeGen/no-skipped-passes-O0-opt-bisect.c | 1 +
clang/test/CodeGen/nsan-basic.c | 7 +++++++
clang/test/CodeGen/sanitizer-module-constructor.c | 1 +
4 files changed, 13 insertions(+)
create mode 100644 clang/test/CodeGen/nsan-basic.c
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 22b593e8f2b7a..4195bb87cf0dd 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -76,6 +76,7 @@
#include "llvm/Transforms/Instrumentation/LowerAllowCheckPass.h"
#include "llvm/Transforms/Instrumentation/MemProfiler.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/NumericalStabilitySanitizer.h"
#include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
#include "llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h"
#include "llvm/Transforms/Instrumentation/SanitizerCoverage.h"
@@ -707,6 +708,9 @@ static void addSanitizers(const Triple &TargetTriple,
MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
}
+ if (LangOpts.Sanitize.has(SanitizerKind::NumericalStability))
+ MPM.addPass(NumericalStabilitySanitizerPass());
+
auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
if (LangOpts.Sanitize.has(Mask)) {
bool UseGlobalGC = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
diff --git a/clang/test/CodeGen/no-skipped-passes-O0-opt-bisect.c b/clang/test/CodeGen/no-skipped-passes-O0-opt-bisect.c
index 381803a00ede7..6d3e6359d37c7 100644
--- a/clang/test/CodeGen/no-skipped-passes-O0-opt-bisect.c
+++ b/clang/test/CodeGen/no-skipped-passes-O0-opt-bisect.c
@@ -9,6 +9,7 @@
// RUN: %clang_cc1 -triple x86_64-linux-gnu -O0 %s -fdebug-pass-manager -emit-llvm -o /dev/null -fsanitize=local-bounds 2>&1 | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -O0 %s -fdebug-pass-manager -emit-llvm -o /dev/null -fsanitize=dataflow 2>&1 | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -O0 %s -fdebug-pass-manager -emit-llvm -o /dev/null -fsanitize-coverage-trace-pc-guard 2>&1 | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -O0 %s -fdebug-pass-manager -emit-llvm -o /dev/null -fsanitize=numerical 2>&1 | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -O0 %s -fdebug-pass-manager -emit-llvm -o /dev/null -fmemory-profile 2>&1 | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -mllvm -opt-bisect-limit=0 %s -fdebug-pass-manager -emit-llvm -o /dev/null 2>&1 | FileCheck %s
diff --git a/clang/test/CodeGen/nsan-basic.c b/clang/test/CodeGen/nsan-basic.c
new file mode 100644
index 0000000000000..7aedaa38d881d
--- /dev/null
+++ b/clang/test/CodeGen/nsan-basic.c
@@ -0,0 +1,7 @@
+// RUN: %clang_cc1 -triple x86_64 -emit-llvm -o - -fsanitize=numerical %s | FileCheck %s
+
+// CHECK: Function Attrs: noinline nounwind optnone sanitize_numerical_stability
+float add(float x, float y) {
+ float z = x + y;
+ return z;
+}
diff --git a/clang/test/CodeGen/sanitizer-module-constructor.c b/clang/test/CodeGen/sanitizer-module-constructor.c
index e4d08cde2620a..06dc57304a7e1 100644
--- a/clang/test/CodeGen/sanitizer-module-constructor.c
+++ b/clang/test/CodeGen/sanitizer-module-constructor.c
@@ -1,6 +1,7 @@
// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsanitize=address -O3 -emit-llvm -fdebug-pass-manager -o - %s 2>&1 | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsanitize=thread -O3 -emit-llvm -fdebug-pass-manager -o - %s 2>&1 | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsanitize=memory -O3 -emit-llvm -fdebug-pass-manager -o - %s 2>&1 | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsanitize=numerical -O3 -emit-llvm -fdebug-pass-manager -o - %s 2>&1 | FileCheck %s
// This is a regression test for PR42877
>From 0a369b06e34495966c6c9db427ea52f77a82a0bf Mon Sep 17 00:00:00 2001
From: Ryotaro KASUGA <kasuga.ryotaro at fujitsu.com>
Date: Wed, 3 Jul 2024 09:15:41 +0900
Subject: [PATCH 057/246] Reapply "[MachinePipeliner] Fix constraints aren't
considered in certain cases" (#97246) (#97259)
This reverts commit e6a961dbef773b16bda2cebc4bf9f3d1e0da42fc.
There is no difference from the original change; I re-ran the previously
failing test and it passed, so the failure was not caused by this change.
Test result: https://lab.llvm.org/buildbot/#/builders/176/builds/585
---
llvm/include/llvm/CodeGen/MachinePipeliner.h | 7 +-
llvm/lib/CodeGen/MachinePipeliner.cpp | 70 ++--
...instruction-scheduled-at-correct-cycle.mir | 335 ++++++++++++++++++
3 files changed, 379 insertions(+), 33 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/sms-instruction-scheduled-at-correct-cycle.mir
diff --git a/llvm/include/llvm/CodeGen/MachinePipeliner.h b/llvm/include/llvm/CodeGen/MachinePipeliner.h
index 94913f534fb77..7fe5581faa183 100644
--- a/llvm/include/llvm/CodeGen/MachinePipeliner.h
+++ b/llvm/include/llvm/CodeGen/MachinePipeliner.h
@@ -599,8 +599,8 @@ class SMSchedule {
/// chain.
int latestCycleInChain(const SDep &Dep);
- void computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
- int *MinEnd, int *MaxStart, int II, SwingSchedulerDAG *DAG);
+ void computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart, int II,
+ SwingSchedulerDAG *DAG);
bool insert(SUnit *SU, int StartCycle, int EndCycle, int II);
/// Iterators for the cycle to instruction map.
@@ -658,6 +658,9 @@ class SMSchedule {
bool isLoopCarried(const SwingSchedulerDAG *SSD, MachineInstr &Phi) const;
bool isLoopCarriedDefOfUse(const SwingSchedulerDAG *SSD, MachineInstr *Def,
MachineOperand &MO) const;
+
+ bool onlyHasLoopCarriedOutputOrOrderPreds(SUnit *SU,
+ SwingSchedulerDAG *DAG) const;
void print(raw_ostream &os) const;
void dump() const;
};
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 7ff14a6cf36bf..515c7f89b4bed 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -2461,47 +2461,43 @@ bool SwingSchedulerDAG::schedulePipeline(SMSchedule &Schedule) {
// upon the scheduled time for any predecessors/successors.
int EarlyStart = INT_MIN;
int LateStart = INT_MAX;
- // These values are set when the size of the schedule window is limited
- // due to chain dependences.
- int SchedEnd = INT_MAX;
- int SchedStart = INT_MIN;
- Schedule.computeStart(SU, &EarlyStart, &LateStart, &SchedEnd, &SchedStart,
- II, this);
+ Schedule.computeStart(SU, &EarlyStart, &LateStart, II, this);
LLVM_DEBUG({
dbgs() << "\n";
dbgs() << "Inst (" << SU->NodeNum << ") ";
SU->getInstr()->dump();
dbgs() << "\n";
});
- LLVM_DEBUG({
- dbgs() << format("\tes: %8x ls: %8x me: %8x ms: %8x\n", EarlyStart,
- LateStart, SchedEnd, SchedStart);
- });
+ LLVM_DEBUG(
+ dbgs() << format("\tes: %8x ls: %8x\n", EarlyStart, LateStart));
- if (EarlyStart > LateStart || SchedEnd < EarlyStart ||
- SchedStart > LateStart)
+ if (EarlyStart > LateStart)
scheduleFound = false;
- else if (EarlyStart != INT_MIN && LateStart == INT_MAX) {
- SchedEnd = std::min(SchedEnd, EarlyStart + (int)II - 1);
- scheduleFound = Schedule.insert(SU, EarlyStart, SchedEnd, II);
- } else if (EarlyStart == INT_MIN && LateStart != INT_MAX) {
- SchedStart = std::max(SchedStart, LateStart - (int)II + 1);
- scheduleFound = Schedule.insert(SU, LateStart, SchedStart, II);
- } else if (EarlyStart != INT_MIN && LateStart != INT_MAX) {
- SchedEnd =
- std::min(SchedEnd, std::min(LateStart, EarlyStart + (int)II - 1));
- // When scheduling a Phi it is better to start at the late cycle and go
- // backwards. The default order may insert the Phi too far away from
- // its first dependence.
- if (SU->getInstr()->isPHI())
- scheduleFound = Schedule.insert(SU, SchedEnd, EarlyStart, II);
+ else if (EarlyStart != INT_MIN && LateStart == INT_MAX)
+ scheduleFound =
+ Schedule.insert(SU, EarlyStart, EarlyStart + (int)II - 1, II);
+ else if (EarlyStart == INT_MIN && LateStart != INT_MAX)
+ scheduleFound =
+ Schedule.insert(SU, LateStart, LateStart - (int)II + 1, II);
+ else if (EarlyStart != INT_MIN && LateStart != INT_MAX) {
+ LateStart = std::min(LateStart, EarlyStart + (int)II - 1);
+ // When scheduling a Phi it is better to start at the late cycle and
+ // go backwards. The default order may insert the Phi too far away
+ // from its first dependence.
+ // Also, do backward search when all scheduled predecessors are
+ // loop-carried output/order dependencies. Empirically, there are also
+ // cases where scheduling becomes possible with backward search.
+ if (SU->getInstr()->isPHI() ||
+ Schedule.onlyHasLoopCarriedOutputOrOrderPreds(SU, this))
+ scheduleFound = Schedule.insert(SU, LateStart, EarlyStart, II);
else
- scheduleFound = Schedule.insert(SU, EarlyStart, SchedEnd, II);
+ scheduleFound = Schedule.insert(SU, EarlyStart, LateStart, II);
} else {
int FirstCycle = Schedule.getFirstCycle();
scheduleFound = Schedule.insert(SU, FirstCycle + getASAP(SU),
FirstCycle + getASAP(SU) + II - 1, II);
}
+
// Even if we find a schedule, make sure the schedule doesn't exceed the
// allowable number of stages. We keep trying if this happens.
if (scheduleFound)
@@ -2909,8 +2905,7 @@ static SUnit *multipleIterations(SUnit *SU, SwingSchedulerDAG *DAG) {
/// Compute the scheduling start slot for the instruction. The start slot
/// depends on any predecessor or successor nodes scheduled already.
void SMSchedule::computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
- int *MinEnd, int *MaxStart, int II,
- SwingSchedulerDAG *DAG) {
+ int II, SwingSchedulerDAG *DAG) {
// Iterate over each instruction that has been scheduled already. The start
// slot computation depends on whether the previously scheduled instruction
// is a predecessor or successor of the specified instruction.
@@ -2929,7 +2924,7 @@ void SMSchedule::computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
*MaxEarlyStart = std::max(*MaxEarlyStart, EarlyStart);
if (DAG->isLoopCarriedDep(SU, Dep, false)) {
int End = earliestCycleInChain(Dep) + (II - 1);
- *MinEnd = std::min(*MinEnd, End);
+ *MinLateStart = std::min(*MinLateStart, End);
}
} else {
int LateStart = cycle - Dep.getLatency() +
@@ -2953,7 +2948,7 @@ void SMSchedule::computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
*MinLateStart = std::min(*MinLateStart, LateStart);
if (DAG->isLoopCarriedDep(SU, Dep)) {
int Start = latestCycleInChain(Dep) + 1 - II;
- *MaxStart = std::max(*MaxStart, Start);
+ *MaxEarlyStart = std::max(*MaxEarlyStart, Start);
}
} else {
int EarlyStart = cycle + Dep.getLatency() -
@@ -3146,6 +3141,19 @@ bool SMSchedule::isLoopCarriedDefOfUse(const SwingSchedulerDAG *SSD,
return false;
}
+/// Return true if all scheduled predecessors are loop-carried output/order
+/// dependencies.
+bool SMSchedule::onlyHasLoopCarriedOutputOrOrderPreds(
+ SUnit *SU, SwingSchedulerDAG *DAG) const {
+ for (const SDep &Pred : SU->Preds)
+ if (InstrToCycle.count(Pred.getSUnit()) && !DAG->isBackedge(SU, Pred))
+ return false;
+ for (const SDep &Succ : SU->Succs)
+ if (InstrToCycle.count(Succ.getSUnit()) && DAG->isBackedge(SU, Succ))
+ return false;
+ return true;
+}
+
/// Determine transitive dependences of unpipelineable instructions
SmallSet<SUnit *, 8> SMSchedule::computeUnpipelineableNodes(
SwingSchedulerDAG *SSD, TargetInstrInfo::PipelinerLoopInfo *PLI) {
diff --git a/llvm/test/CodeGen/AArch64/sms-instruction-scheduled-at-correct-cycle.mir b/llvm/test/CodeGen/AArch64/sms-instruction-scheduled-at-correct-cycle.mir
new file mode 100644
index 0000000000000..c1014b296cad3
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sms-instruction-scheduled-at-correct-cycle.mir
@@ -0,0 +1,335 @@
+# RUN: llc --verify-machineinstrs -mtriple=aarch64 -o - %s -run-pass pipeliner -aarch64-enable-pipeliner -debug-only=pipeliner -pipeliner-max-stages=50 -pipeliner-max-mii=50 -pipeliner-enable-copytophi=0 -pipeliner-ii-search-range=30 2>&1 | FileCheck %s
+# REQUIRES: asserts
+
+# Test that each instruction must be scheduled between the early cycle and the late cycle. Previously there were cases where an instruction was scheduled outside of the valid range. See issue #93936 for details.
+
+# CHECK: {{^ *}}Try to schedule with 47
+# CHECK: {{^ *}}Inst (11) %48:fpr128 = LDRQui %35:gpr64sp, 0 :: (load (s128) from %ir.lsr.iv63, align 4, !tbaa !0)
+# CHECK-EMPTY:
+# CHECK-NEXT: {{^ *}}es: ffffffe9 ls: ffffffe9
+# CHECK-NEXT: {{^ *}}Trying to insert node between -23 and -23 II: 47
+# CHECK-NEXT: {{^ *}}failed to insert at cycle -23 %48:fpr128 = LDRQui %35:gpr64sp, 0 :: (load (s128) from %ir.lsr.iv63, align 4, !tbaa !0)
+
+--- |
+ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+
+ define dso_local void @f(ptr nocapture noundef writeonly %a, ptr nocapture noundef readonly %b, ptr nocapture noundef readonly %c, ptr nocapture noundef readonly %d, ptr nocapture noundef readonly %e, float noundef %f, i32 noundef %N) local_unnamed_addr {
+ entry:
+ %cmp16 = icmp sgt i32 %N, 0
+ br i1 %cmp16, label %for.body.preheader, label %for.cond.cleanup
+
+ for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext nneg i32 %N to i64
+ %min.iters.check = icmp ult i32 %N, 8
+ br i1 %min.iters.check, label %for.body.preheader37, label %vector.memcheck
+
+ vector.memcheck: ; preds = %for.body.preheader
+ %0 = ptrtoint ptr %a to i64
+ %1 = ptrtoint ptr %b to i64
+ %2 = ptrtoint ptr %c to i64
+ %3 = ptrtoint ptr %d to i64
+ %4 = ptrtoint ptr %e to i64
+ %5 = sub i64 %0, %1
+ %diff.check = icmp ult i64 %5, 32
+ %6 = sub i64 %0, %2
+ %diff.check22 = icmp ult i64 %6, 32
+ %conflict.rdx = or i1 %diff.check, %diff.check22
+ %7 = sub i64 %0, %3
+ %diff.check24 = icmp ult i64 %7, 32
+ %conflict.rdx25 = or i1 %conflict.rdx, %diff.check24
+ %8 = sub i64 %0, %4
+ %diff.check27 = icmp ult i64 %8, 32
+ %conflict.rdx28 = or i1 %conflict.rdx25, %diff.check27
+ br i1 %conflict.rdx28, label %for.body.preheader37, label %vector.ph
+
+ vector.ph: ; preds = %vector.memcheck
+ %n.vec = and i64 %wide.trip.count, 2147483640
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %f, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ %scevgep54 = getelementptr i8, ptr %b, i64 16
+ %scevgep58 = getelementptr i8, ptr %a, i64 16
+ %scevgep62 = getelementptr i8, ptr %c, i64 16
+ %scevgep66 = getelementptr i8, ptr %e, i64 16
+ %scevgep70 = getelementptr i8, ptr %d, i64 16
+ br label %vector.body
+
+ vector.body: ; preds = %vector.body, %vector.ph
+ %lsr.iv71 = phi ptr [ %scevgep72, %vector.body ], [ %scevgep70, %vector.ph ]
+ %lsr.iv67 = phi ptr [ %scevgep68, %vector.body ], [ %scevgep66, %vector.ph ]
+ %lsr.iv63 = phi ptr [ %scevgep64, %vector.body ], [ %scevgep62, %vector.ph ]
+ %lsr.iv59 = phi ptr [ %scevgep60, %vector.body ], [ %scevgep58, %vector.ph ]
+ %lsr.iv55 = phi ptr [ %scevgep56, %vector.body ], [ %scevgep54, %vector.ph ]
+ %lsr.iv52 = phi i64 [ %lsr.iv.next53, %vector.body ], [ %n.vec, %vector.ph ]
+ %scevgep57 = getelementptr i8, ptr %lsr.iv55, i64 -16
+ %wide.load = load <4 x float>, ptr %scevgep57, align 4, !tbaa !6
+ %wide.load29 = load <4 x float>, ptr %lsr.iv55, align 4, !tbaa !6
+ %9 = fmul <4 x float> %wide.load, %broadcast.splat
+ %10 = fmul <4 x float> %wide.load29, %broadcast.splat
+ %scevgep65 = getelementptr i8, ptr %lsr.iv63, i64 -16
+ %wide.load30 = load <4 x float>, ptr %scevgep65, align 4, !tbaa !6
+ %wide.load31 = load <4 x float>, ptr %lsr.iv63, align 4, !tbaa !6
+ %scevgep73 = getelementptr i8, ptr %lsr.iv71, i64 -16
+ %wide.load32 = load <4 x float>, ptr %scevgep73, align 4, !tbaa !6
+ %wide.load33 = load <4 x float>, ptr %lsr.iv71, align 4, !tbaa !6
+ %11 = fsub <4 x float> %wide.load30, %wide.load32
+ %12 = fsub <4 x float> %wide.load31, %wide.load33
+ %13 = fmul <4 x float> %9, %11
+ %14 = fmul <4 x float> %10, %12
+ %scevgep69 = getelementptr i8, ptr %lsr.iv67, i64 -16
+ %wide.load34 = load <4 x float>, ptr %scevgep69, align 4, !tbaa !6
+ %wide.load35 = load <4 x float>, ptr %lsr.iv67, align 4, !tbaa !6
+ %15 = fdiv <4 x float> %13, %wide.load34
+ %16 = fdiv <4 x float> %14, %wide.load35
+ %scevgep61 = getelementptr i8, ptr %lsr.iv59, i64 -16
+ store <4 x float> %15, ptr %scevgep61, align 4, !tbaa !6
+ store <4 x float> %16, ptr %lsr.iv59, align 4, !tbaa !6
+ %lsr.iv.next53 = add nsw i64 %lsr.iv52, -8
+ %scevgep56 = getelementptr i8, ptr %lsr.iv55, i64 32
+ %scevgep60 = getelementptr i8, ptr %lsr.iv59, i64 32
+ %scevgep64 = getelementptr i8, ptr %lsr.iv63, i64 32
+ %scevgep68 = getelementptr i8, ptr %lsr.iv67, i64 32
+ %scevgep72 = getelementptr i8, ptr %lsr.iv71, i64 32
+ %17 = icmp eq i64 %lsr.iv.next53, 0
+ br i1 %17, label %middle.block, label %vector.body, !llvm.loop !10
+
+ middle.block: ; preds = %vector.body
+ %cmp.n = icmp eq i64 %n.vec, %wide.trip.count
+ br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader37
+
+ for.body.preheader37: ; preds = %vector.memcheck, %for.body.preheader, %middle.block
+ %indvars.iv.ph = phi i64 [ %n.vec, %middle.block ], [ 0, %for.body.preheader ], [ 0, %vector.memcheck ]
+ %18 = shl nuw nsw i64 %indvars.iv.ph, 2
+ %scevgep = getelementptr i8, ptr %a, i64 %18
+ %scevgep39 = getelementptr i8, ptr %e, i64 %18
+ %scevgep42 = getelementptr i8, ptr %d, i64 %18
+ %scevgep45 = getelementptr i8, ptr %c, i64 %18
+ %scevgep48 = getelementptr i8, ptr %b, i64 %18
+ %19 = sub i64 %wide.trip.count, %indvars.iv.ph
+ br label %for.body
+
+ for.cond.cleanup: ; preds = %for.body, %middle.block, %entry
+ ret void
+
+ for.body: ; preds = %for.body.preheader37, %for.body
+ %lsr.iv51 = phi i64 [ %19, %for.body.preheader37 ], [ %lsr.iv.next, %for.body ]
+ %lsr.iv49 = phi ptr [ %scevgep48, %for.body.preheader37 ], [ %scevgep50, %for.body ]
+ %lsr.iv46 = phi ptr [ %scevgep45, %for.body.preheader37 ], [ %scevgep47, %for.body ]
+ %lsr.iv43 = phi ptr [ %scevgep42, %for.body.preheader37 ], [ %scevgep44, %for.body ]
+ %lsr.iv40 = phi ptr [ %scevgep39, %for.body.preheader37 ], [ %scevgep41, %for.body ]
+ %lsr.iv = phi ptr [ %scevgep, %for.body.preheader37 ], [ %scevgep38, %for.body ]
+ %20 = load float, ptr %lsr.iv49, align 4, !tbaa !6
+ %mul = fmul float %20, %f
+ %21 = load float, ptr %lsr.iv46, align 4, !tbaa !6
+ %22 = load float, ptr %lsr.iv43, align 4, !tbaa !6
+ %sub = fsub float %21, %22
+ %mul5 = fmul float %mul, %sub
+ %23 = load float, ptr %lsr.iv40, align 4, !tbaa !6
+ %div = fdiv float %mul5, %23
+ store float %div, ptr %lsr.iv, align 4, !tbaa !6
+ %scevgep38 = getelementptr i8, ptr %lsr.iv, i64 4
+ %scevgep41 = getelementptr i8, ptr %lsr.iv40, i64 4
+ %scevgep44 = getelementptr i8, ptr %lsr.iv43, i64 4
+ %scevgep47 = getelementptr i8, ptr %lsr.iv46, i64 4
+ %scevgep50 = getelementptr i8, ptr %lsr.iv49, i64 4
+ %lsr.iv.next = add i64 %lsr.iv51, -1
+ %exitcond.not = icmp eq i64 %lsr.iv.next, 0
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+ }
+
+ !6 = !{!7, !7, i64 0}
+ !7 = !{!"float", !8, i64 0}
+ !8 = !{!"omnipotent char", !9, i64 0}
+ !9 = !{!"Simple C/C++ TBAA"}
+ !10 = distinct !{!10, !11, !12, !13}
+ !11 = !{!"llvm.loop.mustprogress"}
+ !12 = !{!"llvm.loop.isvectorized", i32 1}
+ !13 = !{!"llvm.loop.unroll.runtime.disable"}
+ !14 = distinct !{!14, !11, !12}
+
+...
+---
+name: f
+tracksRegLiveness: true
+liveins:
+ - { reg: '$x0', virtual-reg: '%39' }
+ - { reg: '$x1', virtual-reg: '%40' }
+ - { reg: '$x2', virtual-reg: '%41' }
+ - { reg: '$x3', virtual-reg: '%42' }
+ - { reg: '$x4', virtual-reg: '%43' }
+ - { reg: '$s0', virtual-reg: '%44' }
+ - { reg: '$w5', virtual-reg: '%45' }
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.7
+ liveins: $x0, $x1, $x2, $x3, $x4, $s0, $w5
+
+ %45:gpr32common = COPY $w5
+ %44:fpr32 = COPY $s0
+ %43:gpr64common = COPY $x4
+ %42:gpr64common = COPY $x3
+ %41:gpr64common = COPY $x2
+ %40:gpr64common = COPY $x1
+ %39:gpr64common = COPY $x0
+ dead $wzr = SUBSWri %45, 1, 0, implicit-def $nzcv
+ Bcc 11, %bb.7, implicit $nzcv
+ B %bb.1
+
+ bb.1.for.body.preheader:
+ successors: %bb.12, %bb.2
+
+ %48:gpr32 = ORRWrs $wzr, %45, 0
+ %0:gpr64 = SUBREG_TO_REG 0, killed %48, %subreg.sub_32
+ dead $wzr = SUBSWri %45, 8, 0, implicit-def $nzcv
+ Bcc 2, %bb.2, implicit $nzcv
+
+ bb.12:
+ %49:gpr64all = COPY $xzr
+ %47:gpr64all = COPY %49
+ B %bb.6
+
+ bb.2.vector.memcheck:
+ successors: %bb.6, %bb.11
+
+ %55:gpr64common = SUBXrr %39, %40
+ %59:gpr64all = COPY $xzr
+ %51:gpr64all = COPY %59
+ dead $xzr = SUBSXri killed %55, 32, 0, implicit-def $nzcv
+ Bcc 3, %bb.6, implicit $nzcv
+ B %bb.11
+
+ bb.11.vector.memcheck:
+ successors: %bb.6, %bb.10
+
+ %56:gpr64common = SUBXrr %39, %41
+ dead $xzr = SUBSXri %56, 32, 0, implicit-def $nzcv
+ Bcc 3, %bb.6, implicit $nzcv
+ B %bb.10
+
+ bb.10.vector.memcheck:
+ successors: %bb.6, %bb.9
+
+ %57:gpr64common = SUBXrr %39, %42
+ dead $xzr = SUBSXri %57, 32, 0, implicit-def $nzcv
+ Bcc 3, %bb.6, implicit $nzcv
+ B %bb.9
+
+ bb.9.vector.memcheck:
+ successors: %bb.6, %bb.3
+
+ %58:gpr64common = SUBXrr %39, %43
+ dead $xzr = SUBSXri %58, 32, 0, implicit-def $nzcv
+ Bcc 3, %bb.6, implicit $nzcv
+ B %bb.3
+
+ bb.3.vector.ph:
+ %64:gpr64common = ANDXri %0, 8027
+ %1:gpr64 = COPY %64
+ %66:fpr128 = IMPLICIT_DEF
+ %65:fpr128 = INSERT_SUBREG %66, %44, %subreg.ssub
+ %67:gpr64sp = ADDXri %40, 16, 0
+ %3:gpr64all = COPY %67
+ %68:gpr64sp = ADDXri %39, 16, 0
+ %4:gpr64all = COPY %68
+ %69:gpr64sp = ADDXri %41, 16, 0
+ %5:gpr64all = COPY %69
+ %70:gpr64sp = ADDXri %43, 16, 0
+ %6:gpr64all = COPY %70
+ %71:gpr64sp = ADDXri %42, 16, 0
+ %7:gpr64all = COPY %71
+
+ bb.4.vector.body:
+ successors: %bb.5, %bb.4
+
+ %8:gpr64sp = PHI %7, %bb.3, %19, %bb.4
+ %9:gpr64sp = PHI %6, %bb.3, %18, %bb.4
+ %10:gpr64sp = PHI %5, %bb.3, %17, %bb.4
+ %11:gpr64sp = PHI %4, %bb.3, %16, %bb.4
+ %12:gpr64sp = PHI %3, %bb.3, %15, %bb.4
+ %13:gpr64sp = PHI %1, %bb.3, %14, %bb.4
+ %72:fpr128 = LDURQi %12, -16 :: (load (s128) from %ir.scevgep57, align 4, !tbaa !6)
+ %73:fpr128 = LDRQui %12, 0 :: (load (s128) from %ir.lsr.iv55, align 4, !tbaa !6)
+ %74:fpr128 = nofpexcept FMULv4i32_indexed killed %72, %65, 0, implicit $fpcr
+ %75:fpr128 = nofpexcept FMULv4i32_indexed killed %73, %65, 0, implicit $fpcr
+ %76:fpr128 = LDURQi %10, -16 :: (load (s128) from %ir.scevgep65, align 4, !tbaa !6)
+ %77:fpr128 = LDRQui %10, 0 :: (load (s128) from %ir.lsr.iv63, align 4, !tbaa !6)
+ %78:fpr128 = LDURQi %8, -16 :: (load (s128) from %ir.scevgep73, align 4, !tbaa !6)
+ %79:fpr128 = LDRQui %8, 0 :: (load (s128) from %ir.lsr.iv71, align 4, !tbaa !6)
+ %80:fpr128 = nofpexcept FSUBv4f32 killed %76, killed %78, implicit $fpcr
+ %81:fpr128 = nofpexcept FSUBv4f32 killed %77, killed %79, implicit $fpcr
+ %82:fpr128 = nofpexcept FMULv4f32 killed %74, killed %80, implicit $fpcr
+ %83:fpr128 = nofpexcept FMULv4f32 killed %75, killed %81, implicit $fpcr
+ %84:fpr128 = LDURQi %9, -16 :: (load (s128) from %ir.scevgep69, align 4, !tbaa !6)
+ %85:fpr128 = LDRQui %9, 0 :: (load (s128) from %ir.lsr.iv67, align 4, !tbaa !6)
+ %86:fpr128 = nofpexcept FDIVv4f32 killed %82, killed %84, implicit $fpcr
+ %87:fpr128 = nofpexcept FDIVv4f32 killed %83, killed %85, implicit $fpcr
+ STURQi killed %86, %11, -16 :: (store (s128) into %ir.scevgep61, align 4, !tbaa !6)
+ STRQui killed %87, %11, 0 :: (store (s128) into %ir.lsr.iv59, align 4, !tbaa !6)
+ %88:gpr64 = nsw SUBSXri %13, 8, 0, implicit-def $nzcv
+ %14:gpr64all = COPY %88
+ %89:gpr64sp = ADDXri %12, 32, 0
+ %15:gpr64all = COPY %89
+ %90:gpr64sp = ADDXri %11, 32, 0
+ %16:gpr64all = COPY %90
+ %91:gpr64sp = ADDXri %10, 32, 0
+ %17:gpr64all = COPY %91
+ %92:gpr64sp = ADDXri %9, 32, 0
+ %18:gpr64all = COPY %92
+ %93:gpr64sp = ADDXri %8, 32, 0
+ %19:gpr64all = COPY %93
+ Bcc 1, %bb.4, implicit $nzcv
+ B %bb.5
+
+ bb.5.middle.block:
+ dead $xzr = SUBSXrr %64, %0, implicit-def $nzcv
+ Bcc 0, %bb.7, implicit $nzcv
+ B %bb.6
+
+ bb.6.for.body.preheader37:
+ %20:gpr64 = PHI %47, %bb.12, %51, %bb.2, %51, %bb.11, %51, %bb.10, %51, %bb.9, %1, %bb.5
+ %95:gpr64 = nuw nsw UBFMXri %20, 62, 61
+ %96:gpr64 = ADDXrr %39, %95
+ %21:gpr64all = COPY %96
+ %97:gpr64 = ADDXrr %43, %95
+ %22:gpr64all = COPY %97
+ %98:gpr64 = ADDXrr %42, %95
+ %23:gpr64all = COPY %98
+ %99:gpr64 = ADDXrr %41, %95
+ %24:gpr64all = COPY %99
+ %100:gpr64 = ADDXrr %40, %95
+ %25:gpr64all = COPY %100
+ %101:gpr64 = SUBXrr %0, %20
+ %26:gpr64all = COPY %101
+ B %bb.8
+
+ bb.7.for.cond.cleanup:
+ RET_ReallyLR
+
+ bb.8.for.body:
+ successors: %bb.7, %bb.8
+
+ %27:gpr64sp = PHI %26, %bb.6, %38, %bb.8
+ %28:gpr64sp = PHI %25, %bb.6, %37, %bb.8
+ %29:gpr64sp = PHI %24, %bb.6, %36, %bb.8
+ %30:gpr64sp = PHI %23, %bb.6, %35, %bb.8
+ %31:gpr64sp = PHI %22, %bb.6, %34, %bb.8
+ %32:gpr64sp = PHI %21, %bb.6, %33, %bb.8
+ early-clobber %102:gpr64sp, %103:fpr32 = LDRSpost %28, 4 :: (load (s32) from %ir.lsr.iv49, !tbaa !6)
+ %104:fpr32 = nofpexcept FMULSrr killed %103, %44, implicit $fpcr
+ early-clobber %105:gpr64sp, %106:fpr32 = LDRSpost %29, 4 :: (load (s32) from %ir.lsr.iv46, !tbaa !6)
+ early-clobber %107:gpr64sp, %108:fpr32 = LDRSpost %30, 4 :: (load (s32) from %ir.lsr.iv43, !tbaa !6)
+ %109:fpr32 = nofpexcept FSUBSrr killed %106, killed %108, implicit $fpcr
+ %110:fpr32 = nofpexcept FMULSrr killed %104, killed %109, implicit $fpcr
+ early-clobber %111:gpr64sp, %112:fpr32 = LDRSpost %31, 4 :: (load (s32) from %ir.lsr.iv40, !tbaa !6)
+ %113:fpr32 = nofpexcept FDIVSrr killed %110, killed %112, implicit $fpcr
+ early-clobber %114:gpr64sp = STRSpost killed %113, %32, 4 :: (store (s32) into %ir.lsr.iv, !tbaa !6)
+ %33:gpr64all = COPY %114
+ %34:gpr64all = COPY %111
+ %35:gpr64all = COPY %107
+ %36:gpr64all = COPY %105
+ %37:gpr64all = COPY %102
+ %115:gpr64 = SUBSXri %27, 1, 0, implicit-def $nzcv
+ %38:gpr64all = COPY %115
+ Bcc 0, %bb.7, implicit $nzcv
+ B %bb.8
+
+...
>From 7c94a227bccf4d067a65f4ed88aa415fd1de9d81 Mon Sep 17 00:00:00 2001
From: Alexander Shaposhnikov <ashaposhnikov at google.com>
Date: Tue, 2 Jul 2024 17:34:44 -0700
Subject: [PATCH 058/246] [Clang][Driver] Link nsan runtime (#97364)
Link the nsan (numerical sanitizer) runtime when -fsanitize=numerical is used.
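For context: nsan is the numerical sanitizer, which shadows floating-point
computations at higher precision to flag numerically unstable results. A toy
C++ program of the kind it is meant to catch (illustrative only; the driver
flag -fsanitize=numerical comes from the test added below):

    // cancel.cpp -- catastrophic cancellation: the float result carries no
    // significant digits, while a higher-precision shadow value disagrees.
    // Hypothetical build line: clang++ -fsanitize=numerical cancel.cpp
    #include <cstdio>

    int main() {
      float a = 1.0e8f;
      float b = a + 1.0f;   // The 1.0 is absorbed: b == a in float.
      float d = b - a;      // 0.0f here, but 1.0 in exact arithmetic.
      std::printf("%f\n", d);
      return 0;
    }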
---
clang/lib/Driver/ToolChains/CommonArgs.cpp | 2 ++
clang/test/Driver/sanitizer-ld.c | 12 ++++++++++++
2 files changed, 14 insertions(+)
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 2a4c1369f5a73..be4d7d2cffb16 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -1469,6 +1469,8 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("msan_cxx");
}
+ if (SanArgs.needsNsanRt())
+ StaticRuntimes.push_back("nsan");
if (!SanArgs.needsSharedRt() && SanArgs.needsTsanRt()) {
StaticRuntimes.push_back("tsan");
if (SanArgs.linkCXXRuntimes())
diff --git a/clang/test/Driver/sanitizer-ld.c b/clang/test/Driver/sanitizer-ld.c
index 1dc8b446d7b6d..93702f456229f 100644
--- a/clang/test/Driver/sanitizer-ld.c
+++ b/clang/test/Driver/sanitizer-ld.c
@@ -627,6 +627,18 @@
// CHECK-COV-LINUX: "-lpthread"
// CHECK-COV-LINUX: "-lresolv"
+// RUN: %clang -### %s 2>&1 \
+// RUN: --target=x86_64-unknown-linux -fuse-ld=ld -fsanitize=numerical \
+// RUN: -resource-dir=%S/Inputs/resource_dir \
+// RUN: --sysroot=%S/Inputs/basic_linux_tree \
+// RUN: | FileCheck --check-prefix=CHECK-NSAN-LINUX %s
+//
+// CHECK-NSAN-LINUX: "{{.*}}ld{{(.exe)?}}"
+// CHECK-NSAN-LINUX-NOT: "-lc"
+// CHECK-NSAN-LINUX-NOT: libclang_rt.ubsan
+// CHECK-NSAN-LINUX: libclang_rt.nsan.a"
+// CHECK-NSAN-LINUX: "-lpthread" "-lrt" "-lm" "-ldl" "-lresolv"
+
// CFI by itself does not link runtime libraries.
// RUN: not %clang -fsanitize=cfi -### %s 2>&1 \
// RUN: --target=x86_64-unknown-linux -fuse-ld=ld -rtlib=platform \
>From 0856064ea219d029e7d2c4f68bb88196fe647f6b Mon Sep 17 00:00:00 2001
From: Youngsuk Kim <youngsuk.kim at hpe.com>
Date: Tue, 2 Jul 2024 11:38:37 -0500
Subject: [PATCH 059/246] [clang][StaticAnalyzer] Avoid
'raw_string_ostream::str' (NFC)
Since `raw_string_ostream` doesn't own the string buffer, it is
desirable (in terms of memory safety) for users to directly reference
the string buffer rather than use `raw_string_ostream::str()`.
This works towards the TODO comment to remove `raw_string_ostream::str()`.
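The mechanical pattern applied throughout the diff, as a minimal sketch
(illustrative function; assumes a current LLVM where raw_string_ostream is
unbuffered, so the backing string is always up to date):

    #include "llvm/Support/raw_ostream.h"
    #include <string>

    std::string describe(int N) {
      std::string Buf;
      llvm::raw_string_ostream OS(Buf); // OS writes straight into Buf.
      OS << "value = " << N;
      // Before this patch's style: return OS.str();
      return Buf; // Reference the buffer directly; no str() indirection.
    }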
---
.../StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 12 ++++++------
.../RetainCountDiagnostics.cpp | 16 ++++++++--------
clang/lib/StaticAnalyzer/Core/ExprEngine.cpp | 2 +-
.../Core/RangeConstraintManager.cpp | 4 ++--
clang/lib/StaticAnalyzer/Core/SVals.cpp | 2 +-
5 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index b4390f0b85bbe..9d3aeff465ca1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -247,8 +247,8 @@ void ObjCDeallocChecker::checkASTDecl(const ObjCImplementationDecl *D,
PathDiagnosticLocation DLoc =
PathDiagnosticLocation::createBegin(D, BR.getSourceManager());
- BR.EmitBasicReport(D, this, Name, categories::CoreFoundationObjectiveC,
- OS.str(), DLoc);
+ BR.EmitBasicReport(D, this, Name, categories::CoreFoundationObjectiveC, Buf,
+ DLoc);
return;
}
}
@@ -585,7 +585,7 @@ void ObjCDeallocChecker::diagnoseMissingReleases(CheckerContext &C) const {
" before '[super dealloc]'";
auto BR = std::make_unique<PathSensitiveBugReport>(MissingReleaseBugType,
- OS.str(), ErrNode);
+ Buf, ErrNode);
C.emitReport(std::move(BR));
}
@@ -706,8 +706,8 @@ bool ObjCDeallocChecker::diagnoseExtraRelease(SymbolRef ReleasedValue,
OS << " property but was released in 'dealloc'";
}
- auto BR = std::make_unique<PathSensitiveBugReport>(ExtraReleaseBugType,
- OS.str(), ErrNode);
+ auto BR = std::make_unique<PathSensitiveBugReport>(ExtraReleaseBugType, Buf,
+ ErrNode);
BR->addRange(M.getOriginExpr()->getSourceRange());
C.emitReport(std::move(BR));
@@ -749,7 +749,7 @@ bool ObjCDeallocChecker::diagnoseMistakenDealloc(SymbolRef DeallocedValue,
<< "' should be released rather than deallocated";
auto BR = std::make_unique<PathSensitiveBugReport>(MistakenDeallocBugType,
- OS.str(), ErrNode);
+ Buf, ErrNode);
BR->addRange(M.getOriginExpr()->getSourceRange());
C.emitReport(std::move(BR));
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index 086c3e5e49b77..f73c9007c1838 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -411,11 +411,11 @@ annotateConsumedSummaryMismatch(const ExplodedNode *N,
}
}
- if (os.str().empty())
+ if (sbuf.empty())
return nullptr;
PathDiagnosticLocation L = PathDiagnosticLocation::create(CallExitLoc, SM);
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(L, sbuf);
}
/// Annotate the parameter at the analysis entry point.
@@ -446,7 +446,7 @@ annotateStartParameter(const ExplodedNode *N, SymbolRef Sym,
assert(CurrT->getCount() == 0);
os << "0";
}
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(L, s);
}
PathDiagnosticPieceRef
@@ -493,7 +493,7 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
if (PrevT && IsFreeUnowned && CurrV.isNotOwned() && PrevT->isOwned()) {
os << "Object is now not exclusively owned";
auto Pos = PathDiagnosticLocation::create(N->getLocation(), SM);
- return std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf);
}
// This is the allocation site since the previous node had no bindings
@@ -535,7 +535,7 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
}
PathDiagnosticLocation Pos(S, SM, N->getLocationContext());
- return std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf);
}
// Gather up the effects that were performed on the object at this
@@ -582,13 +582,13 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
if (!shouldGenerateNote(os, PrevT, CurrV, DeallocSent))
return nullptr;
- if (os.str().empty())
+ if (sbuf.empty())
return nullptr; // We have nothing to say!
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
- auto P = std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+ auto P = std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf);
// Add the range by scanning the children of the statement for any bindings
// to Sym.
@@ -831,7 +831,7 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
<< RV->getCount();
}
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(L, sbuf);
}
RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index c1a8aad83a90b..977deb3182deb 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -3900,7 +3900,7 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
State->printDOT(Out, N->getLocationContext(), Space);
Out << "\\l}\\l";
- return Out.str();
+ return Buf;
}
};
diff --git a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index d8c257dbd731e..fab8e35962d75 100644
--- a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -3283,7 +3283,7 @@ static std::string toString(const SymbolRef &Sym) {
std::string S;
llvm::raw_string_ostream O(S);
Sym->dumpToStream(O);
- return O.str();
+ return S;
}
void RangeConstraintManager::printConstraints(raw_ostream &Out,
@@ -3354,7 +3354,7 @@ static std::string toString(ProgramStateRef State, EquivalenceClass Class) {
Out << "\"" << ClassMember << "\"";
}
Out << " ]";
- return Out.str();
+ return Str;
}
void RangeConstraintManager::printEquivalenceClasses(raw_ostream &Out,
diff --git a/clang/lib/StaticAnalyzer/Core/SVals.cpp b/clang/lib/StaticAnalyzer/Core/SVals.cpp
index 0e1351215bb42..291e4fa752a8f 100644
--- a/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -271,7 +271,7 @@ void SVal::printJson(raw_ostream &Out, bool AddQuotes) const {
dumpToStream(TempOut);
- Out << JsonFormat(TempOut.str(), AddQuotes);
+ Out << JsonFormat(Buf, AddQuotes);
}
void SVal::dumpToStream(raw_ostream &os) const {
>From de5ff38a0d20faedac43a1d838fb65b67e77c34e Mon Sep 17 00:00:00 2001
From: Min-Yih Hsu <min.hsu at sifive.com>
Date: Tue, 2 Jul 2024 18:21:41 -0700
Subject: [PATCH 060/246] [LoopIdiomVectorize][NFC] Factoring out the part that
handles vectorization strategy (#94682)
This paves the way for porting LIV (LoopIdiomVectorize) to RISC-V, which
uses VP intrinsics for vectorization.
NFC.
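The shape of the refactor, as a sketch with illustrative names and stubbed
helpers (not the pass itself): expandFindMismatch keeps building the
surrounding control flow, while the vector-loop emission moves into a helper
so an alternative strategy (added in a later patch) can slot in.

    enum class VectorizeStyle { Masked, Predicated };

    struct FindMismatchExpander {
      VectorizeStyle Style;
      // Stubs standing in for the IR-emitting helpers.
      int emitMasked(int Start, int End) { (void)Start; return End; }
      int emitPredicated(int Start, int End) { (void)Start; return End; }
      // The shared skeleton calls one strategy-specific emitter.
      int emitVectorLoop(int Start, int End) {
        return Style == VectorizeStyle::Masked ? emitMasked(Start, End)
                                               : emitPredicated(Start, End);
      }
    };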
---
.../Vectorize/LoopIdiomVectorize.cpp | 240 ++++++++++--------
1 file changed, 133 insertions(+), 107 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
index 63f14208bf556..c7a8700e14531 100644
--- a/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
@@ -78,6 +78,13 @@ class LoopIdiomVectorize {
const TargetTransformInfo *TTI;
const DataLayout *DL;
+ // Blocks that will be used for inserting vectorized code.
+ BasicBlock *EndBlock = nullptr;
+ BasicBlock *VectorLoopPreheaderBlock = nullptr;
+ BasicBlock *VectorLoopStartBlock = nullptr;
+ BasicBlock *VectorLoopMismatchBlock = nullptr;
+ BasicBlock *VectorLoopIncBlock = nullptr;
+
public:
explicit LoopIdiomVectorize(DominatorTree *DT, LoopInfo *LI,
const TargetTransformInfo *TTI,
@@ -95,9 +102,16 @@ class LoopIdiomVectorize {
SmallVectorImpl<BasicBlock *> &ExitBlocks);
bool recognizeByteCompare();
+
Value *expandFindMismatch(IRBuilder<> &Builder, DomTreeUpdater &DTU,
GetElementPtrInst *GEPA, GetElementPtrInst *GEPB,
Instruction *Index, Value *Start, Value *MaxLen);
+
+ Value *createMaskedFindMismatch(IRBuilder<> &Builder, DomTreeUpdater &DTU,
+ GetElementPtrInst *GEPA,
+ GetElementPtrInst *GEPB, Value *ExtStart,
+ Value *ExtEnd);
+
void transformByteCompare(GetElementPtrInst *GEPA, GetElementPtrInst *GEPB,
PHINode *IndPhi, Value *MaxLen, Instruction *Index,
Value *Start, bool IncIdx, BasicBlock *FoundBB,
@@ -331,6 +345,115 @@ bool LoopIdiomVectorize::recognizeByteCompare() {
return true;
}
+Value *LoopIdiomVectorize::createMaskedFindMismatch(
+ IRBuilder<> &Builder, DomTreeUpdater &DTU, GetElementPtrInst *GEPA,
+ GetElementPtrInst *GEPB, Value *ExtStart, Value *ExtEnd) {
+ Type *I64Type = Builder.getInt64Ty();
+ Type *ResType = Builder.getInt32Ty();
+ Type *LoadType = Builder.getInt8Ty();
+ Value *PtrA = GEPA->getPointerOperand();
+ Value *PtrB = GEPB->getPointerOperand();
+
+ // At this point we know two things must be true:
+ // 1. Start <= End
+ // 2. ExtMaxLen <= MinPageSize due to the page checks.
+ // Therefore, we know that we can use a 64-bit induction variable that
+ // starts from 0 -> ExtMaxLen and it will not overflow.
+ ScalableVectorType *PredVTy =
+ ScalableVectorType::get(Builder.getInt1Ty(), 16);
+
+ Value *InitialPred = Builder.CreateIntrinsic(
+ Intrinsic::get_active_lane_mask, {PredVTy, I64Type}, {ExtStart, ExtEnd});
+
+ Value *VecLen = Builder.CreateIntrinsic(Intrinsic::vscale, {I64Type}, {});
+ VecLen = Builder.CreateMul(VecLen, ConstantInt::get(I64Type, 16), "",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+
+ Value *PFalse = Builder.CreateVectorSplat(PredVTy->getElementCount(),
+ Builder.getInt1(false));
+
+ BranchInst *JumpToVectorLoop = BranchInst::Create(VectorLoopStartBlock);
+ Builder.Insert(JumpToVectorLoop);
+
+ DTU.applyUpdates({{DominatorTree::Insert, VectorLoopPreheaderBlock,
+ VectorLoopStartBlock}});
+
+ // Set up the first vector loop block by creating the PHIs, doing the vector
+ // loads and comparing the vectors.
+ Builder.SetInsertPoint(VectorLoopStartBlock);
+ PHINode *LoopPred = Builder.CreatePHI(PredVTy, 2, "mismatch_vec_loop_pred");
+ LoopPred->addIncoming(InitialPred, VectorLoopPreheaderBlock);
+ PHINode *VectorIndexPhi = Builder.CreatePHI(I64Type, 2, "mismatch_vec_index");
+ VectorIndexPhi->addIncoming(ExtStart, VectorLoopPreheaderBlock);
+ Type *VectorLoadType = ScalableVectorType::get(Builder.getInt8Ty(), 16);
+ Value *Passthru = ConstantInt::getNullValue(VectorLoadType);
+
+ Value *VectorLhsGep =
+ Builder.CreateGEP(LoadType, PtrA, VectorIndexPhi, "", GEPA->isInBounds());
+ Value *VectorLhsLoad = Builder.CreateMaskedLoad(VectorLoadType, VectorLhsGep,
+ Align(1), LoopPred, Passthru);
+
+ Value *VectorRhsGep =
+ Builder.CreateGEP(LoadType, PtrB, VectorIndexPhi, "", GEPB->isInBounds());
+ Value *VectorRhsLoad = Builder.CreateMaskedLoad(VectorLoadType, VectorRhsGep,
+ Align(1), LoopPred, Passthru);
+
+ Value *VectorMatchCmp = Builder.CreateICmpNE(VectorLhsLoad, VectorRhsLoad);
+ VectorMatchCmp = Builder.CreateSelect(LoopPred, VectorMatchCmp, PFalse);
+ Value *VectorMatchHasActiveLanes = Builder.CreateOrReduce(VectorMatchCmp);
+ BranchInst *VectorEarlyExit = BranchInst::Create(
+ VectorLoopMismatchBlock, VectorLoopIncBlock, VectorMatchHasActiveLanes);
+ Builder.Insert(VectorEarlyExit);
+
+ DTU.applyUpdates(
+ {{DominatorTree::Insert, VectorLoopStartBlock, VectorLoopMismatchBlock},
+ {DominatorTree::Insert, VectorLoopStartBlock, VectorLoopIncBlock}});
+
+ // Increment the index counter and calculate the predicate for the next
+ // iteration of the loop. We branch back to the start of the loop if there
+ // is at least one active lane.
+ Builder.SetInsertPoint(VectorLoopIncBlock);
+ Value *NewVectorIndexPhi =
+ Builder.CreateAdd(VectorIndexPhi, VecLen, "",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ VectorIndexPhi->addIncoming(NewVectorIndexPhi, VectorLoopIncBlock);
+ Value *NewPred =
+ Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
+ {PredVTy, I64Type}, {NewVectorIndexPhi, ExtEnd});
+ LoopPred->addIncoming(NewPred, VectorLoopIncBlock);
+
+ Value *PredHasActiveLanes =
+ Builder.CreateExtractElement(NewPred, uint64_t(0));
+ BranchInst *VectorLoopBranchBack =
+ BranchInst::Create(VectorLoopStartBlock, EndBlock, PredHasActiveLanes);
+ Builder.Insert(VectorLoopBranchBack);
+
+ DTU.applyUpdates(
+ {{DominatorTree::Insert, VectorLoopIncBlock, VectorLoopStartBlock},
+ {DominatorTree::Insert, VectorLoopIncBlock, EndBlock}});
+
+ // If we found a mismatch then we need to calculate which lane in the vector
+ // had a mismatch and add that on to the current loop index.
+ Builder.SetInsertPoint(VectorLoopMismatchBlock);
+ PHINode *FoundPred = Builder.CreatePHI(PredVTy, 1, "mismatch_vec_found_pred");
+ FoundPred->addIncoming(VectorMatchCmp, VectorLoopStartBlock);
+ PHINode *LastLoopPred =
+ Builder.CreatePHI(PredVTy, 1, "mismatch_vec_last_loop_pred");
+ LastLoopPred->addIncoming(LoopPred, VectorLoopStartBlock);
+ PHINode *VectorFoundIndex =
+ Builder.CreatePHI(I64Type, 1, "mismatch_vec_found_index");
+ VectorFoundIndex->addIncoming(VectorIndexPhi, VectorLoopStartBlock);
+
+ Value *PredMatchCmp = Builder.CreateAnd(LastLoopPred, FoundPred);
+ Value *Ctz = Builder.CreateIntrinsic(
+ Intrinsic::experimental_cttz_elts, {ResType, PredMatchCmp->getType()},
+ {PredMatchCmp, /*ZeroIsPoison=*/Builder.getInt1(true)});
+ Ctz = Builder.CreateZExt(Ctz, I64Type);
+ Value *VectorLoopRes64 = Builder.CreateAdd(VectorFoundIndex, Ctz, "",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ return Builder.CreateTrunc(VectorLoopRes64, ResType);
+}
+
Value *LoopIdiomVectorize::expandFindMismatch(
IRBuilder<> &Builder, DomTreeUpdater &DTU, GetElementPtrInst *GEPA,
GetElementPtrInst *GEPB, Instruction *Index, Value *Start, Value *MaxLen) {
@@ -345,8 +468,7 @@ Value *LoopIdiomVectorize::expandFindMismatch(
Type *ResType = Builder.getInt32Ty();
// Split block in the original loop preheader.
- BasicBlock *EndBlock =
- SplitBlock(Preheader, PHBranch, DT, LI, nullptr, "mismatch_end");
+ EndBlock = SplitBlock(Preheader, PHBranch, DT, LI, nullptr, "mismatch_end");
// Create the blocks that we're going to need:
// 1. A block for checking the zero-extended length exceeds 0
@@ -370,17 +492,17 @@ Value *LoopIdiomVectorize::expandFindMismatch(
BasicBlock *MemCheckBlock = BasicBlock::Create(
Ctx, "mismatch_mem_check", EndBlock->getParent(), EndBlock);
- BasicBlock *VectorLoopPreheaderBlock = BasicBlock::Create(
+ VectorLoopPreheaderBlock = BasicBlock::Create(
Ctx, "mismatch_vec_loop_preheader", EndBlock->getParent(), EndBlock);
- BasicBlock *VectorLoopStartBlock = BasicBlock::Create(
- Ctx, "mismatch_vec_loop", EndBlock->getParent(), EndBlock);
+ VectorLoopStartBlock = BasicBlock::Create(Ctx, "mismatch_vec_loop",
+ EndBlock->getParent(), EndBlock);
- BasicBlock *VectorLoopIncBlock = BasicBlock::Create(
- Ctx, "mismatch_vec_loop_inc", EndBlock->getParent(), EndBlock);
+ VectorLoopIncBlock = BasicBlock::Create(Ctx, "mismatch_vec_loop_inc",
+ EndBlock->getParent(), EndBlock);
- BasicBlock *VectorLoopMismatchBlock = BasicBlock::Create(
- Ctx, "mismatch_vec_loop_found", EndBlock->getParent(), EndBlock);
+ VectorLoopMismatchBlock = BasicBlock::Create(Ctx, "mismatch_vec_loop_found",
+ EndBlock->getParent(), EndBlock);
BasicBlock *LoopPreHeaderBlock = BasicBlock::Create(
Ctx, "mismatch_loop_pre", EndBlock->getParent(), EndBlock);
@@ -491,104 +613,8 @@ Value *LoopIdiomVectorize::expandFindMismatch(
// processed in each iteration, etc.
Builder.SetInsertPoint(VectorLoopPreheaderBlock);
- // At this point we know two things must be true:
- // 1. Start <= End
- // 2. ExtMaxLen <= MinPageSize due to the page checks.
- // Therefore, we know that we can use a 64-bit induction variable that
- // starts from 0 -> ExtMaxLen and it will not overflow.
- ScalableVectorType *PredVTy =
- ScalableVectorType::get(Builder.getInt1Ty(), 16);
-
- Value *InitialPred = Builder.CreateIntrinsic(
- Intrinsic::get_active_lane_mask, {PredVTy, I64Type}, {ExtStart, ExtEnd});
-
- Value *VecLen = Builder.CreateIntrinsic(Intrinsic::vscale, {I64Type}, {});
- VecLen = Builder.CreateMul(VecLen, ConstantInt::get(I64Type, 16), "",
- /*HasNUW=*/true, /*HasNSW=*/true);
-
- Value *PFalse = Builder.CreateVectorSplat(PredVTy->getElementCount(),
- Builder.getInt1(false));
-
- BranchInst *JumpToVectorLoop = BranchInst::Create(VectorLoopStartBlock);
- Builder.Insert(JumpToVectorLoop);
-
- DTU.applyUpdates({{DominatorTree::Insert, VectorLoopPreheaderBlock,
- VectorLoopStartBlock}});
-
- // Set up the first vector loop block by creating the PHIs, doing the vector
- // loads and comparing the vectors.
- Builder.SetInsertPoint(VectorLoopStartBlock);
- PHINode *LoopPred = Builder.CreatePHI(PredVTy, 2, "mismatch_vec_loop_pred");
- LoopPred->addIncoming(InitialPred, VectorLoopPreheaderBlock);
- PHINode *VectorIndexPhi = Builder.CreatePHI(I64Type, 2, "mismatch_vec_index");
- VectorIndexPhi->addIncoming(ExtStart, VectorLoopPreheaderBlock);
- Type *VectorLoadType = ScalableVectorType::get(Builder.getInt8Ty(), 16);
- Value *Passthru = ConstantInt::getNullValue(VectorLoadType);
-
- Value *VectorLhsGep =
- Builder.CreateGEP(LoadType, PtrA, VectorIndexPhi, "", GEPA->isInBounds());
- Value *VectorLhsLoad = Builder.CreateMaskedLoad(VectorLoadType, VectorLhsGep,
- Align(1), LoopPred, Passthru);
-
- Value *VectorRhsGep =
- Builder.CreateGEP(LoadType, PtrB, VectorIndexPhi, "", GEPB->isInBounds());
- Value *VectorRhsLoad = Builder.CreateMaskedLoad(VectorLoadType, VectorRhsGep,
- Align(1), LoopPred, Passthru);
-
- Value *VectorMatchCmp = Builder.CreateICmpNE(VectorLhsLoad, VectorRhsLoad);
- VectorMatchCmp = Builder.CreateSelect(LoopPred, VectorMatchCmp, PFalse);
- Value *VectorMatchHasActiveLanes = Builder.CreateOrReduce(VectorMatchCmp);
- BranchInst *VectorEarlyExit = BranchInst::Create(
- VectorLoopMismatchBlock, VectorLoopIncBlock, VectorMatchHasActiveLanes);
- Builder.Insert(VectorEarlyExit);
-
- DTU.applyUpdates(
- {{DominatorTree::Insert, VectorLoopStartBlock, VectorLoopMismatchBlock},
- {DominatorTree::Insert, VectorLoopStartBlock, VectorLoopIncBlock}});
-
- // Increment the index counter and calculate the predicate for the next
- // iteration of the loop. We branch back to the start of the loop if there
- // is at least one active lane.
- Builder.SetInsertPoint(VectorLoopIncBlock);
- Value *NewVectorIndexPhi =
- Builder.CreateAdd(VectorIndexPhi, VecLen, "",
- /*HasNUW=*/true, /*HasNSW=*/true);
- VectorIndexPhi->addIncoming(NewVectorIndexPhi, VectorLoopIncBlock);
- Value *NewPred =
- Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
- {PredVTy, I64Type}, {NewVectorIndexPhi, ExtEnd});
- LoopPred->addIncoming(NewPred, VectorLoopIncBlock);
-
- Value *PredHasActiveLanes =
- Builder.CreateExtractElement(NewPred, uint64_t(0));
- BranchInst *VectorLoopBranchBack =
- BranchInst::Create(VectorLoopStartBlock, EndBlock, PredHasActiveLanes);
- Builder.Insert(VectorLoopBranchBack);
-
- DTU.applyUpdates(
- {{DominatorTree::Insert, VectorLoopIncBlock, VectorLoopStartBlock},
- {DominatorTree::Insert, VectorLoopIncBlock, EndBlock}});
-
- // If we found a mismatch then we need to calculate which lane in the vector
- // had a mismatch and add that on to the current loop index.
- Builder.SetInsertPoint(VectorLoopMismatchBlock);
- PHINode *FoundPred = Builder.CreatePHI(PredVTy, 1, "mismatch_vec_found_pred");
- FoundPred->addIncoming(VectorMatchCmp, VectorLoopStartBlock);
- PHINode *LastLoopPred =
- Builder.CreatePHI(PredVTy, 1, "mismatch_vec_last_loop_pred");
- LastLoopPred->addIncoming(LoopPred, VectorLoopStartBlock);
- PHINode *VectorFoundIndex =
- Builder.CreatePHI(I64Type, 1, "mismatch_vec_found_index");
- VectorFoundIndex->addIncoming(VectorIndexPhi, VectorLoopStartBlock);
-
- Value *PredMatchCmp = Builder.CreateAnd(LastLoopPred, FoundPred);
- Value *Ctz = Builder.CreateIntrinsic(
- Intrinsic::experimental_cttz_elts, {ResType, PredMatchCmp->getType()},
- {PredMatchCmp, /*ZeroIsPoison=*/Builder.getInt1(true)});
- Ctz = Builder.CreateZExt(Ctz, I64Type);
- Value *VectorLoopRes64 = Builder.CreateAdd(VectorFoundIndex, Ctz, "",
- /*HasNUW=*/true, /*HasNSW=*/true);
- Value *VectorLoopRes = Builder.CreateTrunc(VectorLoopRes64, ResType);
+ Value *VectorLoopRes =
+ createMaskedFindMismatch(Builder, DTU, GEPA, GEPB, ExtStart, ExtEnd);
Builder.Insert(BranchInst::Create(EndBlock));
>From a355c2d07464f020c9a66cbd6189c22a42c2be2e Mon Sep 17 00:00:00 2001
From: Shan Huang <52285902006 at stu.ecnu.edu.cn>
Date: Wed, 3 Jul 2024 09:39:17 +0800
Subject: [PATCH 061/246] [DebugInfo][InferAddressSpaces] Fix the missing debug
location update for the new addrspacecast (#97038)
Fixes #97006.
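The rule the fix enforces, shown as a self-contained sketch that mirrors the
one-line hunk below (illustrative wrapper function, not the pass itself): an
instruction synthesized to replace another should inherit its debug location,
so source-level stepping and profile attribution stay on the original line.

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    static AddrSpaceCastInst *castAfter(Instruction *I, Type *NewPtrTy) {
      auto *NewI = new AddrSpaceCastInst(I, NewPtrTy);
      NewI->insertAfter(I);
      NewI->setDebugLoc(I->getDebugLoc()); // The line this patch adds.
      return NewI;
    }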
---
.../Transforms/Scalar/InferAddressSpaces.cpp | 1 +
.../preserving-debugloc-addrspacecast.ll | 33 +++++++++++++++++++
2 files changed, 34 insertions(+)
create mode 100644 llvm/test/Transforms/InferAddressSpaces/AMDGPU/preserving-debugloc-addrspacecast.ll
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 5074d049d2521..c9be8ee00cdc7 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -642,6 +642,7 @@ Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
Type *NewPtrTy = getPtrOrVecOfPtrsWithNewAS(I->getType(), AS);
auto *NewI = new AddrSpaceCastInst(I, NewPtrTy);
NewI->insertAfter(I);
+ NewI->setDebugLoc(I->getDebugLoc());
return NewI;
}
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/preserving-debugloc-addrspacecast.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/preserving-debugloc-addrspacecast.ll
new file mode 100644
index 0000000000000..b60d7d7da088c
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/preserving-debugloc-addrspacecast.ll
@@ -0,0 +1,33 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=infer-address-spaces -o - %s | FileCheck %s
+
+; Check that InferAddressSpaces's cloneInstructionWithNewAddressSpace() propagates
+; the debug location to new addrspacecast instruction which casts `%p` in the following test.
+
+ at c0 = addrspace(4) global ptr poison
+
+define float @generic_ptr_from_constant() !dbg !5 {
+; CHECK-LABEL: define float @generic_ptr_from_constant(
+; CHECK: [[TMP1:%.*]] = addrspacecast ptr [[P:%.*]] to ptr addrspace(1), !dbg [[DBG8:![0-9]+]]
+;
+ %p = load ptr, ptr addrspace(4) @c0, align 8, !dbg !8
+ %v = load float, ptr %p, align 4, !dbg !9
+ ret float %v, !dbg !10
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+; CHECK: [[DBG8]] = !DILocation(line: 1,
+;
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "temp.ll", directory: "/")
+!2 = !{i32 3}
+!3 = !{i32 0}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "generic_ptr_from_constant", linkageName: "generic_ptr_from_constant", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !DILocation(line: 1, column: 1, scope: !5)
+!9 = !DILocation(line: 2, column: 1, scope: !5)
+!10 = !DILocation(line: 3, column: 1, scope: !5)
>From 8b55d342b6375ec64985272020d21f29422dce6a Mon Sep 17 00:00:00 2001
From: Min-Yih Hsu <min.hsu at sifive.com>
Date: Tue, 2 Jul 2024 18:48:28 -0700
Subject: [PATCH 062/246] [RISCV][LoopIdiomVectorize] Support VP intrinsics in
LoopIdiomVectorize (#94082)
Teach LoopIdiomVectorize to use VP intrinsics to replace byte-compare
loops. Right now only RISC-V uses this style of LoopIdiomVectorize.
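A scalar model of what the new predicated path computes, with illustrative
names: each outer iteration processes VL bytes, where VL here models
@llvm.experimental.get.vector.length (which on RVV may legally return fewer
than min(AVL, VLMAX) lanes, so treat the std::min below as a simplification).

    #include <algorithm>
    #include <cstdint>

    // Returns the index of the first byte where A and B differ in
    // [Start, End), or End if there is none. The vp.load / vp.icmp ne /
    // vp.cttz.elts sequence in the patch performs the inner search as a
    // single predicated vector step with an early exit on mismatch.
    static uint32_t findMismatch(const uint8_t *A, const uint8_t *B,
                                 uint64_t Start, uint64_t End,
                                 uint64_t VLMax) {
      for (uint64_t I = Start; I < End;) {
        uint64_t VL = std::min(End - I, VLMax); // get.vector.length model
        for (uint64_t Lane = 0; Lane < VL; ++Lane)
          if (A[I + Lane] != B[I + Lane])           // vp.icmp ne
            return static_cast<uint32_t>(I + Lane); // index + cttz.elts
        I += VL;                                    // advance by VL
      }
      return static_cast<uint32_t>(End);
    }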
---
.../Transforms/Vectorize/LoopIdiomVectorize.h | 17 +-
llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 9 +
llvm/lib/Target/RISCV/RISCVTargetMachine.h | 1 +
.../Target/RISCV/RISCVTargetTransformInfo.h | 2 +
.../Vectorize/LoopIdiomVectorize.cpp | 178 +-
.../RISCV/rvv/vfirst-byte-compare-index.ll | 177 ++
.../LoopIdiom/RISCV/byte-compare-index.ll | 2309 +++++++++++++++++
7 files changed, 2674 insertions(+), 19 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfirst-byte-compare-index.ll
create mode 100644 llvm/test/Transforms/LoopIdiom/RISCV/byte-compare-index.ll
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopIdiomVectorize.h b/llvm/include/llvm/Transforms/Vectorize/LoopIdiomVectorize.h
index 56f44b7dc6b2a..ef6e0e0687809 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopIdiomVectorize.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopIdiomVectorize.h
@@ -13,7 +13,22 @@
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
-struct LoopIdiomVectorizePass : PassInfoMixin<LoopIdiomVectorizePass> {
+enum class LoopIdiomVectorizeStyle { Masked, Predicated };
+
+class LoopIdiomVectorizePass : public PassInfoMixin<LoopIdiomVectorizePass> {
+ LoopIdiomVectorizeStyle VectorizeStyle = LoopIdiomVectorizeStyle::Masked;
+
+ // The VF used in vectorizing the byte compare pattern.
+ unsigned ByteCompareVF = 16;
+
+public:
+ LoopIdiomVectorizePass() = default;
+ explicit LoopIdiomVectorizePass(LoopIdiomVectorizeStyle S)
+ : VectorizeStyle(S) {}
+
+ LoopIdiomVectorizePass(LoopIdiomVectorizeStyle S, unsigned BCVF)
+ : VectorizeStyle(S), ByteCompareVF(BCVF) {}
+
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index f76aef742290c..c132a6ef9611c 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -33,10 +33,12 @@
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h"
#include <optional>
using namespace llvm;
@@ -572,6 +574,13 @@ void RISCVPassConfig::addPostRegAlloc() {
addPass(createRISCVRedundantCopyEliminationPass());
}
+void RISCVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
+ PB.registerLateLoopOptimizationsEPCallback([=](LoopPassManager &LPM,
+ OptimizationLevel Level) {
+ LPM.addPass(LoopIdiomVectorizePass(LoopIdiomVectorizeStyle::Predicated));
+ });
+}
+
yaml::MachineFunctionInfo *
RISCVTargetMachine::createDefaultFuncInfoYAML() const {
return new yaml::RISCVMachineFunctionInfo();
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.h b/llvm/lib/Target/RISCV/RISCVTargetMachine.h
index 68dfb3c81f2fe..ce7b7907e1f3a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.h
@@ -59,6 +59,7 @@ class RISCVTargetMachine : public LLVMTargetMachine {
PerFunctionMIParsingState &PFS,
SMDiagnostic &Error,
SMRange &SourceRange) const override;
+ void registerPassBuilderCallbacks(PassBuilder &PB) override;
};
} // namespace llvm
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index c4d10aada1f4c..9c37a4f6ec2d0 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -397,6 +397,8 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
bool shouldFoldTerminatingConditionAfterLSR() const {
return true;
}
+
+ std::optional<unsigned> getMinPageSize() const { return 4096; }
};
} // end namespace llvm
diff --git a/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
index c7a8700e14531..64e04cae2773f 100644
--- a/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
@@ -59,19 +59,34 @@ static cl::opt<bool> DisableAll("disable-loop-idiom-vectorize-all", cl::Hidden,
cl::init(false),
cl::desc("Disable Loop Idiom Vectorize Pass."));
+static cl::opt<LoopIdiomVectorizeStyle>
+ LITVecStyle("loop-idiom-vectorize-style", cl::Hidden,
+ cl::desc("The vectorization style for loop idiom transform."),
+ cl::values(clEnumValN(LoopIdiomVectorizeStyle::Masked, "masked",
+ "Use masked vector intrinsics"),
+ clEnumValN(LoopIdiomVectorizeStyle::Predicated,
+ "predicated", "Use VP intrinsics")),
+ cl::init(LoopIdiomVectorizeStyle::Masked));
+
static cl::opt<bool>
DisableByteCmp("disable-loop-idiom-vectorize-bytecmp", cl::Hidden,
cl::init(false),
cl::desc("Proceed with Loop Idiom Vectorize Pass, but do "
"not convert byte-compare loop(s)."));
+static cl::opt<unsigned>
+ ByteCmpVF("loop-idiom-vectorize-bytecmp-vf", cl::Hidden,
+ cl::desc("The vectorization factor for byte-compare patterns."),
+ cl::init(16));
+
static cl::opt<bool>
VerifyLoops("loop-idiom-vectorize-verify", cl::Hidden, cl::init(false),
cl::desc("Verify loops generated Loop Idiom Vectorize Pass."));
namespace {
-
class LoopIdiomVectorize {
+ LoopIdiomVectorizeStyle VectorizeStyle;
+ unsigned ByteCompareVF;
Loop *CurLoop = nullptr;
DominatorTree *DT;
LoopInfo *LI;
@@ -86,10 +101,11 @@ class LoopIdiomVectorize {
BasicBlock *VectorLoopIncBlock = nullptr;
public:
- explicit LoopIdiomVectorize(DominatorTree *DT, LoopInfo *LI,
- const TargetTransformInfo *TTI,
- const DataLayout *DL)
- : DT(DT), LI(LI), TTI(TTI), DL(DL) {}
+ LoopIdiomVectorize(LoopIdiomVectorizeStyle S, unsigned VF, DominatorTree *DT,
+ LoopInfo *LI, const TargetTransformInfo *TTI,
+ const DataLayout *DL)
+ : VectorizeStyle(S), ByteCompareVF(VF), DT(DT), LI(LI), TTI(TTI), DL(DL) {
+ }
bool run(Loop *L);
@@ -111,6 +127,10 @@ class LoopIdiomVectorize {
GetElementPtrInst *GEPA,
GetElementPtrInst *GEPB, Value *ExtStart,
Value *ExtEnd);
+ Value *createPredicatedFindMismatch(IRBuilder<> &Builder, DomTreeUpdater &DTU,
+ GetElementPtrInst *GEPA,
+ GetElementPtrInst *GEPB, Value *ExtStart,
+ Value *ExtEnd);
void transformByteCompare(GetElementPtrInst *GEPA, GetElementPtrInst *GEPB,
PHINode *IndPhi, Value *MaxLen, Instruction *Index,
@@ -128,8 +148,16 @@ PreservedAnalyses LoopIdiomVectorizePass::run(Loop &L, LoopAnalysisManager &AM,
const auto *DL = &L.getHeader()->getDataLayout();
- LoopIdiomVectorize LIT(&AR.DT, &AR.LI, &AR.TTI, DL);
- if (!LIT.run(&L))
+ LoopIdiomVectorizeStyle VecStyle = VectorizeStyle;
+ if (LITVecStyle.getNumOccurrences())
+ VecStyle = LITVecStyle;
+
+ unsigned BCVF = ByteCompareVF;
+ if (ByteCmpVF.getNumOccurrences())
+ BCVF = ByteCmpVF;
+
+ LoopIdiomVectorize LIV(VecStyle, BCVF, &AR.DT, &AR.LI, &AR.TTI, DL);
+ if (!LIV.run(&L))
return PreservedAnalyses::all();
return PreservedAnalyses::none();
@@ -354,20 +382,16 @@ Value *LoopIdiomVectorize::createMaskedFindMismatch(
Value *PtrA = GEPA->getPointerOperand();
Value *PtrB = GEPB->getPointerOperand();
- // At this point we know two things must be true:
- // 1. Start <= End
- // 2. ExtMaxLen <= MinPageSize due to the page checks.
- // Therefore, we know that we can use a 64-bit induction variable that
- // starts from 0 -> ExtMaxLen and it will not overflow.
ScalableVectorType *PredVTy =
- ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ ScalableVectorType::get(Builder.getInt1Ty(), ByteCompareVF);
Value *InitialPred = Builder.CreateIntrinsic(
Intrinsic::get_active_lane_mask, {PredVTy, I64Type}, {ExtStart, ExtEnd});
Value *VecLen = Builder.CreateIntrinsic(Intrinsic::vscale, {I64Type}, {});
- VecLen = Builder.CreateMul(VecLen, ConstantInt::get(I64Type, 16), "",
- /*HasNUW=*/true, /*HasNSW=*/true);
+ VecLen =
+ Builder.CreateMul(VecLen, ConstantInt::get(I64Type, ByteCompareVF), "",
+ /*HasNUW=*/true, /*HasNSW=*/true);
Value *PFalse = Builder.CreateVectorSplat(PredVTy->getElementCount(),
Builder.getInt1(false));
@@ -385,7 +409,8 @@ Value *LoopIdiomVectorize::createMaskedFindMismatch(
LoopPred->addIncoming(InitialPred, VectorLoopPreheaderBlock);
PHINode *VectorIndexPhi = Builder.CreatePHI(I64Type, 2, "mismatch_vec_index");
VectorIndexPhi->addIncoming(ExtStart, VectorLoopPreheaderBlock);
- Type *VectorLoadType = ScalableVectorType::get(Builder.getInt8Ty(), 16);
+ Type *VectorLoadType =
+ ScalableVectorType::get(Builder.getInt8Ty(), ByteCompareVF);
Value *Passthru = ConstantInt::getNullValue(VectorLoadType);
Value *VectorLhsGep =
@@ -454,6 +479,109 @@ Value *LoopIdiomVectorize::createMaskedFindMismatch(
return Builder.CreateTrunc(VectorLoopRes64, ResType);
}
+Value *LoopIdiomVectorize::createPredicatedFindMismatch(
+ IRBuilder<> &Builder, DomTreeUpdater &DTU, GetElementPtrInst *GEPA,
+ GetElementPtrInst *GEPB, Value *ExtStart, Value *ExtEnd) {
+ Type *I64Type = Builder.getInt64Ty();
+ Type *I32Type = Builder.getInt32Ty();
+ Type *ResType = I32Type;
+ Type *LoadType = Builder.getInt8Ty();
+ Value *PtrA = GEPA->getPointerOperand();
+ Value *PtrB = GEPB->getPointerOperand();
+
+ auto *JumpToVectorLoop = BranchInst::Create(VectorLoopStartBlock);
+ Builder.Insert(JumpToVectorLoop);
+
+ DTU.applyUpdates({{DominatorTree::Insert, VectorLoopPreheaderBlock,
+ VectorLoopStartBlock}});
+
+ // Set up the first Vector loop block by creating the PHIs, doing the vector
+ // loads and comparing the vectors.
+ Builder.SetInsertPoint(VectorLoopStartBlock);
+ auto *VectorIndexPhi = Builder.CreatePHI(I64Type, 2, "mismatch_vector_index");
+ VectorIndexPhi->addIncoming(ExtStart, VectorLoopPreheaderBlock);
+
+ // Calculate AVL by subtracting the vector loop index from the trip count
+ Value *AVL = Builder.CreateSub(ExtEnd, VectorIndexPhi, "avl", /*HasNUW=*/true,
+ /*HasNSW=*/true);
+
+ auto *VectorLoadType = ScalableVectorType::get(LoadType, ByteCompareVF);
+ auto *VF = ConstantInt::get(I32Type, ByteCompareVF);
+
+ Value *VL = Builder.CreateIntrinsic(Intrinsic::experimental_get_vector_length,
+ {I64Type}, {AVL, VF, Builder.getTrue()});
+ Value *GepOffset = VectorIndexPhi;
+
+ Value *VectorLhsGep =
+ Builder.CreateGEP(LoadType, PtrA, GepOffset, "", GEPA->isInBounds());
+ VectorType *TrueMaskTy =
+ VectorType::get(Builder.getInt1Ty(), VectorLoadType->getElementCount());
+ Value *AllTrueMask = Constant::getAllOnesValue(TrueMaskTy);
+ Value *VectorLhsLoad = Builder.CreateIntrinsic(
+ Intrinsic::vp_load, {VectorLoadType, VectorLhsGep->getType()},
+ {VectorLhsGep, AllTrueMask, VL}, nullptr, "lhs.load");
+
+ Value *VectorRhsGep =
+ Builder.CreateGEP(LoadType, PtrB, GepOffset, "", GEPB->isInBounds());
+ Value *VectorRhsLoad = Builder.CreateIntrinsic(
+ Intrinsic::vp_load, {VectorLoadType, VectorLhsGep->getType()},
+ {VectorRhsGep, AllTrueMask, VL}, nullptr, "rhs.load");
+
+ StringRef PredicateStr = CmpInst::getPredicateName(CmpInst::ICMP_NE);
+ auto *PredicateMDS = MDString::get(VectorLhsLoad->getContext(), PredicateStr);
+ Value *Pred = MetadataAsValue::get(VectorLhsLoad->getContext(), PredicateMDS);
+ Value *VectorMatchCmp = Builder.CreateIntrinsic(
+ Intrinsic::vp_icmp, {VectorLhsLoad->getType()},
+ {VectorLhsLoad, VectorRhsLoad, Pred, AllTrueMask, VL}, nullptr,
+ "mismatch.cmp");
+ Value *CTZ = Builder.CreateIntrinsic(
+ Intrinsic::vp_cttz_elts, {ResType, VectorMatchCmp->getType()},
+ {VectorMatchCmp, /*ZeroIsPoison=*/Builder.getInt1(false), AllTrueMask,
+ VL});
+ Value *MismatchFound = Builder.CreateICmpNE(CTZ, VL);
+ auto *VectorEarlyExit = BranchInst::Create(VectorLoopMismatchBlock,
+ VectorLoopIncBlock, MismatchFound);
+ Builder.Insert(VectorEarlyExit);
+
+ DTU.applyUpdates(
+ {{DominatorTree::Insert, VectorLoopStartBlock, VectorLoopMismatchBlock},
+ {DominatorTree::Insert, VectorLoopStartBlock, VectorLoopIncBlock}});
+
+ // Increment the index counter and calculate the predicate for the next
+ // iteration of the loop. We branch back to the start of the loop if there
+ // is at least one active lane.
+ Builder.SetInsertPoint(VectorLoopIncBlock);
+ Value *VL64 = Builder.CreateZExt(VL, I64Type);
+ Value *NewVectorIndexPhi =
+ Builder.CreateAdd(VectorIndexPhi, VL64, "",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ VectorIndexPhi->addIncoming(NewVectorIndexPhi, VectorLoopIncBlock);
+ Value *ExitCond = Builder.CreateICmpNE(NewVectorIndexPhi, ExtEnd);
+ auto *VectorLoopBranchBack =
+ BranchInst::Create(VectorLoopStartBlock, EndBlock, ExitCond);
+ Builder.Insert(VectorLoopBranchBack);
+
+ DTU.applyUpdates(
+ {{DominatorTree::Insert, VectorLoopIncBlock, VectorLoopStartBlock},
+ {DominatorTree::Insert, VectorLoopIncBlock, EndBlock}});
+
+ // If we found a mismatch then we need to calculate which lane in the vector
+ // contained it and add that lane offset to the current loop index.
+ Builder.SetInsertPoint(VectorLoopMismatchBlock);
+
+ // Add LCSSA phis for CTZ and VectorIndexPhi.
+ auto *CTZLCSSAPhi = Builder.CreatePHI(CTZ->getType(), 1, "ctz");
+ CTZLCSSAPhi->addIncoming(CTZ, VectorLoopStartBlock);
+ auto *VectorIndexLCSSAPhi =
+ Builder.CreatePHI(VectorIndexPhi->getType(), 1, "mismatch_vector_index");
+ VectorIndexLCSSAPhi->addIncoming(VectorIndexPhi, VectorLoopStartBlock);
+
+ Value *CTZI64 = Builder.CreateZExt(CTZLCSSAPhi, I64Type);
+ Value *VectorLoopRes64 = Builder.CreateAdd(VectorIndexLCSSAPhi, CTZI64, "",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ return Builder.CreateTrunc(VectorLoopRes64, ResType);
+}
+
Value *LoopIdiomVectorize::expandFindMismatch(
IRBuilder<> &Builder, DomTreeUpdater &DTU, GetElementPtrInst *GEPA,
GetElementPtrInst *GEPB, Instruction *Index, Value *Start, Value *MaxLen) {
@@ -613,8 +741,22 @@ Value *LoopIdiomVectorize::expandFindMismatch(
// processed in each iteration, etc.
Builder.SetInsertPoint(VectorLoopPreheaderBlock);
- Value *VectorLoopRes =
- createMaskedFindMismatch(Builder, DTU, GEPA, GEPB, ExtStart, ExtEnd);
+ // At this point we know two things must be true:
+ // 1. Start <= End
+ // 2. ExtMaxLen <= MinPageSize due to the page checks.
+ // Therefore, we know that we can use a 64-bit induction variable that
+ // runs from 0 to ExtMaxLen without overflowing.
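+ // With the 4 KiB page size implied by the lshr-by-12 page checks, that
+ // means ExtMaxLen is at most 4096.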
+ Value *VectorLoopRes = nullptr;
+ switch (VectorizeStyle) {
+ case LoopIdiomVectorizeStyle::Masked:
+ VectorLoopRes =
+ createMaskedFindMismatch(Builder, DTU, GEPA, GEPB, ExtStart, ExtEnd);
+ break;
+ case LoopIdiomVectorizeStyle::Predicated:
+ VectorLoopRes = createPredicatedFindMismatch(Builder, DTU, GEPA, GEPB,
+ ExtStart, ExtEnd);
+ break;
+ }
Builder.Insert(BranchInst::Create(EndBlock));
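
As a reading aid, the following is a minimal scalar C++ sketch of what the
emitted predicated loop computes; the function name and the fixed VL bound of
16 are illustrative assumptions (the generated IR takes VL from
@llvm.experimental.get.vector.length on every iteration):

  #include <algorithm>
  #include <cstdint>

  // Returns the first index in [Start, End) at which A and B differ, or End
  // if the two byte ranges are equal.
  uint64_t findMismatch(const uint8_t *A, const uint8_t *B, uint64_t Start,
                        uint64_t End) {
    uint64_t Index = Start;
    while (Index != End) {
      uint64_t AVL = End - Index;                // elements left to process
      uint64_t VL = std::min<uint64_t>(AVL, 16); // get.vector.length stand-in
      // vp.icmp + vp.cttz.elts: find the first differing lane, if any.
      for (uint64_t Lane = 0; Lane < VL; ++Lane)
        if (A[Index + Lane] != B[Index + Lane])
          return Index + Lane;                   // mismatch block: index + CTZ
      Index += VL;                               // increment block
    }
    return End;                                  // loop exits to EndBlock
  }
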
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-byte-compare-index.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-byte-compare-index.ll
new file mode 100644
index 0000000000000..3107d4e044cae
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst-byte-compare-index.ll
@@ -0,0 +1,177 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
+
+; Testing VFIRST patterns related to llvm/test/Transforms/LoopIdiom/RISCV/byte-compare-index.ll
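+;
+; In the vector loop below, vsetvli computes VL = min(AVL, VLMAX) from the
+; remaining trip count, the two vle8.v instructions perform the strip-mined
+; byte loads, vmsne.vv compares them, and vfirst.m returns the index of the
+; first set mask bit or -1 if there is none, hence the bltz fall-through on
+; the no-mismatch path.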
+
+define i32 @compare_bytes_simple(ptr %a, ptr %b, i32 signext %len, i32 signext %n) {
+; CHECK-LABEL: compare_bytes_simple:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addiw a4, a2, 1
+; CHECK-NEXT: bltu a3, a4, .LBB0_7
+; CHECK-NEXT: # %bb.1: # %mismatch_mem_check
+; CHECK-NEXT: slli a2, a4, 32
+; CHECK-NEXT: srli a2, a2, 32
+; CHECK-NEXT: slli a5, a3, 32
+; CHECK-NEXT: srli a5, a5, 32
+; CHECK-NEXT: add a6, a0, a2
+; CHECK-NEXT: add a7, a0, a5
+; CHECK-NEXT: srli a6, a6, 12
+; CHECK-NEXT: srli a7, a7, 12
+; CHECK-NEXT: bne a6, a7, .LBB0_7
+; CHECK-NEXT: # %bb.2: # %mismatch_mem_check
+; CHECK-NEXT: add a6, a1, a2
+; CHECK-NEXT: add a7, a1, a5
+; CHECK-NEXT: srli a6, a6, 12
+; CHECK-NEXT: srli a7, a7, 12
+; CHECK-NEXT: bne a6, a7, .LBB0_7
+; CHECK-NEXT: .LBB0_3: # %mismatch_vec_loop
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: sub a4, a5, a2
+; CHECK-NEXT: vsetvli a4, a4, e8, m2, ta, ma
+; CHECK-NEXT: add a6, a0, a2
+; CHECK-NEXT: vle8.v v8, (a6)
+; CHECK-NEXT: add a6, a1, a2
+; CHECK-NEXT: vle8.v v10, (a6)
+; CHECK-NEXT: vmsne.vv v12, v8, v10
+; CHECK-NEXT: vfirst.m a7, v12
+; CHECK-NEXT: mv a6, a4
+; CHECK-NEXT: bltz a7, .LBB0_5
+; CHECK-NEXT: # %bb.4: # %mismatch_vec_loop
+; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: mv a6, a7
+; CHECK-NEXT: .LBB0_5: # %mismatch_vec_loop
+; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: sext.w a7, a6
+; CHECK-NEXT: bne a7, a4, .LBB0_11
+; CHECK-NEXT: # %bb.6: # %mismatch_vec_loop_inc
+; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: add a2, a2, a4
+; CHECK-NEXT: bne a2, a5, .LBB0_3
+; CHECK-NEXT: j .LBB0_9
+; CHECK-NEXT: .LBB0_7: # %mismatch_loop
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: slli a2, a4, 32
+; CHECK-NEXT: srli a2, a2, 32
+; CHECK-NEXT: add a5, a0, a2
+; CHECK-NEXT: lbu a5, 0(a5)
+; CHECK-NEXT: add a2, a1, a2
+; CHECK-NEXT: lbu a2, 0(a2)
+; CHECK-NEXT: bne a5, a2, .LBB0_10
+; CHECK-NEXT: # %bb.8: # %mismatch_loop_inc
+; CHECK-NEXT: # in Loop: Header=BB0_7 Depth=1
+; CHECK-NEXT: addiw a4, a4, 1
+; CHECK-NEXT: bne a3, a4, .LBB0_7
+; CHECK-NEXT: .LBB0_9: # %while.end
+; CHECK-NEXT: mv a0, a3
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_10:
+; CHECK-NEXT: mv a0, a4
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_11: # %mismatch_vec_loop_found
+; CHECK-NEXT: slli a6, a6, 32
+; CHECK-NEXT: srli a3, a6, 32
+; CHECK-NEXT: add a0, a2, a3
+; CHECK-NEXT: ret
+entry:
+ %0 = add i32 %len, 1
+ br label %mismatch_min_it_check
+
+mismatch_min_it_check: ; preds = %entry
+ %1 = zext i32 %0 to i64
+ %2 = zext i32 %n to i64
+ %3 = icmp ule i32 %0, %n
+ br i1 %3, label %mismatch_mem_check, label %mismatch_loop_pre
+
+mismatch_mem_check: ; preds = %mismatch_min_it_check
+ %4 = getelementptr i8, ptr %a, i64 %1
+ %5 = getelementptr i8, ptr %b, i64 %1
+ %6 = ptrtoint ptr %5 to i64
+ %7 = ptrtoint ptr %4 to i64
+ %8 = getelementptr i8, ptr %a, i64 %2
+ %9 = getelementptr i8, ptr %b, i64 %2
+ %10 = ptrtoint ptr %8 to i64
+ %11 = ptrtoint ptr %9 to i64
+ %12 = lshr i64 %7, 12
+ %13 = lshr i64 %10, 12
+ %14 = lshr i64 %6, 12
+ %15 = lshr i64 %11, 12
+ %16 = icmp ne i64 %12, %13
+ %17 = icmp ne i64 %14, %15
+ %18 = or i1 %16, %17
+ br i1 %18, label %mismatch_loop_pre, label %mismatch_vec_loop_preheader
+
+mismatch_vec_loop_preheader: ; preds = %mismatch_mem_check
+ br label %mismatch_vec_loop
+
+mismatch_vec_loop: ; preds = %mismatch_vec_loop_inc, %mismatch_vec_loop_preheader
+ %mismatch_vector_index = phi i64 [ %1, %mismatch_vec_loop_preheader ], [ %25, %mismatch_vec_loop_inc ]
+ %avl = sub nuw nsw i64 %2, %mismatch_vector_index
+ %19 = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 16, i1 true)
+ %20 = getelementptr inbounds i8, ptr %a, i64 %mismatch_vector_index
+ %lhs.load = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr %20, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 %19)
+ %21 = getelementptr inbounds i8, ptr %b, i64 %mismatch_vector_index
+ %rhs.load = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr %21, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 %19)
+ %mismatch.cmp = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> %lhs.load, <vscale x 16 x i8> %rhs.load, metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 %19)
+ %22 = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> %mismatch.cmp, i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 %19)
+ %23 = icmp ne i32 %22, %19
+ br i1 %23, label %mismatch_vec_loop_found, label %mismatch_vec_loop_inc
+
+mismatch_vec_loop_inc: ; preds = %mismatch_vec_loop
+ %24 = zext i32 %19 to i64
+ %25 = add nuw nsw i64 %mismatch_vector_index, %24
+ %26 = icmp ne i64 %25, %2
+ br i1 %26, label %mismatch_vec_loop, label %mismatch_end
+
+mismatch_vec_loop_found: ; preds = %mismatch_vec_loop
+ %ctz = phi i32 [ %22, %mismatch_vec_loop ]
+ %mismatch_vector_index1 = phi i64 [ %mismatch_vector_index, %mismatch_vec_loop ]
+ %27 = zext i32 %ctz to i64
+ %28 = add nuw nsw i64 %mismatch_vector_index1, %27
+ %29 = trunc i64 %28 to i32
+ br label %mismatch_end
+
+mismatch_loop_pre: ; preds = %mismatch_mem_check, %mismatch_min_it_check
+ br label %mismatch_loop
+
+mismatch_loop: ; preds = %mismatch_loop_inc, %mismatch_loop_pre
+ %mismatch_index = phi i32 [ %0, %mismatch_loop_pre ], [ %36, %mismatch_loop_inc ]
+ %30 = zext i32 %mismatch_index to i64
+ %31 = getelementptr inbounds i8, ptr %a, i64 %30
+ %32 = load i8, ptr %31, align 1
+ %33 = getelementptr inbounds i8, ptr %b, i64 %30
+ %34 = load i8, ptr %33, align 1
+ %35 = icmp eq i8 %32, %34
+ br i1 %35, label %mismatch_loop_inc, label %mismatch_end
+
+mismatch_loop_inc: ; preds = %mismatch_loop
+ %36 = add i32 %mismatch_index, 1
+ %37 = icmp eq i32 %36, %n
+ br i1 %37, label %mismatch_end, label %mismatch_loop
+
+mismatch_end: ; preds = %mismatch_loop_inc, %mismatch_loop, %mismatch_vec_loop_found, %mismatch_vec_loop_inc
+ %mismatch_result = phi i32 [ %n, %mismatch_loop_inc ], [ %mismatch_index, %mismatch_loop ], [ %n, %mismatch_vec_loop_inc ], [ %29, %mismatch_vec_loop_found ]
+ br i1 true, label %byte.compare, label %while.cond
+
+while.cond: ; preds = %mismatch_end, %while.body
+ %len.addr = phi i32 [ %len, %mismatch_end ], [ %mismatch_result, %while.body ]
+ %inc = add i32 %len.addr, 1
+ %cmp.not = icmp eq i32 %mismatch_result, %n
+ br i1 %cmp.not, label %while.end, label %while.body
+
+while.body: ; preds = %while.cond
+ %idxprom = zext i32 %mismatch_result to i64
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+ %38 = load i8, ptr %arrayidx, align 1
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ %39 = load i8, ptr %arrayidx2, align 1
+ %cmp.not2 = icmp eq i8 %38, %39
+ br i1 %cmp.not2, label %while.cond, label %while.end
+
+byte.compare: ; preds = %mismatch_end
+ br label %while.end
+
+while.end: ; preds = %byte.compare, %while.body, %while.cond
+ %inc.lcssa = phi i32 [ %mismatch_result, %while.body ], [ %mismatch_result, %while.cond ], [ %mismatch_result, %byte.compare ]
+ ret i32 %inc.lcssa
+}
+
diff --git a/llvm/test/Transforms/LoopIdiom/RISCV/byte-compare-index.ll b/llvm/test/Transforms/LoopIdiom/RISCV/byte-compare-index.ll
new file mode 100644
index 0000000000000..8cf761055bd38
--- /dev/null
+++ b/llvm/test/Transforms/LoopIdiom/RISCV/byte-compare-index.ll
@@ -0,0 +1,2309 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+; RUN: opt -passes=loop-idiom-vectorize -mtriple=riscv64-unknown-linux-gnu -loop-idiom-vectorize-style=predicated -mattr=+v -S < %s | FileCheck %s
+; RUN: opt -passes=loop-idiom-vectorize -mtriple=riscv64-unknown-linux-gnu -loop-idiom-vectorize-style=predicated -loop-idiom-vectorize-bytecmp-vf=64 -mattr=+v -S < %s | FileCheck %s --check-prefix=LMUL8
+; RUN: opt -passes='loop(loop-idiom-vectorize),simplifycfg' -mtriple=riscv64-unknown-linux-gnu -loop-idiom-vectorize-style=predicated -mattr=+v -S < %s | FileCheck %s --check-prefix=LOOP-DEL
+; RUN: opt -passes=loop-idiom-vectorize -mtriple=riscv64-unknown-linux-gnu -loop-idiom-vectorize-style=masked -mattr=+v -S < %s | FileCheck %s --check-prefix=MASKED
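+;
+; CHECK exercises the default predicated lowering (16-byte compare VF), LMUL8
+; raises the byte-compare VF to 64, LOOP-DEL additionally runs simplifycfg to
+; clean up the resulting CFG, and MASKED uses the masked (active-lane-mask)
+; style for comparison.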
+
+define i32 @compare_bytes_simple(ptr %a, ptr %b, i32 %len, i32 %n) {
+; CHECK-LABEL: define i32 @compare_bytes_simple(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; CHECK-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; CHECK: mismatch_min_it_check:
+; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK: mismatch_mem_check:
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; CHECK-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; CHECK-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: mismatch_vec_loop_preheader:
+; CHECK-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; CHECK: mismatch_vec_loop:
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP20]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP21]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; CHECK-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; CHECK: mismatch_vec_loop_inc:
+; CHECK-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; CHECK: mismatch_vec_loop_found:
+; CHECK-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; CHECK-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT: br label [[MISMATCH_END]]
+; CHECK: mismatch_loop_pre:
+; CHECK-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; CHECK: mismatch_loop:
+; CHECK-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; CHECK-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; CHECK-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; CHECK: mismatch_loop_inc:
+; CHECK-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX]], 1
+; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; CHECK-NEXT: br i1 [[TMP36]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; CHECK: mismatch_end:
+; CHECK-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; CHECK-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; CHECK-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; CHECK: while.body:
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP37]], [[TMP38]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; CHECK: byte.compare:
+; CHECK-NEXT: br label [[WHILE_END]]
+; CHECK: while.end:
+; CHECK-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LMUL8-LABEL: define i32 @compare_bytes_simple(
+; LMUL8-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; LMUL8-NEXT: entry:
+; LMUL8-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; LMUL8-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; LMUL8: mismatch_min_it_check:
+; LMUL8-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; LMUL8-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; LMUL8-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0:![0-9]+]]
+; LMUL8: mismatch_mem_check:
+; LMUL8-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; LMUL8-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; LMUL8-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; LMUL8-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; LMUL8-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; LMUL8-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; LMUL8-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; LMUL8-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; LMUL8-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; LMUL8-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; LMUL8-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; LMUL8-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; LMUL8-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; LMUL8-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; LMUL8-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; LMUL8-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1:![0-9]+]]
+; LMUL8: mismatch_vec_loop_preheader:
+; LMUL8-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; LMUL8: mismatch_vec_loop:
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 64, i1 true)
+; LMUL8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP20]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP21]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 64 x i1> @llvm.vp.icmp.nxv64i8(<vscale x 64 x i8> [[LHS_LOAD]], <vscale x 64 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv64i1(<vscale x 64 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; LMUL8-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LMUL8: mismatch_vec_loop_inc:
+; LMUL8-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; LMUL8-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; LMUL8-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; LMUL8-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; LMUL8: mismatch_vec_loop_found:
+; LMUL8-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; LMUL8-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; LMUL8-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; LMUL8-NEXT: br label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_pre:
+; LMUL8-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LMUL8: mismatch_loop:
+; LMUL8-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; LMUL8-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; LMUL8-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; LMUL8-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; LMUL8-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; LMUL8-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; LMUL8-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_inc:
+; LMUL8-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX]], 1
+; LMUL8-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP36]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; LMUL8: mismatch_end:
+; LMUL8-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LMUL8-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; LMUL8: while.cond:
+; LMUL8-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; LMUL8-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; LMUL8-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; LMUL8-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LMUL8: while.body:
+; LMUL8-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; LMUL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LMUL8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LMUL8-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP37]], [[TMP38]]
+; LMUL8-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; LMUL8: byte.compare:
+; LMUL8-NEXT: br label [[WHILE_END]]
+; LMUL8: while.end:
+; LMUL8-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; LMUL8-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LOOP-DEL-LABEL: define i32 @compare_bytes_simple(
+; LOOP-DEL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; LOOP-DEL-NEXT: entry:
+; LOOP-DEL-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; LOOP-DEL-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; LOOP-DEL-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; LOOP-DEL-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0:![0-9]+]]
+; LOOP-DEL: mismatch_mem_check:
+; LOOP-DEL-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; LOOP-DEL-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; LOOP-DEL-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; LOOP-DEL-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; LOOP-DEL-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; LOOP-DEL-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; LOOP-DEL-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; LOOP-DEL-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; LOOP-DEL-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; LOOP-DEL-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; LOOP-DEL-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; LOOP-DEL-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; LOOP-DEL-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; LOOP-DEL-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; LOOP-DEL-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; LOOP-DEL-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP:%.*]], !prof [[PROF1:![0-9]+]]
+; LOOP-DEL: mismatch_vec_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ], [ [[TMP1]], [[MISMATCH_MEM_CHECK]] ]
+; LOOP-DEL-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; LOOP-DEL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP20]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP21]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; LOOP-DEL-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LOOP-DEL: mismatch_vec_loop_inc:
+; LOOP-DEL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; LOOP-DEL-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; LOOP-DEL-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; LOOP-DEL-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[WHILE_END:%.*]]
+; LOOP-DEL: mismatch_vec_loop_found:
+; LOOP-DEL-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LOOP-DEL-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LOOP-DEL-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; LOOP-DEL-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; LOOP-DEL-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; LOOP-DEL-NEXT: br label [[WHILE_END]]
+; LOOP-DEL: mismatch_loop_pre:
+; LOOP-DEL-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LOOP-DEL: mismatch_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LOOP-DEL-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; LOOP-DEL-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; LOOP-DEL-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; LOOP-DEL-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; LOOP-DEL-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; LOOP-DEL-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; LOOP-DEL-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[WHILE_END]]
+; LOOP-DEL: mismatch_loop_inc:
+; LOOP-DEL-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX]], 1
+; LOOP-DEL-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[TMP36]], label [[WHILE_END]], label [[MISMATCH_LOOP]]
+; LOOP-DEL: while.end:
+; LOOP-DEL-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LOOP-DEL-NEXT: ret i32 [[MISMATCH_RESULT]]
+;
+; MASKED-LABEL: define i32 @compare_bytes_simple(
+; MASKED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; MASKED-NEXT: entry:
+; MASKED-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; MASKED-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; MASKED: mismatch_min_it_check:
+; MASKED-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; MASKED-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; MASKED-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; MASKED-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0:![0-9]+]]
+; MASKED: mismatch_mem_check:
+; MASKED-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; MASKED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; MASKED-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; MASKED-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; MASKED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; MASKED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; MASKED-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; MASKED-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; MASKED-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP7]], 12
+; MASKED-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP10]], 12
+; MASKED-NEXT: [[TMP14:%.*]] = lshr i64 [[TMP6]], 12
+; MASKED-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP11]], 12
+; MASKED-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP12]], [[TMP13]]
+; MASKED-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP14]], [[TMP15]]
+; MASKED-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; MASKED-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VEC_LOOP_PREHEADER:%.*]], !prof [[PROF1:![0-9]+]]
+; MASKED: mismatch_vec_loop_preheader:
+; MASKED-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP1]], i64 [[TMP2]])
+; MASKED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
+; MASKED-NEXT: [[TMP21:%.*]] = mul nuw nsw i64 [[TMP20]], 16
+; MASKED-NEXT: br label [[MISMATCH_VEC_LOOP:%.*]]
+; MASKED: mismatch_vec_loop:
+; MASKED-NEXT: [[MISMATCH_VEC_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP19]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP30:%.*]], [[MISMATCH_VEC_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP29:%.*]], [[MISMATCH_VEC_LOOP_INC]] ]
+; MASKED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP23:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP25:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP24]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP26:%.*]] = icmp ne <vscale x 16 x i8> [[TMP23]], [[TMP25]]
+; MASKED-NEXT: [[TMP27:%.*]] = select <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i1> [[TMP26]], <vscale x 16 x i1> zeroinitializer
+; MASKED-NEXT: [[TMP28:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP27]])
+; MASKED-NEXT: br i1 [[TMP28]], label [[MISMATCH_VEC_LOOP_FOUND:%.*]], label [[MISMATCH_VEC_LOOP_INC]]
+; MASKED: mismatch_vec_loop_inc:
+; MASKED-NEXT: [[TMP29]] = add nuw nsw i64 [[MISMATCH_VEC_INDEX]], [[TMP21]]
+; MASKED-NEXT: [[TMP30]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP29]], i64 [[TMP2]])
+; MASKED-NEXT: [[TMP31:%.*]] = extractelement <vscale x 16 x i1> [[TMP30]], i64 0
+; MASKED-NEXT: br i1 [[TMP31]], label [[MISMATCH_VEC_LOOP]], label [[MISMATCH_END:%.*]]
+; MASKED: mismatch_vec_loop_found:
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP27]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_LAST_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[MISMATCH_VEC_LOOP_PRED]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_INDEX:%.*]] = phi i64 [ [[MISMATCH_VEC_INDEX]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[TMP32:%.*]] = and <vscale x 16 x i1> [[MISMATCH_VEC_LAST_LOOP_PRED]], [[MISMATCH_VEC_FOUND_PRED]]
+; MASKED-NEXT: [[TMP33:%.*]] = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[TMP32]], i1 true)
+; MASKED-NEXT: [[TMP34:%.*]] = zext i32 [[TMP33]] to i64
+; MASKED-NEXT: [[TMP35:%.*]] = add nuw nsw i64 [[MISMATCH_VEC_FOUND_INDEX]], [[TMP34]]
+; MASKED-NEXT: [[TMP36:%.*]] = trunc i64 [[TMP35]] to i32
+; MASKED-NEXT: br label [[MISMATCH_END]]
+; MASKED: mismatch_loop_pre:
+; MASKED-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; MASKED: mismatch_loop:
+; MASKED-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP43:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[TMP37:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; MASKED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP37]]
+; MASKED-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP38]], align 1
+; MASKED-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP37]]
+; MASKED-NEXT: [[TMP41:%.*]] = load i8, ptr [[TMP40]], align 1
+; MASKED-NEXT: [[TMP42:%.*]] = icmp eq i8 [[TMP39]], [[TMP41]]
+; MASKED-NEXT: br i1 [[TMP42]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; MASKED: mismatch_loop_inc:
+; MASKED-NEXT: [[TMP43]] = add i32 [[MISMATCH_INDEX]], 1
+; MASKED-NEXT: [[TMP44:%.*]] = icmp eq i32 [[TMP43]], [[N]]
+; MASKED-NEXT: br i1 [[TMP44]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; MASKED: mismatch_end:
+; MASKED-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VEC_LOOP_INC]] ], [ [[TMP36]], [[MISMATCH_VEC_LOOP_FOUND]] ]
+; MASKED-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; MASKED: while.cond:
+; MASKED-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; MASKED-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; MASKED-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; MASKED-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; MASKED: while.body:
+; MASKED-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; MASKED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP45:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; MASKED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP46:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; MASKED-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP45]], [[TMP46]]
+; MASKED-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; MASKED: byte.compare:
+; MASKED-NEXT: br label [[WHILE_END]]
+; MASKED: while.end:
+; MASKED-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; MASKED-NEXT: ret i32 [[INC_LCSSA]]
+;
+entry:
+ br label %while.cond
+
+while.cond:
+ %len.addr = phi i32 [ %len, %entry ], [ %inc, %while.body ]
+ %inc = add i32 %len.addr, 1
+ %cmp.not = icmp eq i32 %inc, %n
+ br i1 %cmp.not, label %while.end, label %while.body
+
+while.body:
+ %idxprom = zext i32 %inc to i64
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+ %0 = load i8, ptr %arrayidx
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx2
+ %cmp.not2 = icmp eq i8 %0, %1
+ br i1 %cmp.not2, label %while.cond, label %while.end
+
+while.end:
+ %inc.lcssa = phi i32 [ %inc, %while.body ], [ %inc, %while.cond ]
+ ret i32 %inc.lcssa
+}
+
+define i32 @compare_bytes_signed_wrap(ptr %a, ptr %b, i32 %len, i32 %n) {
+; CHECK-LABEL: define i32 @compare_bytes_signed_wrap(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; CHECK-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; CHECK: mismatch_min_it_check:
+; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; CHECK: mismatch_mem_check:
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; CHECK-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; CHECK-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; CHECK: mismatch_vec_loop_preheader:
+; CHECK-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; CHECK: mismatch_vec_loop:
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP20]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP21]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; CHECK-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; CHECK: mismatch_vec_loop_inc:
+; CHECK-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; CHECK: mismatch_vec_loop_found:
+; CHECK-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; CHECK-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT: br label [[MISMATCH_END]]
+; CHECK: mismatch_loop_pre:
+; CHECK-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; CHECK: mismatch_loop:
+; CHECK-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; CHECK-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; CHECK-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; CHECK: mismatch_loop_inc:
+; CHECK-NEXT: [[TMP35]] = add nsw i32 [[MISMATCH_INDEX]], 1
+; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; CHECK-NEXT: br i1 [[TMP36]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; CHECK: mismatch_end:
+; CHECK-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; CHECK-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[LEN_ADDR]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; CHECK: while.body:
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP37]], [[TMP38]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; CHECK: byte.compare:
+; CHECK-NEXT: br label [[WHILE_END]]
+; CHECK: while.end:
+; CHECK-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LMUL8-LABEL: define i32 @compare_bytes_signed_wrap(
+; LMUL8-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; LMUL8-NEXT: entry:
+; LMUL8-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; LMUL8-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; LMUL8: mismatch_min_it_check:
+; LMUL8-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; LMUL8-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; LMUL8-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; LMUL8: mismatch_mem_check:
+; LMUL8-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; LMUL8-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; LMUL8-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; LMUL8-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; LMUL8-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; LMUL8-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; LMUL8-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; LMUL8-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; LMUL8-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; LMUL8-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; LMUL8-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; LMUL8-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; LMUL8-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; LMUL8-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; LMUL8-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; LMUL8-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; LMUL8: mismatch_vec_loop_preheader:
+; LMUL8-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; LMUL8: mismatch_vec_loop:
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 64, i1 true)
+; LMUL8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP20]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP21]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 64 x i1> @llvm.vp.icmp.nxv64i8(<vscale x 64 x i8> [[LHS_LOAD]], <vscale x 64 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv64i1(<vscale x 64 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; LMUL8-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LMUL8: mismatch_vec_loop_inc:
+; LMUL8-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; LMUL8-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; LMUL8-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; LMUL8-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; LMUL8: mismatch_vec_loop_found:
+; LMUL8-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; LMUL8-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; LMUL8-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; LMUL8-NEXT: br label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_pre:
+; LMUL8-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LMUL8: mismatch_loop:
+; LMUL8-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; LMUL8-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; LMUL8-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; LMUL8-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; LMUL8-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; LMUL8-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; LMUL8-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_inc:
+; LMUL8-NEXT: [[TMP35]] = add nsw i32 [[MISMATCH_INDEX]], 1
+; LMUL8-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP36]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; LMUL8: mismatch_end:
+; LMUL8-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LMUL8-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; LMUL8: while.cond:
+; LMUL8-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; LMUL8-NEXT: [[INC:%.*]] = add nsw i32 [[LEN_ADDR]], 1
+; LMUL8-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; LMUL8-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LMUL8: while.body:
+; LMUL8-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; LMUL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LMUL8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LMUL8-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP37]], [[TMP38]]
+; LMUL8-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; LMUL8: byte.compare:
+; LMUL8-NEXT: br label [[WHILE_END]]
+; LMUL8: while.end:
+; LMUL8-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; LMUL8-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LOOP-DEL-LABEL: define i32 @compare_bytes_signed_wrap(
+; LOOP-DEL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; LOOP-DEL-NEXT: entry:
+; LOOP-DEL-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; LOOP-DEL-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; LOOP-DEL-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; LOOP-DEL-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; LOOP-DEL: mismatch_mem_check:
+; LOOP-DEL-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; LOOP-DEL-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; LOOP-DEL-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; LOOP-DEL-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; LOOP-DEL-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; LOOP-DEL-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; LOOP-DEL-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; LOOP-DEL-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; LOOP-DEL-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; LOOP-DEL-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; LOOP-DEL-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; LOOP-DEL-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; LOOP-DEL-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; LOOP-DEL-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; LOOP-DEL-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; LOOP-DEL-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP:%.*]], !prof [[PROF1]]
+; LOOP-DEL: mismatch_vec_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ], [ [[TMP1]], [[MISMATCH_MEM_CHECK]] ]
+; LOOP-DEL-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; LOOP-DEL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP20]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP21]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; LOOP-DEL-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LOOP-DEL: mismatch_vec_loop_inc:
+; LOOP-DEL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; LOOP-DEL-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; LOOP-DEL-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; LOOP-DEL-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[WHILE_END:%.*]]
+; LOOP-DEL: mismatch_vec_loop_found:
+; LOOP-DEL-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LOOP-DEL-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LOOP-DEL-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; LOOP-DEL-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; LOOP-DEL-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; LOOP-DEL-NEXT: br label [[WHILE_END]]
+; LOOP-DEL: mismatch_loop_pre:
+; LOOP-DEL-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LOOP-DEL: mismatch_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LOOP-DEL-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; LOOP-DEL-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; LOOP-DEL-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; LOOP-DEL-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; LOOP-DEL-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; LOOP-DEL-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; LOOP-DEL-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[WHILE_END]]
+; LOOP-DEL: mismatch_loop_inc:
+; LOOP-DEL-NEXT: [[TMP35]] = add nsw i32 [[MISMATCH_INDEX]], 1
+; LOOP-DEL-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[TMP36]], label [[WHILE_END]], label [[MISMATCH_LOOP]]
+; LOOP-DEL: while.end:
+; LOOP-DEL-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LOOP-DEL-NEXT: ret i32 [[MISMATCH_RESULT]]
+;
+; MASKED-LABEL: define i32 @compare_bytes_signed_wrap(
+; MASKED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; MASKED-NEXT: entry:
+; MASKED-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; MASKED-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; MASKED: mismatch_min_it_check:
+; MASKED-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; MASKED-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; MASKED-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; MASKED-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; MASKED: mismatch_mem_check:
+; MASKED-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; MASKED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; MASKED-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; MASKED-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; MASKED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; MASKED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; MASKED-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; MASKED-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; MASKED-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP7]], 12
+; MASKED-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP10]], 12
+; MASKED-NEXT: [[TMP14:%.*]] = lshr i64 [[TMP6]], 12
+; MASKED-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP11]], 12
+; MASKED-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP12]], [[TMP13]]
+; MASKED-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP14]], [[TMP15]]
+; MASKED-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; MASKED-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VEC_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; MASKED: mismatch_vec_loop_preheader:
+; MASKED-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP1]], i64 [[TMP2]])
+; MASKED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
+; MASKED-NEXT: [[TMP21:%.*]] = mul nuw nsw i64 [[TMP20]], 16
+; MASKED-NEXT: br label [[MISMATCH_VEC_LOOP:%.*]]
+; MASKED: mismatch_vec_loop:
+; MASKED-NEXT: [[MISMATCH_VEC_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP19]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP30:%.*]], [[MISMATCH_VEC_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP29:%.*]], [[MISMATCH_VEC_LOOP_INC]] ]
+; MASKED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP23:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP25:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP24]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP26:%.*]] = icmp ne <vscale x 16 x i8> [[TMP23]], [[TMP25]]
+; MASKED-NEXT: [[TMP27:%.*]] = select <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i1> [[TMP26]], <vscale x 16 x i1> zeroinitializer
+; MASKED-NEXT: [[TMP28:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP27]])
+; MASKED-NEXT: br i1 [[TMP28]], label [[MISMATCH_VEC_LOOP_FOUND:%.*]], label [[MISMATCH_VEC_LOOP_INC]]
+; MASKED: mismatch_vec_loop_inc:
+; MASKED-NEXT: [[TMP29]] = add nuw nsw i64 [[MISMATCH_VEC_INDEX]], [[TMP21]]
+; MASKED-NEXT: [[TMP30]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP29]], i64 [[TMP2]])
+; MASKED-NEXT: [[TMP31:%.*]] = extractelement <vscale x 16 x i1> [[TMP30]], i64 0
+; MASKED-NEXT: br i1 [[TMP31]], label [[MISMATCH_VEC_LOOP]], label [[MISMATCH_END:%.*]]
+; MASKED: mismatch_vec_loop_found:
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP27]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_LAST_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[MISMATCH_VEC_LOOP_PRED]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_INDEX:%.*]] = phi i64 [ [[MISMATCH_VEC_INDEX]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[TMP32:%.*]] = and <vscale x 16 x i1> [[MISMATCH_VEC_LAST_LOOP_PRED]], [[MISMATCH_VEC_FOUND_PRED]]
+; MASKED-NEXT: [[TMP33:%.*]] = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[TMP32]], i1 true)
+; MASKED-NEXT: [[TMP34:%.*]] = zext i32 [[TMP33]] to i64
+; MASKED-NEXT: [[TMP35:%.*]] = add nuw nsw i64 [[MISMATCH_VEC_FOUND_INDEX]], [[TMP34]]
+; MASKED-NEXT: [[TMP36:%.*]] = trunc i64 [[TMP35]] to i32
+; MASKED-NEXT: br label [[MISMATCH_END]]
+; MASKED: mismatch_loop_pre:
+; MASKED-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; MASKED: mismatch_loop:
+; MASKED-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP43:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[TMP37:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; MASKED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP37]]
+; MASKED-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP38]], align 1
+; MASKED-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP37]]
+; MASKED-NEXT: [[TMP41:%.*]] = load i8, ptr [[TMP40]], align 1
+; MASKED-NEXT: [[TMP42:%.*]] = icmp eq i8 [[TMP39]], [[TMP41]]
+; MASKED-NEXT: br i1 [[TMP42]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; MASKED: mismatch_loop_inc:
+; MASKED-NEXT: [[TMP43]] = add nsw i32 [[MISMATCH_INDEX]], 1
+; MASKED-NEXT: [[TMP44:%.*]] = icmp eq i32 [[TMP43]], [[N]]
+; MASKED-NEXT: br i1 [[TMP44]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; MASKED: mismatch_end:
+; MASKED-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VEC_LOOP_INC]] ], [ [[TMP36]], [[MISMATCH_VEC_LOOP_FOUND]] ]
+; MASKED-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; MASKED: while.cond:
+; MASKED-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; MASKED-NEXT: [[INC:%.*]] = add nsw i32 [[LEN_ADDR]], 1
+; MASKED-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; MASKED-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; MASKED: while.body:
+; MASKED-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; MASKED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP45:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; MASKED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP46:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; MASKED-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP45]], [[TMP46]]
+; MASKED-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; MASKED: byte.compare:
+; MASKED-NEXT: br label [[WHILE_END]]
+; MASKED: while.end:
+; MASKED-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; MASKED-NEXT: ret i32 [[INC_LCSSA]]
+;
+; NO-TRANSFORM-LABEL: define i32 @compare_bytes_signed_wrap(
+; NO-TRANSFORM-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) {
+; NO-TRANSFORM-NEXT: entry:
+; NO-TRANSFORM-NEXT: br label [[WHILE_COND:%.*]]
+; NO-TRANSFORM: while.cond:
+; NO-TRANSFORM-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; NO-TRANSFORM-NEXT: [[INC]] = add nsw i32 [[LEN_ADDR]], 1
+; NO-TRANSFORM-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; NO-TRANSFORM: while.body:
+; NO-TRANSFORM-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; NO-TRANSFORM-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; NO-TRANSFORM-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; NO-TRANSFORM-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; NO-TRANSFORM-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; NO-TRANSFORM-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; NO-TRANSFORM: while.end:
+; NO-TRANSFORM-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; NO-TRANSFORM-NEXT: ret i32 [[INC_LCSSA]]
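+; The input loop below increments its index with 'add nsw', so signed wrap
+; would be poison; judging by the checks above, the transform is expected to
+; fire for this variant as well.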
+entry:
+ br label %while.cond
+
+while.cond:
+ %len.addr = phi i32 [ %len, %entry ], [ %inc, %while.body ]
+ %inc = add nsw i32 %len.addr, 1
+ %cmp.not = icmp eq i32 %inc, %n
+ br i1 %cmp.not, label %while.end, label %while.body
+
+while.body:
+ %idxprom = zext i32 %inc to i64
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+ %0 = load i8, ptr %arrayidx
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx2
+ %cmp.not2 = icmp eq i8 %0, %1
+ br i1 %cmp.not2, label %while.cond, label %while.end
+
+while.end:
+ %inc.lcssa = phi i32 [ %inc, %while.body ], [ %inc, %while.cond ]
+ ret i32 %inc.lcssa
+}
+
+
+define i32 @compare_bytes_simple_end_ne_found(ptr %a, ptr %b, ptr %c, ptr %d, i32 %len, i32 %n) {
+; CHECK-LABEL: define i32 @compare_bytes_simple_end_ne_found(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; CHECK-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; CHECK: mismatch_min_it_check:
+; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; CHECK: mismatch_mem_check:
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; CHECK-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; CHECK-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; CHECK: mismatch_vec_loop_preheader:
+; CHECK-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; CHECK: mismatch_vec_loop:
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP20]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP21]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; CHECK-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; CHECK: mismatch_vec_loop_inc:
+; CHECK-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; CHECK: mismatch_vec_loop_found:
+; CHECK-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; CHECK-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT: br label [[MISMATCH_END]]
+; CHECK: mismatch_loop_pre:
+; CHECK-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; CHECK: mismatch_loop:
+; CHECK-NEXT: [[MISMATCH_INDEX3:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX3]] to i64
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; CHECK-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; CHECK-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; CHECK: mismatch_loop_inc:
+; CHECK-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX3]], 1
+; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; CHECK-NEXT: br i1 [[TMP36]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; CHECK: mismatch_end:
+; CHECK-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX3]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; CHECK-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; CHECK-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; CHECK: while.body:
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP37]], [[TMP38]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_FOUND:%.*]]
+; CHECK: while.found:
+; CHECK-NEXT: [[MISMATCH_INDEX1:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: [[FOUND_PTR:%.*]] = phi ptr [ [[C]], [[WHILE_BODY]] ], [ [[C]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: byte.compare:
+; CHECK-NEXT: [[TMP39:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP39]], label [[WHILE_END]], label [[WHILE_FOUND]]
+; CHECK: while.end:
+; CHECK-NEXT: [[MISMATCH_INDEX2:%.*]] = phi i32 [ [[N]], [[WHILE_COND]] ], [ [[N]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: [[END_PTR:%.*]] = phi ptr [ [[D]], [[WHILE_COND]] ], [ [[D]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[MISMATCH_INDEX1]], [[WHILE_FOUND]] ], [ [[MISMATCH_INDEX2]], [[WHILE_END]] ]
+; CHECK-NEXT: [[STORE_PTR:%.*]] = phi ptr [ [[END_PTR]], [[WHILE_END]] ], [ [[FOUND_PTR]], [[WHILE_FOUND]] ]
+; CHECK-NEXT: store i32 [[MISMATCH_INDEX]], ptr [[STORE_PTR]], align 4
+; CHECK-NEXT: ret i32 [[MISMATCH_INDEX]]
+;
+; LMUL8-LABEL: define i32 @compare_bytes_simple_end_ne_found(
+; LMUL8-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; LMUL8-NEXT: entry:
+; LMUL8-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; LMUL8-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; LMUL8: mismatch_min_it_check:
+; LMUL8-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; LMUL8-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; LMUL8-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; LMUL8: mismatch_mem_check:
+; LMUL8-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; LMUL8-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; LMUL8-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; LMUL8-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; LMUL8-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; LMUL8-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; LMUL8-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; LMUL8-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; LMUL8-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; LMUL8-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; LMUL8-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; LMUL8-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; LMUL8-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; LMUL8-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; LMUL8-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; LMUL8-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; LMUL8: mismatch_vec_loop_preheader:
+; LMUL8-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; LMUL8: mismatch_vec_loop:
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 64, i1 true)
+; LMUL8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP20]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP21]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 64 x i1> @llvm.vp.icmp.nxv64i8(<vscale x 64 x i8> [[LHS_LOAD]], <vscale x 64 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv64i1(<vscale x 64 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; LMUL8-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LMUL8: mismatch_vec_loop_inc:
+; LMUL8-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; LMUL8-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; LMUL8-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; LMUL8-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; LMUL8: mismatch_vec_loop_found:
+; LMUL8-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; LMUL8-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; LMUL8-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; LMUL8-NEXT: br label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_pre:
+; LMUL8-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LMUL8: mismatch_loop:
+; LMUL8-NEXT: [[MISMATCH_INDEX3:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX3]] to i64
+; LMUL8-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; LMUL8-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; LMUL8-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; LMUL8-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; LMUL8-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; LMUL8-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_inc:
+; LMUL8-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX3]], 1
+; LMUL8-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP36]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; LMUL8: mismatch_end:
+; LMUL8-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX3]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LMUL8-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; LMUL8: while.cond:
+; LMUL8-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; LMUL8-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; LMUL8-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; LMUL8-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LMUL8: while.body:
+; LMUL8-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; LMUL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LMUL8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LMUL8-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP37]], [[TMP38]]
+; LMUL8-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_FOUND:%.*]]
+; LMUL8: while.found:
+; LMUL8-NEXT: [[MISMATCH_INDEX1:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; LMUL8-NEXT: [[FOUND_PTR:%.*]] = phi ptr [ [[C]], [[WHILE_BODY]] ], [ [[C]], [[BYTE_COMPARE]] ]
+; LMUL8-NEXT: br label [[END:%.*]]
+; LMUL8: byte.compare:
+; LMUL8-NEXT: [[TMP39:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP39]], label [[WHILE_END]], label [[WHILE_FOUND]]
+; LMUL8: while.end:
+; LMUL8-NEXT: [[MISMATCH_INDEX2:%.*]] = phi i32 [ [[N]], [[WHILE_COND]] ], [ [[N]], [[BYTE_COMPARE]] ]
+; LMUL8-NEXT: [[END_PTR:%.*]] = phi ptr [ [[D]], [[WHILE_COND]] ], [ [[D]], [[BYTE_COMPARE]] ]
+; LMUL8-NEXT: br label [[END]]
+; LMUL8: end:
+; LMUL8-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[MISMATCH_INDEX1]], [[WHILE_FOUND]] ], [ [[MISMATCH_INDEX2]], [[WHILE_END]] ]
+; LMUL8-NEXT: [[STORE_PTR:%.*]] = phi ptr [ [[END_PTR]], [[WHILE_END]] ], [ [[FOUND_PTR]], [[WHILE_FOUND]] ]
+; LMUL8-NEXT: store i32 [[MISMATCH_INDEX]], ptr [[STORE_PTR]], align 4
+; LMUL8-NEXT: ret i32 [[MISMATCH_INDEX]]
+;
+; LOOP-DEL-LABEL: define i32 @compare_bytes_simple_end_ne_found(
+; LOOP-DEL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; LOOP-DEL-NEXT: entry:
+; LOOP-DEL-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; LOOP-DEL-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; LOOP-DEL-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; LOOP-DEL-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; LOOP-DEL: mismatch_mem_check:
+; LOOP-DEL-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; LOOP-DEL-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; LOOP-DEL-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; LOOP-DEL-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; LOOP-DEL-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; LOOP-DEL-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; LOOP-DEL-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; LOOP-DEL-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; LOOP-DEL-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; LOOP-DEL-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; LOOP-DEL-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; LOOP-DEL-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; LOOP-DEL-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; LOOP-DEL-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; LOOP-DEL-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; LOOP-DEL-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP:%.*]], !prof [[PROF1]]
+; LOOP-DEL: mismatch_vec_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ], [ [[TMP1]], [[MISMATCH_MEM_CHECK]] ]
+; LOOP-DEL-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; LOOP-DEL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP20]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP21]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; LOOP-DEL-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LOOP-DEL: mismatch_vec_loop_inc:
+; LOOP-DEL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; LOOP-DEL-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; LOOP-DEL-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; LOOP-DEL-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[BYTE_COMPARE:%.*]]
+; LOOP-DEL: mismatch_vec_loop_found:
+; LOOP-DEL-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LOOP-DEL-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LOOP-DEL-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST1]] to i64
+; LOOP-DEL-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP26]]
+; LOOP-DEL-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; LOOP-DEL-NEXT: br label [[BYTE_COMPARE]]
+; LOOP-DEL: mismatch_loop_pre:
+; LOOP-DEL-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LOOP-DEL: mismatch_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_INDEX3:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LOOP-DEL-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX3]] to i64
+; LOOP-DEL-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; LOOP-DEL-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; LOOP-DEL-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; LOOP-DEL-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; LOOP-DEL-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; LOOP-DEL-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[BYTE_COMPARE]]
+; LOOP-DEL: mismatch_loop_inc:
+; LOOP-DEL-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX3]], 1
+; LOOP-DEL-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[TMP36]], label [[BYTE_COMPARE]], label [[MISMATCH_LOOP]]
+; LOOP-DEL: byte.compare:
+; LOOP-DEL-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX3]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LOOP-DEL-NEXT: [[TMP37:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; LOOP-DEL-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[TMP37]], i32 [[N]], i32 [[MISMATCH_RESULT]]
+; LOOP-DEL-NEXT: [[SPEC_SELECT4:%.*]] = select i1 [[TMP37]], ptr [[D]], ptr [[C]]
+; LOOP-DEL-NEXT: store i32 [[SPEC_SELECT]], ptr [[SPEC_SELECT4]], align 4
+; LOOP-DEL-NEXT: ret i32 [[SPEC_SELECT]]
+;
+; MASKED-LABEL: define i32 @compare_bytes_simple_end_ne_found(
+; MASKED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; MASKED-NEXT: entry:
+; MASKED-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; MASKED-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; MASKED: mismatch_min_it_check:
+; MASKED-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; MASKED-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; MASKED-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; MASKED-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; MASKED: mismatch_mem_check:
+; MASKED-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; MASKED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; MASKED-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; MASKED-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; MASKED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; MASKED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; MASKED-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; MASKED-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; MASKED-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP7]], 12
+; MASKED-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP10]], 12
+; MASKED-NEXT: [[TMP14:%.*]] = lshr i64 [[TMP6]], 12
+; MASKED-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP11]], 12
+; MASKED-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP12]], [[TMP13]]
+; MASKED-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP14]], [[TMP15]]
+; MASKED-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; MASKED-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VEC_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; MASKED: mismatch_vec_loop_preheader:
+; MASKED-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP1]], i64 [[TMP2]])
+; MASKED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
+; MASKED-NEXT: [[TMP21:%.*]] = mul nuw nsw i64 [[TMP20]], 16
+; MASKED-NEXT: br label [[MISMATCH_VEC_LOOP:%.*]]
+; MASKED: mismatch_vec_loop:
+; MASKED-NEXT: [[MISMATCH_VEC_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP19]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP30:%.*]], [[MISMATCH_VEC_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP29:%.*]], [[MISMATCH_VEC_LOOP_INC]] ]
+; MASKED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP23:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP25:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP24]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP26:%.*]] = icmp ne <vscale x 16 x i8> [[TMP23]], [[TMP25]]
+; MASKED-NEXT: [[TMP27:%.*]] = select <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i1> [[TMP26]], <vscale x 16 x i1> zeroinitializer
+; MASKED-NEXT: [[TMP28:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP27]])
+; MASKED-NEXT: br i1 [[TMP28]], label [[MISMATCH_VEC_LOOP_FOUND:%.*]], label [[MISMATCH_VEC_LOOP_INC]]
+; MASKED: mismatch_vec_loop_inc:
+; MASKED-NEXT: [[TMP29]] = add nuw nsw i64 [[MISMATCH_VEC_INDEX]], [[TMP21]]
+; MASKED-NEXT: [[TMP30]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP29]], i64 [[TMP2]])
+; MASKED-NEXT: [[TMP31:%.*]] = extractelement <vscale x 16 x i1> [[TMP30]], i64 0
+; MASKED-NEXT: br i1 [[TMP31]], label [[MISMATCH_VEC_LOOP]], label [[MISMATCH_END:%.*]]
+; MASKED: mismatch_vec_loop_found:
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP27]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_LAST_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[MISMATCH_VEC_LOOP_PRED]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_INDEX:%.*]] = phi i64 [ [[MISMATCH_VEC_INDEX]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[TMP32:%.*]] = and <vscale x 16 x i1> [[MISMATCH_VEC_LAST_LOOP_PRED]], [[MISMATCH_VEC_FOUND_PRED]]
+; MASKED-NEXT: [[TMP33:%.*]] = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[TMP32]], i1 true)
+; MASKED-NEXT: [[TMP34:%.*]] = zext i32 [[TMP33]] to i64
+; MASKED-NEXT: [[TMP35:%.*]] = add nuw nsw i64 [[MISMATCH_VEC_FOUND_INDEX]], [[TMP34]]
+; MASKED-NEXT: [[TMP36:%.*]] = trunc i64 [[TMP35]] to i32
+; MASKED-NEXT: br label [[MISMATCH_END]]
+; MASKED: mismatch_loop_pre:
+; MASKED-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; MASKED: mismatch_loop:
+; MASKED-NEXT: [[MISMATCH_INDEX3:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP43:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[TMP37:%.*]] = zext i32 [[MISMATCH_INDEX3]] to i64
+; MASKED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP37]]
+; MASKED-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP38]], align 1
+; MASKED-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP37]]
+; MASKED-NEXT: [[TMP41:%.*]] = load i8, ptr [[TMP40]], align 1
+; MASKED-NEXT: [[TMP42:%.*]] = icmp eq i8 [[TMP39]], [[TMP41]]
+; MASKED-NEXT: br i1 [[TMP42]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; MASKED: mismatch_loop_inc:
+; MASKED-NEXT: [[TMP43]] = add i32 [[MISMATCH_INDEX3]], 1
+; MASKED-NEXT: [[TMP44:%.*]] = icmp eq i32 [[TMP43]], [[N]]
+; MASKED-NEXT: br i1 [[TMP44]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; MASKED: mismatch_end:
+; MASKED-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX3]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VEC_LOOP_INC]] ], [ [[TMP36]], [[MISMATCH_VEC_LOOP_FOUND]] ]
+; MASKED-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; MASKED: while.cond:
+; MASKED-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; MASKED-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; MASKED-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; MASKED-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; MASKED: while.body:
+; MASKED-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; MASKED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP45:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; MASKED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP46:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; MASKED-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP45]], [[TMP46]]
+; MASKED-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_FOUND:%.*]]
+; MASKED: while.found:
+; MASKED-NEXT: [[MISMATCH_INDEX1:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; MASKED-NEXT: [[FOUND_PTR:%.*]] = phi ptr [ [[C]], [[WHILE_BODY]] ], [ [[C]], [[BYTE_COMPARE]] ]
+; MASKED-NEXT: br label [[END:%.*]]
+; MASKED: byte.compare:
+; MASKED-NEXT: [[TMP47:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; MASKED-NEXT: br i1 [[TMP47]], label [[WHILE_END]], label [[WHILE_FOUND]]
+; MASKED: while.end:
+; MASKED-NEXT: [[MISMATCH_INDEX2:%.*]] = phi i32 [ [[N]], [[WHILE_COND]] ], [ [[N]], [[BYTE_COMPARE]] ]
+; MASKED-NEXT: [[END_PTR:%.*]] = phi ptr [ [[D]], [[WHILE_COND]] ], [ [[D]], [[BYTE_COMPARE]] ]
+; MASKED-NEXT: br label [[END]]
+; MASKED: end:
+; MASKED-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[MISMATCH_INDEX1]], [[WHILE_FOUND]] ], [ [[MISMATCH_INDEX2]], [[WHILE_END]] ]
+; MASKED-NEXT: [[STORE_PTR:%.*]] = phi ptr [ [[END_PTR]], [[WHILE_END]] ], [ [[FOUND_PTR]], [[WHILE_FOUND]] ]
+; MASKED-NEXT: store i32 [[MISMATCH_INDEX]], ptr [[STORE_PTR]], align 4
+; MASKED-NEXT: ret i32 [[MISMATCH_INDEX]]
+;
+; NO-TRANSFORM-LABEL: define i32 @compare_bytes_simple_end_ne_found(
+; NO-TRANSFORM-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) {
+; NO-TRANSFORM-NEXT: entry:
+; NO-TRANSFORM-NEXT: br label [[WHILE_COND:%.*]]
+; NO-TRANSFORM: while.cond:
+; NO-TRANSFORM-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; NO-TRANSFORM-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; NO-TRANSFORM-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; NO-TRANSFORM: while.body:
+; NO-TRANSFORM-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; NO-TRANSFORM-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; NO-TRANSFORM-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; NO-TRANSFORM-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; NO-TRANSFORM-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; NO-TRANSFORM-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_FOUND:%.*]]
+; NO-TRANSFORM: while.found:
+; NO-TRANSFORM-NEXT: [[MISMATCH_INDEX1:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ]
+; NO-TRANSFORM-NEXT: [[FOUND_PTR:%.*]] = phi ptr [ [[C]], [[WHILE_BODY]] ]
+; NO-TRANSFORM-NEXT: br label [[END:%.*]]
+; NO-TRANSFORM: while.end:
+; NO-TRANSFORM-NEXT: [[MISMATCH_INDEX2:%.*]] = phi i32 [ [[N]], [[WHILE_COND]] ]
+; NO-TRANSFORM-NEXT: [[END_PTR:%.*]] = phi ptr [ [[D]], [[WHILE_COND]] ]
+; NO-TRANSFORM-NEXT: br label [[END]]
+; NO-TRANSFORM: end:
+; NO-TRANSFORM-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[MISMATCH_INDEX1]], [[WHILE_FOUND]] ], [ [[MISMATCH_INDEX2]], [[WHILE_END]] ]
+; NO-TRANSFORM-NEXT: [[STORE_PTR:%.*]] = phi ptr [ [[END_PTR]], [[WHILE_END]] ], [ [[FOUND_PTR]], [[WHILE_FOUND]] ]
+; NO-TRANSFORM-NEXT: store i32 [[MISMATCH_INDEX]], ptr [[STORE_PTR]], align 4
+; NO-TRANSFORM-NEXT: ret i32 [[MISMATCH_INDEX]]
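+; The input loop below has two exits: while.found (a mismatching byte was
+; seen, result stored through %c) and while.end (the whole range compared
+; equal, result stored through %d). The checks above verify that both exit
+; values and both store pointers survive the rewrite.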
+entry:
+ br label %while.cond
+
+while.cond:
+ %len.addr = phi i32 [ %len, %entry ], [ %inc, %while.body ]
+ %inc = add i32 %len.addr, 1
+ %cmp.not = icmp eq i32 %inc, %n
+ br i1 %cmp.not, label %while.end, label %while.body
+
+while.body:
+ %idxprom = zext i32 %inc to i64
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+ %0 = load i8, ptr %arrayidx
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx2
+ %cmp.not2 = icmp eq i8 %0, %1
+ br i1 %cmp.not2, label %while.cond, label %while.found
+
+while.found:
+ %mismatch_index1 = phi i32 [ %inc, %while.body ]
+ %found_ptr = phi ptr [ %c, %while.body ]
+ br label %end
+
+while.end:
+ %mismatch_index2 = phi i32 [ %n, %while.cond ]
+ %end_ptr = phi ptr [ %d, %while.cond ]
+ br label %end
+
+end:
+ %mismatch_index = phi i32 [ %mismatch_index1, %while.found ], [ %mismatch_index2, %while.end ]
+ %store_ptr = phi ptr [ %end_ptr, %while.end ], [ %found_ptr, %while.found ]
+ store i32 %mismatch_index, ptr %store_ptr
+ ret i32 %mismatch_index
+}
+
+
+
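+; In this variant the byte-compare loop only runs under the guard
+; 'icmp ult i32 %n, %x'; the checks verify the idiom is still recognized
+; when the loop is conditionally entered and %x flows into the exit phi.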
+define i32 @compare_bytes_extra_cmp(ptr %a, ptr %b, i32 %len, i32 %n, i32 %x) {
+; CHECK-LABEL: define i32 @compare_bytes_extra_cmp(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_X:%.*]] = icmp ult i32 [[N]], [[X]]
+; CHECK-NEXT: br i1 [[CMP_X]], label [[PH:%.*]], label [[WHILE_END:%.*]]
+; CHECK: ph:
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; CHECK-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; CHECK: mismatch_min_it_check:
+; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; CHECK: mismatch_mem_check:
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; CHECK-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; CHECK-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; CHECK: mismatch_vec_loop_preheader:
+; CHECK-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; CHECK: mismatch_vec_loop:
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP20]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP21]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; CHECK-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; CHECK: mismatch_vec_loop_inc:
+; CHECK-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; CHECK: mismatch_vec_loop_found:
+; CHECK-NEXT: [[FIRST2:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX3:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST2]] to i64
+; CHECK-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX3]], [[TMP26]]
+; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT: br label [[MISMATCH_END]]
+; CHECK: mismatch_loop_pre:
+; CHECK-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; CHECK: mismatch_loop:
+; CHECK-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; CHECK-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; CHECK-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; CHECK: mismatch_loop_inc:
+; CHECK-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX]], 1
+; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; CHECK-NEXT: br i1 [[TMP36]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; CHECK: mismatch_end:
+; CHECK-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; CHECK-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; CHECK-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY]]
+; CHECK: while.body:
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP37]], [[TMP38]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END_LOOPEXIT]]
+; CHECK: byte.compare:
+; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT]]
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: [[INC_LCSSA1:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: br label [[WHILE_END]]
+; CHECK: while.end:
+; CHECK-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ [[INC_LCSSA1]], [[WHILE_END_LOOPEXIT]] ]
+; CHECK-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LMUL8-LABEL: define i32 @compare_bytes_extra_cmp(
+; LMUL8-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; LMUL8-NEXT: entry:
+; LMUL8-NEXT: [[CMP_X:%.*]] = icmp ult i32 [[N]], [[X]]
+; LMUL8-NEXT: br i1 [[CMP_X]], label [[PH:%.*]], label [[WHILE_END:%.*]]
+; LMUL8: ph:
+; LMUL8-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; LMUL8-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; LMUL8: mismatch_min_it_check:
+; LMUL8-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; LMUL8-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; LMUL8-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; LMUL8: mismatch_mem_check:
+; LMUL8-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; LMUL8-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; LMUL8-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; LMUL8-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; LMUL8-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; LMUL8-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; LMUL8-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; LMUL8-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; LMUL8-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; LMUL8-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; LMUL8-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; LMUL8-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; LMUL8-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; LMUL8-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; LMUL8-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; LMUL8-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; LMUL8: mismatch_vec_loop_preheader:
+; LMUL8-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; LMUL8: mismatch_vec_loop:
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 64, i1 true)
+; LMUL8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP20]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP21]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 64 x i1> @llvm.vp.icmp.nxv64i8(<vscale x 64 x i8> [[LHS_LOAD]], <vscale x 64 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv64i1(<vscale x 64 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP19]])
+; LMUL8-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; LMUL8-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LMUL8: mismatch_vec_loop_inc:
+; LMUL8-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; LMUL8-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; LMUL8-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; LMUL8-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; LMUL8: mismatch_vec_loop_found:
+; LMUL8-NEXT: [[FIRST2:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX3:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST2]] to i64
+; LMUL8-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX3]], [[TMP26]]
+; LMUL8-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; LMUL8-NEXT: br label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_pre:
+; LMUL8-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LMUL8: mismatch_loop:
+; LMUL8-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; LMUL8-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; LMUL8-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; LMUL8-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; LMUL8-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; LMUL8-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; LMUL8-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_inc:
+; LMUL8-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX]], 1
+; LMUL8-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; LMUL8-NEXT: br i1 [[TMP36]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; LMUL8: mismatch_end:
+; LMUL8-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LMUL8-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; LMUL8: while.cond:
+; LMUL8-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; LMUL8-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; LMUL8-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; LMUL8-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY]]
+; LMUL8: while.body:
+; LMUL8-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; LMUL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LMUL8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LMUL8-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP37]], [[TMP38]]
+; LMUL8-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END_LOOPEXIT]]
+; LMUL8: byte.compare:
+; LMUL8-NEXT: br label [[WHILE_END_LOOPEXIT]]
+; LMUL8: while.end.loopexit:
+; LMUL8-NEXT: [[INC_LCSSA1:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; LMUL8-NEXT: br label [[WHILE_END]]
+; LMUL8: while.end:
+; LMUL8-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ [[INC_LCSSA1]], [[WHILE_END_LOOPEXIT]] ]
+; LMUL8-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LOOP-DEL-LABEL: define i32 @compare_bytes_extra_cmp(
+; LOOP-DEL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; LOOP-DEL-NEXT: entry:
+; LOOP-DEL-NEXT: [[CMP_X:%.*]] = icmp ult i32 [[N]], [[X]]
+; LOOP-DEL-NEXT: br i1 [[CMP_X]], label [[PH:%.*]], label [[WHILE_END:%.*]]
+; LOOP-DEL: ph:
+; LOOP-DEL-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; LOOP-DEL-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; LOOP-DEL-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; LOOP-DEL-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; LOOP-DEL: mismatch_mem_check:
+; LOOP-DEL-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; LOOP-DEL-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; LOOP-DEL-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; LOOP-DEL-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; LOOP-DEL-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; LOOP-DEL-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; LOOP-DEL-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; LOOP-DEL-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; LOOP-DEL-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP7]], 12
+; LOOP-DEL-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP11]], 12
+; LOOP-DEL-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP8]], 12
+; LOOP-DEL-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 12
+; LOOP-DEL-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP6]], [[TMP9]]
+; LOOP-DEL-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP12]], [[TMP15]]
+; LOOP-DEL-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; LOOP-DEL-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP:%.*]], !prof [[PROF1]]
+; LOOP-DEL: mismatch_vec_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ [[TMP24:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ], [ [[TMP1]], [[MISMATCH_MEM_CHECK]] ]
+; LOOP-DEL-NEXT: [[AVL:%.*]] = sub nuw nsw i64 [[TMP2]], [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; LOOP-DEL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP20]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LOOP-DEL-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP21]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP19]])
+; LOOP-DEL-NEXT: [[TMP22:%.*]] = icmp ne i32 [[FIRST]], [[TMP19]]
+; LOOP-DEL-NEXT: br i1 [[TMP22]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LOOP-DEL: mismatch_vec_loop_inc:
+; LOOP-DEL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP19]] to i64
+; LOOP-DEL-NEXT: [[TMP24]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP23]]
+; LOOP-DEL-NEXT: [[TMP25:%.*]] = icmp ne i64 [[TMP24]], [[TMP2]]
+; LOOP-DEL-NEXT: br i1 [[TMP25]], label [[MISMATCH_VECTOR_LOOP]], label [[WHILE_END]]
+; LOOP-DEL: mismatch_vec_loop_found:
+; LOOP-DEL-NEXT: [[FIRST2:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LOOP-DEL-NEXT: [[MISMATCH_VECTOR_INDEX3:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LOOP-DEL-NEXT: [[TMP26:%.*]] = zext i32 [[FIRST2]] to i64
+; LOOP-DEL-NEXT: [[TMP27:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX3]], [[TMP26]]
+; LOOP-DEL-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; LOOP-DEL-NEXT: br label [[WHILE_END]]
+; LOOP-DEL: mismatch_loop_pre:
+; LOOP-DEL-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LOOP-DEL: mismatch_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP35:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LOOP-DEL-NEXT: [[TMP29:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; LOOP-DEL-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP29]]
+; LOOP-DEL-NEXT: [[TMP31:%.*]] = load i8, ptr [[TMP30]], align 1
+; LOOP-DEL-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP29]]
+; LOOP-DEL-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
+; LOOP-DEL-NEXT: [[TMP34:%.*]] = icmp eq i8 [[TMP31]], [[TMP33]]
+; LOOP-DEL-NEXT: br i1 [[TMP34]], label [[MISMATCH_LOOP_INC]], label [[WHILE_END]]
+; LOOP-DEL: mismatch_loop_inc:
+; LOOP-DEL-NEXT: [[TMP35]] = add i32 [[MISMATCH_INDEX]], 1
+; LOOP-DEL-NEXT: [[TMP36:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[TMP36]], label [[WHILE_END]], label [[MISMATCH_LOOP]]
+; LOOP-DEL: while.end:
+; LOOP-DEL-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP28]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LOOP-DEL-NEXT: ret i32 [[INC_LCSSA]]
+;
+; MASKED-LABEL: define i32 @compare_bytes_extra_cmp(
+; MASKED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; MASKED-NEXT: entry:
+; MASKED-NEXT: [[CMP_X:%.*]] = icmp ult i32 [[N]], [[X]]
+; MASKED-NEXT: br i1 [[CMP_X]], label [[PH:%.*]], label [[WHILE_END:%.*]]
+; MASKED: ph:
+; MASKED-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], 1
+; MASKED-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; MASKED: mismatch_min_it_check:
+; MASKED-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; MASKED-NEXT: [[TMP2:%.*]] = zext i32 [[N]] to i64
+; MASKED-NEXT: [[TMP3:%.*]] = icmp ule i32 [[TMP0]], [[N]]
+; MASKED-NEXT: br i1 [[TMP3]], label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; MASKED: mismatch_mem_check:
+; MASKED-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
+; MASKED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; MASKED-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; MASKED-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; MASKED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; MASKED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; MASKED-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; MASKED-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; MASKED-NEXT: [[TMP12:%.*]] = lshr i64 [[TMP7]], 12
+; MASKED-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP10]], 12
+; MASKED-NEXT: [[TMP14:%.*]] = lshr i64 [[TMP6]], 12
+; MASKED-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP11]], 12
+; MASKED-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP12]], [[TMP13]]
+; MASKED-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP14]], [[TMP15]]
+; MASKED-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; MASKED-NEXT: br i1 [[TMP18]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VEC_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; MASKED: mismatch_vec_loop_preheader:
+; MASKED-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP1]], i64 [[TMP2]])
+; MASKED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
+; MASKED-NEXT: [[TMP21:%.*]] = mul nuw nsw i64 [[TMP20]], 16
+; MASKED-NEXT: br label [[MISMATCH_VEC_LOOP:%.*]]
+; MASKED: mismatch_vec_loop:
+; MASKED-NEXT: [[MISMATCH_VEC_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP19]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP30:%.*]], [[MISMATCH_VEC_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_INDEX:%.*]] = phi i64 [ [[TMP1]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP29:%.*]], [[MISMATCH_VEC_LOOP_INC]] ]
+; MASKED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP23:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP25:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP24]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP26:%.*]] = icmp ne <vscale x 16 x i8> [[TMP23]], [[TMP25]]
+; MASKED-NEXT: [[TMP27:%.*]] = select <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i1> [[TMP26]], <vscale x 16 x i1> zeroinitializer
+; MASKED-NEXT: [[TMP28:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP27]])
+; MASKED-NEXT: br i1 [[TMP28]], label [[MISMATCH_VEC_LOOP_FOUND:%.*]], label [[MISMATCH_VEC_LOOP_INC]]
+; MASKED: mismatch_vec_loop_inc:
+; MASKED-NEXT: [[TMP29]] = add nuw nsw i64 [[MISMATCH_VEC_INDEX]], [[TMP21]]
+; MASKED-NEXT: [[TMP30]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP29]], i64 [[TMP2]])
+; MASKED-NEXT: [[TMP31:%.*]] = extractelement <vscale x 16 x i1> [[TMP30]], i64 0
+; MASKED-NEXT: br i1 [[TMP31]], label [[MISMATCH_VEC_LOOP]], label [[MISMATCH_END:%.*]]
+; MASKED: mismatch_vec_loop_found:
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP27]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_LAST_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[MISMATCH_VEC_LOOP_PRED]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_INDEX:%.*]] = phi i64 [ [[MISMATCH_VEC_INDEX]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[TMP32:%.*]] = and <vscale x 16 x i1> [[MISMATCH_VEC_LAST_LOOP_PRED]], [[MISMATCH_VEC_FOUND_PRED]]
+; MASKED-NEXT: [[TMP33:%.*]] = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[TMP32]], i1 true)
+; MASKED-NEXT: [[TMP34:%.*]] = zext i32 [[TMP33]] to i64
+; MASKED-NEXT: [[TMP35:%.*]] = add nuw nsw i64 [[MISMATCH_VEC_FOUND_INDEX]], [[TMP34]]
+; MASKED-NEXT: [[TMP36:%.*]] = trunc i64 [[TMP35]] to i32
+; MASKED-NEXT: br label [[MISMATCH_END]]
+; MASKED: mismatch_loop_pre:
+; MASKED-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; MASKED: mismatch_loop:
+; MASKED-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ [[TMP0]], [[MISMATCH_LOOP_PRE]] ], [ [[TMP43:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[TMP37:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; MASKED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP37]]
+; MASKED-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP38]], align 1
+; MASKED-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP37]]
+; MASKED-NEXT: [[TMP41:%.*]] = load i8, ptr [[TMP40]], align 1
+; MASKED-NEXT: [[TMP42:%.*]] = icmp eq i8 [[TMP39]], [[TMP41]]
+; MASKED-NEXT: br i1 [[TMP42]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; MASKED: mismatch_loop_inc:
+; MASKED-NEXT: [[TMP43]] = add i32 [[MISMATCH_INDEX]], 1
+; MASKED-NEXT: [[TMP44:%.*]] = icmp eq i32 [[TMP43]], [[N]]
+; MASKED-NEXT: br i1 [[TMP44]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; MASKED: mismatch_end:
+; MASKED-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ [[N]], [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ [[N]], [[MISMATCH_VEC_LOOP_INC]] ], [ [[TMP36]], [[MISMATCH_VEC_LOOP_FOUND]] ]
+; MASKED-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; MASKED: while.cond:
+; MASKED-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
+; MASKED-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
+; MASKED-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
+; MASKED-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY]]
+; MASKED: while.body:
+; MASKED-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; MASKED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP45:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; MASKED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP46:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; MASKED-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP45]], [[TMP46]]
+; MASKED-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END_LOOPEXIT]]
+; MASKED: byte.compare:
+; MASKED-NEXT: br label [[WHILE_END_LOOPEXIT]]
+; MASKED: while.end.loopexit:
+; MASKED-NEXT: [[INC_LCSSA1:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; MASKED-NEXT: br label [[WHILE_END]]
+; MASKED: while.end:
+; MASKED-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ [[INC_LCSSA1]], [[WHILE_END_LOOPEXIT]] ]
+; MASKED-NEXT: ret i32 [[INC_LCSSA]]
+;
+; NO-TRANSFORM-LABEL: define i32 @compare_bytes_extra_cmp(
+; NO-TRANSFORM-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]], i32 [[X:%.*]]) {
+; NO-TRANSFORM-NEXT: entry:
+; NO-TRANSFORM-NEXT: [[CMP_X:%.*]] = icmp ult i32 [[N]], [[X]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_X]], label [[PH:%.*]], label [[WHILE_END:%.*]]
+; NO-TRANSFORM: ph:
+; NO-TRANSFORM-NEXT: br label [[WHILE_COND:%.*]]
+; NO-TRANSFORM: while.cond:
+; NO-TRANSFORM-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[PH]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; NO-TRANSFORM-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; NO-TRANSFORM-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END]], label [[WHILE_BODY]]
+; NO-TRANSFORM: while.body:
+; NO-TRANSFORM-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; NO-TRANSFORM-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; NO-TRANSFORM-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; NO-TRANSFORM-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; NO-TRANSFORM-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; NO-TRANSFORM-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; NO-TRANSFORM: while.end:
+; NO-TRANSFORM-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ], [ [[X]], [[ENTRY:%.*]] ]
+; NO-TRANSFORM-NEXT: ret i32 [[INC_LCSSA]]
+entry:
+ %cmp.x = icmp ult i32 %n, %x
+ br i1 %cmp.x, label %ph, label %while.end
+
+ph:
+ br label %while.cond
+
+while.cond:
+ %len.addr = phi i32 [ %len, %ph ], [ %inc, %while.body ]
+ %inc = add i32 %len.addr, 1
+ %cmp.not = icmp eq i32 %inc, %n
+ br i1 %cmp.not, label %while.end, label %while.body
+
+while.body:
+ %idxprom = zext i32 %inc to i64
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+ %0 = load i8, ptr %arrayidx
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx2
+ %cmp.not2 = icmp eq i8 %0, %1
+ br i1 %cmp.not2, label %while.cond, label %while.end
+
+while.end:
+ %inc.lcssa = phi i32 [ %inc, %while.body ], [ %inc, %while.cond ], [ %x, %entry ]
+ ret i32 %inc.lcssa
+}
+
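+; Checks the transform when the loop exits into a cleanup block: the exit
+; compare is against 0 and the mismatch index that reaches if.end is unused,
+; so the function returns void on every path.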
+define void @compare_bytes_cleanup_block(ptr %src1, ptr %src2) {
+; CHECK-LABEL: define void @compare_bytes_cleanup_block(
+; CHECK-SAME: ptr [[SRC1:%.*]], ptr [[SRC2:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; CHECK: mismatch_min_it_check:
+; CHECK-NEXT: br i1 false, label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; CHECK: mismatch_mem_check:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[SRC1]], i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[SRC2]], i64 1
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[TMP1]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[SRC1]], i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC2]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP3]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP6]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP10]], 12
+; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 12
+; CHECK-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP9]], 12
+; CHECK-NEXT: [[TMP11:%.*]] = lshr i64 [[TMP7]], 12
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne i64 [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP8]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; CHECK: mismatch_vec_loop_preheader:
+; CHECK-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; CHECK: mismatch_vec_loop:
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ 1, [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP20:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[AVL:%.*]] = sub nuw nsw i64 0, [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP16]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP15]])
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[MISMATCH_VECTOR_INDEX]]
+; CHECK-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr [[TMP17]], <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP15]])
+; CHECK-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 16 x i1> @llvm.vp.icmp.nxv16i8(<vscale x 16 x i8> [[LHS_LOAD]], <vscale x 16 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP15]])
+; CHECK-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), i32 [[TMP15]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne i32 [[FIRST]], [[TMP15]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; CHECK: mismatch_vec_loop_inc:
+; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP15]] to i64
+; CHECK-NEXT: [[TMP20]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP19]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
+; CHECK-NEXT: br i1 [[TMP21]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; CHECK: mismatch_vec_loop_found:
+; CHECK-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[FIRST1]] to i64
+; CHECK-NEXT: [[TMP23:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP22]]
+; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
+; CHECK-NEXT: br label [[MISMATCH_END]]
+; CHECK: mismatch_loop_pre:
+; CHECK-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; CHECK: mismatch_loop:
+; CHECK-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ 1, [[MISMATCH_LOOP_PRE]] ], [ [[TMP31:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[TMP25]]
+; CHECK-NEXT: [[TMP27:%.*]] = load i8, ptr [[TMP26]], align 1
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[TMP25]]
+; CHECK-NEXT: [[TMP29:%.*]] = load i8, ptr [[TMP28]], align 1
+; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i8 [[TMP27]], [[TMP29]]
+; CHECK-NEXT: br i1 [[TMP30]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; CHECK: mismatch_loop_inc:
+; CHECK-NEXT: [[TMP31]] = add i32 [[MISMATCH_INDEX]], 1
+; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i32 [[TMP31]], 0
+; CHECK-NEXT: br i1 [[TMP32]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; CHECK: mismatch_end:
+; CHECK-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ 0, [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ 0, [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP24]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; CHECK-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[LEN:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ], [ 0, [[MISMATCH_END]] ]
+; CHECK-NEXT: [[INC:%.*]] = add i32 [[LEN]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[CLEANUP_THREAD:%.*]], label [[WHILE_BODY]]
+; CHECK: while.body:
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP34:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP33]], [[TMP34]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[IF_END:%.*]]
+; CHECK: byte.compare:
+; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], 0
+; CHECK-NEXT: br i1 [[TMP35]], label [[CLEANUP_THREAD]], label [[IF_END]]
+; CHECK: cleanup.thread:
+; CHECK-NEXT: ret void
+; CHECK: if.end:
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: ret void
+;
+; LMUL8-LABEL: define void @compare_bytes_cleanup_block(
+; LMUL8-SAME: ptr [[SRC1:%.*]], ptr [[SRC2:%.*]]) #[[ATTR0]] {
+; LMUL8-NEXT: entry:
+; LMUL8-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; LMUL8: mismatch_min_it_check:
+; LMUL8-NEXT: br i1 false, label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; LMUL8: mismatch_mem_check:
+; LMUL8-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[SRC1]], i64 1
+; LMUL8-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[SRC2]], i64 1
+; LMUL8-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[TMP1]] to i64
+; LMUL8-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; LMUL8-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[SRC1]], i64 0
+; LMUL8-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC2]], i64 0
+; LMUL8-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP3]] to i64
+; LMUL8-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP6]] to i64
+; LMUL8-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP10]], 12
+; LMUL8-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 12
+; LMUL8-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP9]], 12
+; LMUL8-NEXT: [[TMP11:%.*]] = lshr i64 [[TMP7]], 12
+; LMUL8-NEXT: [[TMP12:%.*]] = icmp ne i64 [[TMP2]], [[TMP5]]
+; LMUL8-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP8]], [[TMP11]]
+; LMUL8-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
+; LMUL8-NEXT: br i1 [[TMP14]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VECTOR_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; LMUL8: mismatch_vec_loop_preheader:
+; LMUL8-NEXT: br label [[MISMATCH_VECTOR_LOOP:%.*]]
+; LMUL8: mismatch_vec_loop:
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX:%.*]] = phi i64 [ 1, [[MISMATCH_VECTOR_LOOP_PREHEADER]] ], [ [[TMP20:%.*]], [[MISMATCH_VECTOR_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[AVL:%.*]] = sub nuw nsw i64 0, [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[TMP15:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 64, i1 true)
+; LMUL8-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[LHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP16]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP15]])
+; LMUL8-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[MISMATCH_VECTOR_INDEX]]
+; LMUL8-NEXT: [[RHS_LOAD:%.*]] = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr [[TMP17]], <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP15]])
+; LMUL8-NEXT: [[MISMATCH_CMP:%.*]] = call <vscale x 64 x i1> @llvm.vp.icmp.nxv64i8(<vscale x 64 x i8> [[LHS_LOAD]], <vscale x 64 x i8> [[RHS_LOAD]], metadata !"ne", <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP15]])
+; LMUL8-NEXT: [[FIRST:%.*]] = call i32 @llvm.vp.cttz.elts.i32.nxv64i1(<vscale x 64 x i1> [[MISMATCH_CMP]], i1 false, <vscale x 64 x i1> shufflevector (<vscale x 64 x i1> insertelement (<vscale x 64 x i1> poison, i1 true, i64 0), <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer), i32 [[TMP15]])
+; LMUL8-NEXT: [[TMP18:%.*]] = icmp ne i32 [[FIRST]], [[TMP15]]
+; LMUL8-NEXT: br i1 [[TMP18]], label [[MISMATCH_VECTOR_LOOP_FOUND:%.*]], label [[MISMATCH_VECTOR_LOOP_INC]]
+; LMUL8: mismatch_vec_loop_inc:
+; LMUL8-NEXT: [[TMP19:%.*]] = zext i32 [[TMP15]] to i64
+; LMUL8-NEXT: [[TMP20]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX]], [[TMP19]]
+; LMUL8-NEXT: [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
+; LMUL8-NEXT: br i1 [[TMP21]], label [[MISMATCH_VECTOR_LOOP]], label [[MISMATCH_END:%.*]]
+; LMUL8: mismatch_vec_loop_found:
+; LMUL8-NEXT: [[FIRST1:%.*]] = phi i32 [ [[FIRST]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[MISMATCH_VECTOR_INDEX2:%.*]] = phi i64 [ [[MISMATCH_VECTOR_INDEX]], [[MISMATCH_VECTOR_LOOP]] ]
+; LMUL8-NEXT: [[TMP22:%.*]] = zext i32 [[FIRST1]] to i64
+; LMUL8-NEXT: [[TMP23:%.*]] = add nuw nsw i64 [[MISMATCH_VECTOR_INDEX2]], [[TMP22]]
+; LMUL8-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
+; LMUL8-NEXT: br label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_pre:
+; LMUL8-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LMUL8: mismatch_loop:
+; LMUL8-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ 1, [[MISMATCH_LOOP_PRE]] ], [ [[TMP31:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; LMUL8-NEXT: [[TMP25:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; LMUL8-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[TMP25]]
+; LMUL8-NEXT: [[TMP27:%.*]] = load i8, ptr [[TMP26]], align 1
+; LMUL8-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[TMP25]]
+; LMUL8-NEXT: [[TMP29:%.*]] = load i8, ptr [[TMP28]], align 1
+; LMUL8-NEXT: [[TMP30:%.*]] = icmp eq i8 [[TMP27]], [[TMP29]]
+; LMUL8-NEXT: br i1 [[TMP30]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; LMUL8: mismatch_loop_inc:
+; LMUL8-NEXT: [[TMP31]] = add i32 [[MISMATCH_INDEX]], 1
+; LMUL8-NEXT: [[TMP32:%.*]] = icmp eq i32 [[TMP31]], 0
+; LMUL8-NEXT: br i1 [[TMP32]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; LMUL8: mismatch_end:
+; LMUL8-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ 0, [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ 0, [[MISMATCH_VECTOR_LOOP_INC]] ], [ [[TMP24]], [[MISMATCH_VECTOR_LOOP_FOUND]] ]
+; LMUL8-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; LMUL8: while.cond:
+; LMUL8-NEXT: [[LEN:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ], [ 0, [[MISMATCH_END]] ]
+; LMUL8-NEXT: [[INC:%.*]] = add i32 [[LEN]], 1
+; LMUL8-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], 0
+; LMUL8-NEXT: br i1 [[CMP_NOT]], label [[CLEANUP_THREAD:%.*]], label [[WHILE_BODY]]
+; LMUL8: while.body:
+; LMUL8-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; LMUL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP33:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LMUL8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP34:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LMUL8-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP33]], [[TMP34]]
+; LMUL8-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[IF_END:%.*]]
+; LMUL8: byte.compare:
+; LMUL8-NEXT: [[TMP35:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], 0
+; LMUL8-NEXT: br i1 [[TMP35]], label [[CLEANUP_THREAD]], label [[IF_END]]
+; LMUL8: cleanup.thread:
+; LMUL8-NEXT: ret void
+; LMUL8: if.end:
+; LMUL8-NEXT: [[RES:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; LMUL8-NEXT: ret void
+;
+; LOOP-DEL-LABEL: define void @compare_bytes_cleanup_block(
+; LOOP-DEL-SAME: ptr [[SRC1:%.*]], ptr [[SRC2:%.*]]) #[[ATTR0]] {
+; LOOP-DEL-NEXT: entry:
+; LOOP-DEL-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; LOOP-DEL: mismatch_loop:
+; LOOP-DEL-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[TMP6:%.*]], [[MISMATCH_LOOP]] ]
+; LOOP-DEL-NEXT: [[TMP0:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; LOOP-DEL-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[TMP0]]
+; LOOP-DEL-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
+; LOOP-DEL-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[TMP0]]
+; LOOP-DEL-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; LOOP-DEL-NEXT: [[TMP5:%.*]] = icmp ne i8 [[TMP2]], [[TMP4]]
+; LOOP-DEL-NEXT: [[TMP6]] = add i32 [[MISMATCH_INDEX]], 1
+; LOOP-DEL-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 0
+; LOOP-DEL-NEXT: [[OR_COND:%.*]] = or i1 [[TMP5]], [[TMP7]]
+; LOOP-DEL-NEXT: br i1 [[OR_COND]], label [[COMMON_RET:%.*]], label [[MISMATCH_LOOP]]
+; LOOP-DEL: common.ret:
+; LOOP-DEL-NEXT: ret void
+;
+; MASKED-LABEL: define void @compare_bytes_cleanup_block(
+; MASKED-SAME: ptr [[SRC1:%.*]], ptr [[SRC2:%.*]]) #[[ATTR0]] {
+; MASKED-NEXT: entry:
+; MASKED-NEXT: br label [[MISMATCH_MIN_IT_CHECK:%.*]]
+; MASKED: mismatch_min_it_check:
+; MASKED-NEXT: br i1 false, label [[MISMATCH_MEM_CHECK:%.*]], label [[MISMATCH_LOOP_PRE:%.*]], !prof [[PROF0]]
+; MASKED: mismatch_mem_check:
+; MASKED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[SRC1]], i64 1
+; MASKED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[SRC2]], i64 1
+; MASKED-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[TMP1]] to i64
+; MASKED-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; MASKED-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[SRC1]], i64 0
+; MASKED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC2]], i64 0
+; MASKED-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; MASKED-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP5]] to i64
+; MASKED-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP3]], 12
+; MASKED-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP6]], 12
+; MASKED-NEXT: [[TMP10:%.*]] = lshr i64 [[TMP2]], 12
+; MASKED-NEXT: [[TMP11:%.*]] = lshr i64 [[TMP7]], 12
+; MASKED-NEXT: [[TMP12:%.*]] = icmp ne i64 [[TMP8]], [[TMP9]]
+; MASKED-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
+; MASKED-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
+; MASKED-NEXT: br i1 [[TMP14]], label [[MISMATCH_LOOP_PRE]], label [[MISMATCH_VEC_LOOP_PREHEADER:%.*]], !prof [[PROF1]]
+; MASKED: mismatch_vec_loop_preheader:
+; MASKED-NEXT: [[TMP15:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 1, i64 0)
+; MASKED-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
+; MASKED-NEXT: [[TMP17:%.*]] = mul nuw nsw i64 [[TMP16]], 16
+; MASKED-NEXT: br label [[MISMATCH_VEC_LOOP:%.*]]
+; MASKED: mismatch_vec_loop:
+; MASKED-NEXT: [[MISMATCH_VEC_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP15]], [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP26:%.*]], [[MISMATCH_VEC_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_INDEX:%.*]] = phi i64 [ 1, [[MISMATCH_VEC_LOOP_PREHEADER]] ], [ [[TMP25:%.*]], [[MISMATCH_VEC_LOOP_INC]] ]
+; MASKED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP18]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[MISMATCH_VEC_INDEX]]
+; MASKED-NEXT: [[TMP21:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP20]], i32 1, <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i8> zeroinitializer)
+; MASKED-NEXT: [[TMP22:%.*]] = icmp ne <vscale x 16 x i8> [[TMP19]], [[TMP21]]
+; MASKED-NEXT: [[TMP23:%.*]] = select <vscale x 16 x i1> [[MISMATCH_VEC_LOOP_PRED]], <vscale x 16 x i1> [[TMP22]], <vscale x 16 x i1> zeroinitializer
+; MASKED-NEXT: [[TMP24:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP23]])
+; MASKED-NEXT: br i1 [[TMP24]], label [[MISMATCH_VEC_LOOP_FOUND:%.*]], label [[MISMATCH_VEC_LOOP_INC]]
+; MASKED: mismatch_vec_loop_inc:
+; MASKED-NEXT: [[TMP25]] = add nuw nsw i64 [[MISMATCH_VEC_INDEX]], [[TMP17]]
+; MASKED-NEXT: [[TMP26]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP25]], i64 0)
+; MASKED-NEXT: [[TMP27:%.*]] = extractelement <vscale x 16 x i1> [[TMP26]], i64 0
+; MASKED-NEXT: br i1 [[TMP27]], label [[MISMATCH_VEC_LOOP]], label [[MISMATCH_END:%.*]]
+; MASKED: mismatch_vec_loop_found:
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_PRED:%.*]] = phi <vscale x 16 x i1> [ [[TMP23]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_LAST_LOOP_PRED:%.*]] = phi <vscale x 16 x i1> [ [[MISMATCH_VEC_LOOP_PRED]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[MISMATCH_VEC_FOUND_INDEX:%.*]] = phi i64 [ [[MISMATCH_VEC_INDEX]], [[MISMATCH_VEC_LOOP]] ]
+; MASKED-NEXT: [[TMP28:%.*]] = and <vscale x 16 x i1> [[MISMATCH_VEC_LAST_LOOP_PRED]], [[MISMATCH_VEC_FOUND_PRED]]
+; MASKED-NEXT: [[TMP29:%.*]] = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> [[TMP28]], i1 true)
+; MASKED-NEXT: [[TMP30:%.*]] = zext i32 [[TMP29]] to i64
+; MASKED-NEXT: [[TMP31:%.*]] = add nuw nsw i64 [[MISMATCH_VEC_FOUND_INDEX]], [[TMP30]]
+; MASKED-NEXT: [[TMP32:%.*]] = trunc i64 [[TMP31]] to i32
+; MASKED-NEXT: br label [[MISMATCH_END]]
+; MASKED: mismatch_loop_pre:
+; MASKED-NEXT: br label [[MISMATCH_LOOP:%.*]]
+; MASKED: mismatch_loop:
+; MASKED-NEXT: [[MISMATCH_INDEX:%.*]] = phi i32 [ 1, [[MISMATCH_LOOP_PRE]] ], [ [[TMP39:%.*]], [[MISMATCH_LOOP_INC:%.*]] ]
+; MASKED-NEXT: [[TMP33:%.*]] = zext i32 [[MISMATCH_INDEX]] to i64
+; MASKED-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[TMP33]]
+; MASKED-NEXT: [[TMP35:%.*]] = load i8, ptr [[TMP34]], align 1
+; MASKED-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[TMP33]]
+; MASKED-NEXT: [[TMP37:%.*]] = load i8, ptr [[TMP36]], align 1
+; MASKED-NEXT: [[TMP38:%.*]] = icmp eq i8 [[TMP35]], [[TMP37]]
+; MASKED-NEXT: br i1 [[TMP38]], label [[MISMATCH_LOOP_INC]], label [[MISMATCH_END]]
+; MASKED: mismatch_loop_inc:
+; MASKED-NEXT: [[TMP39]] = add i32 [[MISMATCH_INDEX]], 1
+; MASKED-NEXT: [[TMP40:%.*]] = icmp eq i32 [[TMP39]], 0
+; MASKED-NEXT: br i1 [[TMP40]], label [[MISMATCH_END]], label [[MISMATCH_LOOP]]
+; MASKED: mismatch_end:
+; MASKED-NEXT: [[MISMATCH_RESULT:%.*]] = phi i32 [ 0, [[MISMATCH_LOOP_INC]] ], [ [[MISMATCH_INDEX]], [[MISMATCH_LOOP]] ], [ 0, [[MISMATCH_VEC_LOOP_INC]] ], [ [[TMP32]], [[MISMATCH_VEC_LOOP_FOUND]] ]
+; MASKED-NEXT: br i1 true, label [[BYTE_COMPARE:%.*]], label [[WHILE_COND:%.*]]
+; MASKED: while.cond:
+; MASKED-NEXT: [[LEN:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ], [ 0, [[MISMATCH_END]] ]
+; MASKED-NEXT: [[INC:%.*]] = add i32 [[LEN]], 1
+; MASKED-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], 0
+; MASKED-NEXT: br i1 [[CMP_NOT]], label [[CLEANUP_THREAD:%.*]], label [[WHILE_BODY]]
+; MASKED: while.body:
+; MASKED-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
+; MASKED-NEXT: [[ARRAYIDX:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP41:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; MASKED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP42:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; MASKED-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP41]], [[TMP42]]
+; MASKED-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[IF_END:%.*]]
+; MASKED: byte.compare:
+; MASKED-NEXT: [[TMP43:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], 0
+; MASKED-NEXT: br i1 [[TMP43]], label [[CLEANUP_THREAD]], label [[IF_END]]
+; MASKED: cleanup.thread:
+; MASKED-NEXT: ret void
+; MASKED: if.end:
+; MASKED-NEXT: [[RES:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; MASKED-NEXT: ret void
+;
+; NO-TRANSFORM-LABEL: define void @compare_bytes_cleanup_block(
+; NO-TRANSFORM-SAME: ptr [[SRC1:%.*]], ptr [[SRC2:%.*]]) {
+; NO-TRANSFORM-NEXT: entry:
+; NO-TRANSFORM-NEXT: br label [[WHILE_COND:%.*]]
+; NO-TRANSFORM: while.cond:
+; NO-TRANSFORM-NEXT: [[LEN:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-TRANSFORM-NEXT: [[INC]] = add i32 [[LEN]], 1
+; NO-TRANSFORM-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], 0
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT]], label [[CLEANUP_THREAD:%.*]], label [[WHILE_BODY]]
+; NO-TRANSFORM: while.body:
+; NO-TRANSFORM-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; NO-TRANSFORM-NEXT: [[ARRAYIDX:%.*]] = getelementptr i8, ptr [[SRC1]], i64 [[IDXPROM]]
+; NO-TRANSFORM-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; NO-TRANSFORM-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[SRC2]], i64 [[IDXPROM]]
+; NO-TRANSFORM-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; NO-TRANSFORM-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[IF_END:%.*]]
+; NO-TRANSFORM: cleanup.thread:
+; NO-TRANSFORM-NEXT: ret void
+; NO-TRANSFORM: if.end:
+; NO-TRANSFORM-NEXT: [[RES:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ]
+; NO-TRANSFORM-NEXT: ret void
+entry:
+ br label %while.cond
+
+while.cond:
+ %len = phi i32 [ %inc, %while.body ], [ 0, %entry ]
+ %inc = add i32 %len, 1
+ %cmp.not = icmp eq i32 %inc, 0
+ br i1 %cmp.not, label %cleanup.thread, label %while.body
+
+while.body:
+ %idxprom = zext i32 %inc to i64
+ %arrayidx = getelementptr i8, ptr %src1, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %arrayidx2 = getelementptr i8, ptr %src2, i64 %idxprom
+ %1 = load i8, ptr %arrayidx2, align 1
+ %cmp.not2 = icmp eq i8 %0, %1
+ br i1 %cmp.not2, label %while.cond, label %if.end
+
+cleanup.thread:
+ ret void
+
+if.end:
+ %res = phi i32 [ %inc, %while.body ]
+ ret void
+}
+
+;
+; NEGATIVE TESTS
+;
+
+; Similar to @compare_bytes_simple, except in the while.end block we have an extra PHI
+; with unique values for each incoming block from the loop.
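+; A minimal sketch of the blocking pattern (quoted from the while.end block of
+; the test below): the exit PHI merges two different incoming values, so no
+; single mismatch result can replace it:
+;
+;   while.end:
+;     %final_ptr = phi ptr [ %c, %while.body ], [ %d, %while.cond ]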
+define i32 @compare_bytes_simple2(ptr %a, ptr %b, ptr %c, ptr %d, i32 %len, i32 %n) {
+; CHECK-LABEL: define i32 @compare_bytes_simple2(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; CHECK-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; CHECK: while.body:
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; CHECK: while.end:
+; CHECK-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; CHECK-NEXT: [[FINAL_PTR:%.*]] = phi ptr [ [[C]], [[WHILE_BODY]] ], [ [[D]], [[WHILE_COND]] ]
+; CHECK-NEXT: store i32 [[INC_LCSSA]], ptr [[FINAL_PTR]], align 4
+; CHECK-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LMUL8-LABEL: define i32 @compare_bytes_simple2(
+; LMUL8-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; LMUL8-NEXT: entry:
+; LMUL8-NEXT: br label [[WHILE_COND:%.*]]
+; LMUL8: while.cond:
+; LMUL8-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; LMUL8-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; LMUL8-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; LMUL8-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LMUL8: while.body:
+; LMUL8-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; LMUL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LMUL8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LMUL8-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; LMUL8-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; LMUL8: while.end:
+; LMUL8-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; LMUL8-NEXT: [[FINAL_PTR:%.*]] = phi ptr [ [[C]], [[WHILE_BODY]] ], [ [[D]], [[WHILE_COND]] ]
+; LMUL8-NEXT: store i32 [[INC_LCSSA]], ptr [[FINAL_PTR]], align 4
+; LMUL8-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LOOP-DEL-LABEL: define i32 @compare_bytes_simple2(
+; LOOP-DEL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; LOOP-DEL-NEXT: entry:
+; LOOP-DEL-NEXT: br label [[WHILE_COND:%.*]]
+; LOOP-DEL: while.cond:
+; LOOP-DEL-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; LOOP-DEL-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; LOOP-DEL-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LOOP-DEL: while.body:
+; LOOP-DEL-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; LOOP-DEL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LOOP-DEL-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LOOP-DEL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LOOP-DEL-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LOOP-DEL-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; LOOP-DEL-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; LOOP-DEL: while.end:
+; LOOP-DEL-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; LOOP-DEL-NEXT: [[FINAL_PTR:%.*]] = phi ptr [ [[C]], [[WHILE_BODY]] ], [ [[D]], [[WHILE_COND]] ]
+; LOOP-DEL-NEXT: store i32 [[INC_LCSSA]], ptr [[FINAL_PTR]], align 4
+; LOOP-DEL-NEXT: ret i32 [[INC_LCSSA]]
+;
+; MASKED-LABEL: define i32 @compare_bytes_simple2(
+; MASKED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; MASKED-NEXT: entry:
+; MASKED-NEXT: br label [[WHILE_COND:%.*]]
+; MASKED: while.cond:
+; MASKED-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; MASKED-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; MASKED-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; MASKED-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; MASKED: while.body:
+; MASKED-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; MASKED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; MASKED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; MASKED-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; MASKED-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; MASKED: while.end:
+; MASKED-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; MASKED-NEXT: [[FINAL_PTR:%.*]] = phi ptr [ [[C]], [[WHILE_BODY]] ], [ [[D]], [[WHILE_COND]] ]
+; MASKED-NEXT: store i32 [[INC_LCSSA]], ptr [[FINAL_PTR]], align 4
+; MASKED-NEXT: ret i32 [[INC_LCSSA]]
+;
+entry:
+ br label %while.cond
+
+while.cond:
+ %len.addr = phi i32 [ %len, %entry ], [ %inc, %while.body ]
+ %inc = add i32 %len.addr, 1
+ %cmp.not = icmp eq i32 %inc, %n
+ br i1 %cmp.not, label %while.end, label %while.body
+
+while.body:
+ %idxprom = zext i32 %inc to i64
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+ %0 = load i8, ptr %arrayidx
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx2
+ %cmp.not2 = icmp eq i8 %0, %1
+ br i1 %cmp.not2, label %while.cond, label %while.end
+
+while.end:
+ %inc.lcssa = phi i32 [ %inc, %while.body ], [ %inc, %while.cond ]
+ %final_ptr = phi ptr [ %c, %while.body ], [ %d, %while.cond ]
+ store i32 %inc.lcssa, ptr %final_ptr
+ ret i32 %inc.lcssa
+}
+
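+; Similar to @compare_bytes_simple2, except the extra while.end PHI merges the
+; loop-defined %inc with the unrelated argument %d, which also blocks the
+; transform.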
+define i32 @compare_bytes_simple3(ptr %a, ptr %b, ptr %c, i32 %d, i32 %len, i32 %n) {
+; CHECK-LABEL: define i32 @compare_bytes_simple3(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; CHECK-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; CHECK: while.body:
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; CHECK: while.end:
+; CHECK-NEXT: [[FINAL_VAL:%.*]] = phi i32 [ [[D]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; CHECK-NEXT: store i32 [[FINAL_VAL]], ptr [[C]], align 4
+; CHECK-NEXT: ret i32 [[FINAL_VAL]]
+;
+; LMUL8-LABEL: define i32 @compare_bytes_simple3(
+; LMUL8-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; LMUL8-NEXT: entry:
+; LMUL8-NEXT: br label [[WHILE_COND:%.*]]
+; LMUL8: while.cond:
+; LMUL8-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; LMUL8-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; LMUL8-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; LMUL8-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LMUL8: while.body:
+; LMUL8-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; LMUL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LMUL8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LMUL8-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; LMUL8-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; LMUL8: while.end:
+; LMUL8-NEXT: [[FINAL_VAL:%.*]] = phi i32 [ [[D]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; LMUL8-NEXT: store i32 [[FINAL_VAL]], ptr [[C]], align 4
+; LMUL8-NEXT: ret i32 [[FINAL_VAL]]
+;
+; LOOP-DEL-LABEL: define i32 @compare_bytes_simple3(
+; LOOP-DEL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; LOOP-DEL-NEXT: entry:
+; LOOP-DEL-NEXT: br label [[WHILE_COND:%.*]]
+; LOOP-DEL: while.cond:
+; LOOP-DEL-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; LOOP-DEL-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; LOOP-DEL-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LOOP-DEL: while.body:
+; LOOP-DEL-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; LOOP-DEL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LOOP-DEL-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LOOP-DEL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LOOP-DEL-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LOOP-DEL-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; LOOP-DEL-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; LOOP-DEL: while.end:
+; LOOP-DEL-NEXT: [[FINAL_VAL:%.*]] = phi i32 [ [[D]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; LOOP-DEL-NEXT: store i32 [[FINAL_VAL]], ptr [[C]], align 4
+; LOOP-DEL-NEXT: ret i32 [[FINAL_VAL]]
+;
+; MASKED-LABEL: define i32 @compare_bytes_simple3(
+; MASKED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[D:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; MASKED-NEXT: entry:
+; MASKED-NEXT: br label [[WHILE_COND:%.*]]
+; MASKED: while.cond:
+; MASKED-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; MASKED-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; MASKED-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; MASKED-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; MASKED: while.body:
+; MASKED-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; MASKED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; MASKED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; MASKED-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; MASKED-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; MASKED: while.end:
+; MASKED-NEXT: [[FINAL_VAL:%.*]] = phi i32 [ [[D]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; MASKED-NEXT: store i32 [[FINAL_VAL]], ptr [[C]], align 4
+; MASKED-NEXT: ret i32 [[FINAL_VAL]]
+;
+entry:
+ br label %while.cond
+
+while.cond:
+ %len.addr = phi i32 [ %len, %entry ], [ %inc, %while.body ]
+ %inc = add i32 %len.addr, 1
+ %cmp.not = icmp eq i32 %inc, %n
+ br i1 %cmp.not, label %while.end, label %while.body
+
+while.body:
+ %idxprom = zext i32 %inc to i64
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+ %0 = load i8, ptr %arrayidx
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx2
+ %cmp.not2 = icmp eq i8 %0, %1
+ br i1 %cmp.not2, label %while.cond, label %while.end
+
+while.end:
+ %final_val = phi i32 [ %d, %while.body ], [ %inc, %while.cond ]
+ store i32 %final_val, ptr %c
+ ret i32 %final_val
+}
+
+; Disable the optimization when noimplicitfloat is present.
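+; The expanded mismatch loop would introduce vector loads and compares, which
+; implicitly use vector registers, so the rewrite has to bail out here.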
+define i32 @no_implicit_float(ptr %a, ptr %b, i32 %len, i32 %n) noimplicitfloat {
+; CHECK-LABEL: define i32 @no_implicit_float(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; CHECK-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; CHECK: while.body:
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; CHECK: while.end:
+; CHECK-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; CHECK-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LMUL8-LABEL: define i32 @no_implicit_float(
+; LMUL8-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
+; LMUL8-NEXT: entry:
+; LMUL8-NEXT: br label [[WHILE_COND:%.*]]
+; LMUL8: while.cond:
+; LMUL8-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; LMUL8-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; LMUL8-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; LMUL8-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LMUL8: while.body:
+; LMUL8-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; LMUL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LMUL8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LMUL8-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LMUL8-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; LMUL8-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; LMUL8: while.end:
+; LMUL8-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; LMUL8-NEXT: ret i32 [[INC_LCSSA]]
+;
+; LOOP-DEL-LABEL: define i32 @no_implicit_float(
+; LOOP-DEL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
+; LOOP-DEL-NEXT: entry:
+; LOOP-DEL-NEXT: br label [[WHILE_COND:%.*]]
+; LOOP-DEL: while.cond:
+; LOOP-DEL-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; LOOP-DEL-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; LOOP-DEL-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; LOOP-DEL-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; LOOP-DEL: while.body:
+; LOOP-DEL-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; LOOP-DEL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; LOOP-DEL-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; LOOP-DEL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; LOOP-DEL-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; LOOP-DEL-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; LOOP-DEL-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; LOOP-DEL: while.end:
+; LOOP-DEL-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; LOOP-DEL-NEXT: ret i32 [[INC_LCSSA]]
+;
+; MASKED-LABEL: define i32 @no_implicit_float(
+; MASKED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i32 [[LEN:%.*]], i32 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
+; MASKED-NEXT: entry:
+; MASKED-NEXT: br label [[WHILE_COND:%.*]]
+; MASKED: while.cond:
+; MASKED-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
+; MASKED-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
+; MASKED-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; MASKED-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END:%.*]], label [[WHILE_BODY]]
+; MASKED: while.body:
+; MASKED-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
+; MASKED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; MASKED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
+; MASKED-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; MASKED-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
+; MASKED-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; MASKED: while.end:
+; MASKED-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ]
+; MASKED-NEXT: ret i32 [[INC_LCSSA]]
+;
+entry:
+ br label %while.cond
+
+while.cond:
+ %len.addr = phi i32 [ %len, %entry ], [ %inc, %while.body ]
+ %inc = add i32 %len.addr, 1
+ %cmp.not = icmp eq i32 %inc, %n
+ br i1 %cmp.not, label %while.end, label %while.body
+
+while.body:
+ %idxprom = zext i32 %inc to i64
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+ %0 = load i8, ptr %arrayidx
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx2
+ %cmp.not2 = icmp eq i8 %0, %1
+ br i1 %cmp.not2, label %while.cond, label %while.end
+
+while.end:
+ %inc.lcssa = phi i32 [ %inc, %while.body ], [ %inc, %while.cond ]
+ ret i32 %inc.lcssa
+}
>From 2360c410fa501cb0a1606fc279e61abcda54326c Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Tue, 2 Jul 2024 19:18:59 -0700
Subject: [PATCH 063/246] [RISCV] Add the missing dependency on Vectorize
Caused by #94082.
---
llvm/lib/Target/RISCV/CMakeLists.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index 8715403f3839a..f24f3ba4e964d 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -80,6 +80,7 @@ add_llvm_target(RISCVCodeGen
Target
TargetParser
TransformUtils
+ Vectorize
ADD_TO_COMPONENT
RISCV
>From 3641efcf8cb256ddbd20f4add5ce55800cad5399 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Tue, 2 Jul 2024 19:24:53 -0700
Subject: [PATCH 064/246] [CodeGen] Use range-based for loops (NFC) (#97500)
---
.../CodeGen/AssignmentTrackingAnalysis.cpp | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index cbec482d4cdc5..146276b4fd0bb 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -572,11 +572,10 @@ class MemLocFragmentFill {
bool FirstMeet = true;
// LiveIn locs for BB is the meet of the already-processed preds' LiveOut
// locs.
- for (auto I = pred_begin(&BB), E = pred_end(&BB); I != E; I++) {
+ for (const BasicBlock *Pred : predecessors(&BB)) {
// Ignore preds that haven't been processed yet. This is essentially the
// same as initialising all variables to implicit top value (⊤) which is
// the identity value for the meet operation.
- const BasicBlock *Pred = *I;
if (!Visited.count(Pred))
continue;
@@ -941,10 +940,10 @@ class MemLocFragmentFill {
LLVM_DEBUG(dbgs() << BB->getName()
<< " has new OutLocs, add succs to worklist: [ ");
LiveOut[BB] = std::move(LiveSet);
- for (auto I = succ_begin(BB), E = succ_end(BB); I != E; I++) {
- if (OnPending.insert(*I).second) {
- LLVM_DEBUG(dbgs() << I->getName() << " ");
- Pending.push(BBToOrder[*I]);
+ for (BasicBlock *Succ : successors(BB)) {
+ if (OnPending.insert(Succ).second) {
+ LLVM_DEBUG(dbgs() << Succ->getName() << " ");
+ Pending.push(BBToOrder[Succ]);
}
}
LLVM_DEBUG(dbgs() << "]\n");
@@ -2360,10 +2359,10 @@ bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) {
LLVM_DEBUG(dbgs() << BB->getName()
<< " has new OutLocs, add succs to worklist: [ ");
LiveOut[BB] = std::move(LiveSet);
- for (auto I = succ_begin(BB), E = succ_end(BB); I != E; I++) {
- if (OnPending.insert(*I).second) {
- LLVM_DEBUG(dbgs() << I->getName() << " ");
- Pending.push(BBToOrder[*I]);
+ for (BasicBlock *Succ : successors(BB)) {
+ if (OnPending.insert(Succ).second) {
+ LLVM_DEBUG(dbgs() << Succ->getName() << " ");
+ Pending.push(BBToOrder[Succ]);
}
}
LLVM_DEBUG(dbgs() << "]\n");
>From 0f9fbbb63cfcd2069441aa2ebef622c9716f8dbb Mon Sep 17 00:00:00 2001
From: Hongyu Chen <hongyuchy at google.com>
Date: Tue, 2 Jul 2024 20:32:45 -0700
Subject: [PATCH 065/246] [lld][ELF] Support LLVM repository and LLVM revision
information (#97323)
Added LLVM repository and LLVM revision information for
`lld::getLLDVersion()`
Before this change:
```
hongyuchy at hongyuchy:~/llvm-project/.build_lld_version$ bin/ld.lld --version
LLD 19.0.0 (compatible with GNU linkers)
```
After this change with LLVM_APPEND_VC_REV=on
```
hongyuchy at hongyuchy:~/llvm-project/.build_lld_version$ bin/ld.lld --version
LLD 19.0.0 (https://github.com/yugier/llvm-project.git 4134b33c6a362cb462b335177d6d9e8235f04309), compatible with GNU linkers
```
with LLVM_APPEND_VC_REV=off
```
hongyuchy at hongyuchy:~/llvm-project/.build_lld_version$ bin/ld.lld --version
LLD 19.0.0, compatible with GNU linkers
```
---
lld/Common/Version.cpp | 7 ++++++-
lld/ELF/Driver.cpp | 2 +-
lld/MinGW/Driver.cpp | 2 +-
lld/test/ELF/version.test | 2 +-
lld/test/MinGW/driver.test | 2 +-
5 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/lld/Common/Version.cpp b/lld/Common/Version.cpp
index ec6eda6a6748f..78f7c6b69b505 100644
--- a/lld/Common/Version.cpp
+++ b/lld/Common/Version.cpp
@@ -11,8 +11,8 @@
//===----------------------------------------------------------------------===//
#include "lld/Common/Version.h"
-
#include "VCSVersion.inc"
+#include "llvm/Support/VCSRevision.h"
// Returns a version string, e.g.:
// LLD 14.0.0 (https://github.com/llvm/llvm-project.git
@@ -23,6 +23,11 @@ std::string lld::getLLDVersion() {
#else
#define LLD_VENDOR_DISPLAY
#endif
+#if defined(LLVM_REPOSITORY) || defined(LLVM_REVISION)
+ return LLD_VENDOR_DISPLAY "LLD " LLD_VERSION_STRING " (" LLVM_REPOSITORY
+ " " LLVM_REVISION ")";
+#else
return LLD_VENDOR_DISPLAY "LLD " LLD_VERSION_STRING;
+#endif
#undef LLD_VENDOR_DISPLAY
}
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index a4863d6717efb..7800c2919a2bd 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -631,7 +631,7 @@ void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
// of Libtool. We cannot convince every software developer to migrate to
// the latest version and re-generate scripts. So we have this hack.
if (args.hasArg(OPT_v) || args.hasArg(OPT_version))
- message(getLLDVersion() + " (compatible with GNU linkers)");
+ message(getLLDVersion() + ", compatible with GNU linkers");
if (const char *path = getReproduceOption(args)) {
// Note that --reproduce is a debug option so you can ignore it
diff --git a/lld/MinGW/Driver.cpp b/lld/MinGW/Driver.cpp
index 35fd478a21905..1fd120ad3601d 100644
--- a/lld/MinGW/Driver.cpp
+++ b/lld/MinGW/Driver.cpp
@@ -199,7 +199,7 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
// a GNU compatible linker. As long as an output for the -v option
// contains "GNU" or "with BFD", they recognize us as GNU-compatible.
if (args.hasArg(OPT_v) || args.hasArg(OPT_version))
- message(getLLDVersion() + " (compatible with GNU linkers)");
+ message(getLLDVersion() + ", compatible with GNU linkers");
// The behavior of -v or --version is a bit strange, but this is
// needed for compatibility with GNU linkers.
diff --git a/lld/test/ELF/version.test b/lld/test/ELF/version.test
index cdeeb4795e185..383c1ac976d96 100644
--- a/lld/test/ELF/version.test
+++ b/lld/test/ELF/version.test
@@ -7,4 +7,4 @@
# RUN: ld.lld -V 2>&1 | FileCheck %s
# RUN: not ld.lld -V %t/not-exist 2>&1 | FileCheck %s
-# CHECK: LLD {{.*}} (compatible with GNU linkers)
+# CHECK: LLD {{.*}}, compatible with GNU linkers
diff --git a/lld/test/MinGW/driver.test b/lld/test/MinGW/driver.test
index b723c0ad98749..44ec58818e0bf 100644
--- a/lld/test/MinGW/driver.test
+++ b/lld/test/MinGW/driver.test
@@ -268,7 +268,7 @@ APPCONTAINER: -appcontainer
RUN: ld.lld -m i386pep --version 2>&1 | FileCheck -check-prefix=VERSION %s
RUN: ld.lld -m i386pep -v 2>&1 | FileCheck -check-prefix=VERSION %s
RUN: not ld.lld -m i386pep -v xyz 2>&1 | FileCheck -check-prefix=VERSION %s
-VERSION: LLD {{.*}} (compatible with GNU linkers)
+VERSION: LLD {{.*}}, compatible with GNU linkers
RUN: ld.lld -m i386pep --help 2>&1 | FileCheck -check-prefix=HELP %s
HELP: USAGE:
>From e860c166556105c6f9275e130a0c27ae117a5f12 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Wed, 3 Jul 2024 11:58:53 +0800
Subject: [PATCH 066/246] [Docs][RISCV] Document RISC-V vector codegen (#96740)
This is a revival of https://reviews.llvm.org/D142348, and attempts to
document how RVV semantics can be expressed in LLVM IR as well as how
codegen works in the backend.
Parts of this are taken from the original RFC
https://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html, but
I've largely rewritten this from the original differential revision to
exclude explaining the specification itself and instead just focus on
the LLVM specific bits. (I figured that there's better material
available elsewhere for learning about RVV itself)
I've also updated it to include as much as I know about fixed vector
codegen as well as the recent changes to vsetvli insertion.
---
llvm/docs/RISCV/RISCVVectorExtension.rst | 334 +++++++++++++++++++++++
llvm/docs/UserGuides.rst | 4 +
2 files changed, 338 insertions(+)
create mode 100644 llvm/docs/RISCV/RISCVVectorExtension.rst
diff --git a/llvm/docs/RISCV/RISCVVectorExtension.rst b/llvm/docs/RISCV/RISCVVectorExtension.rst
new file mode 100644
index 0000000000000..39836a4b1ab9c
--- /dev/null
+++ b/llvm/docs/RISCV/RISCVVectorExtension.rst
@@ -0,0 +1,334 @@
+=========================
+ RISC-V Vector Extension
+=========================
+
+.. contents::
+ :local:
+
+The RISC-V target supports the 1.0 version of the `RISC-V Vector Extension (RVV) <https://github.com/riscv/riscv-v-spec/blob/v1.0/v-spec.adoc>`_.
+This guide gives an overview of how it's modelled in LLVM IR and how the backend generates code for it.
+
+Mapping to LLVM IR types
+========================
+
+RVV adds 32 VLEN-sized registers, where VLEN is a constant that is unknown to the compiler. To be able to represent VLEN-sized values, the RISC-V backend takes the same approach as AArch64's SVE and uses `scalable vector types <https://llvm.org/docs/LangRef.html#t-vector>`_.
+
+Scalable vector types are of the form ``<vscale x n x ty>``, which indicates a vector with a multiple of ``n`` elements of type ``ty``.
+On RISC-V, ``n`` and ``ty`` control LMUL and SEW, respectively.
+
+LLVM only supports ELEN=32 or ELEN=64, so ``vscale`` is defined as VLEN/64 (see ``RISCV::RVVBitsPerBlock``).
+Note this means that VLEN must be at least 64, so VLEN=32 isn't currently supported.
+
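+As a worked example (assuming VLEN=128, a value the compiler cannot rely on in general), ``vscale`` = 128/64 = 2:
+
+.. code-block:: llvm
+
+ ; <vscale x 4 x i32> here holds vscale * 4 = 8 x i32 = 256 bits,
+ ; i.e. one LMUL=2 register group (see the table below).
+ %sum = add <vscale x 4 x i32> %x, %y
+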
++-------------------+---------------+----------------+------------------+-------------------+-------------------+-------------------+-------------------+
+| | LMUL=⅛ | LMUL=¼ | LMUL=½ | LMUL=1 | LMUL=2 | LMUL=4 | LMUL=8 |
++===================+===============+================+==================+===================+===================+===================+===================+
+| i64 (ELEN=64) | N/A | N/A | N/A | <v x 1 x i64> | <v x 2 x i64> | <v x 4 x i64> | <v x 8 x i64> |
++-------------------+---------------+----------------+------------------+-------------------+-------------------+-------------------+-------------------+
+| i32 | N/A | N/A | <v x 1 x i32> | <v x 2 x i32> | <v x 4 x i32> | <v x 8 x i32> | <v x 16 x i32> |
++-------------------+---------------+----------------+------------------+-------------------+-------------------+-------------------+-------------------+
+| i16 | N/A | <v x 1 x i16> | <v x 2 x i16> | <v x 4 x i16> | <v x 8 x i16> | <v x 16 x i16> | <v x 32 x i16> |
++-------------------+---------------+----------------+------------------+-------------------+-------------------+-------------------+-------------------+
+| i8 | <v x 1 x i8> | <v x 2 x i8> | <v x 4 x i8> | <v x 8 x i8> | <v x 16 x i8> | <v x 32 x i8> | <v x 64 x i8> |
++-------------------+---------------+----------------+------------------+-------------------+-------------------+-------------------+-------------------+
+| double (ELEN=64) | N/A | N/A | N/A | <v x 1 x double> | <v x 2 x double> | <v x 4 x double> | <v x 8 x double> |
++-------------------+---------------+----------------+------------------+-------------------+-------------------+-------------------+-------------------+
+| float | N/A | N/A | <v x 1 x float> | <v x 2 x float> | <v x 4 x float> | <v x 8 x float> | <v x 16 x float> |
++-------------------+---------------+----------------+------------------+-------------------+-------------------+-------------------+-------------------+
+| half | N/A | <v x 1 x half> | <v x 2 x half> | <v x 4 x half> | <v x 8 x half> | <v x 16 x half> | <v x 32 x half> |
++-------------------+---------------+----------------+------------------+-------------------+-------------------+-------------------+-------------------+
+
+(Read ``<v x k x ty>`` as ``<vscale x k x ty>``)
+
+
+Mask vector types
+-----------------
+
+Mask vectors are physically represented using a layout of densely packed bits in a vector register.
+They are mapped to the following LLVM IR types:
+
+- ``<vscale x 1 x i1>``
+- ``<vscale x 2 x i1>``
+- ``<vscale x 4 x i1>``
+- ``<vscale x 8 x i1>``
+- ``<vscale x 16 x i1>``
+- ``<vscale x 32 x i1>``
+- ``<vscale x 64 x i1>``
+
+Two types with the same SEW/LMUL ratio will have the same related mask type.
+For instance, two different comparisons, one under SEW=64, LMUL=2 and the other under SEW=32, LMUL=1, will both generate a mask of type ``<vscale x 2 x i1>``, as sketched below.
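+
+A minimal IR sketch of that example:
+
+.. code-block:: llvm
+
+ %m1 = icmp eq <vscale x 2 x i64> %a, %b ; SEW=64, LMUL=2
+ %m2 = icmp eq <vscale x 2 x i32> %c, %d ; SEW=32, LMUL=1
+ ; %m1 and %m2 both have type <vscale x 2 x i1>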
+
+Representation in LLVM IR
+=========================
+
+Vector instructions can be represented in three main ways in LLVM IR:
+
+1. Regular instructions on both scalable and fixed-length vector types
+
+ .. code-block:: llvm
+
+ %c = add <vscale x 4 x i32> %a, %b
+ %f = add <4 x i32> %d, %e
+
+2. RISC-V vector intrinsics, which mirror the `C intrinsics specification <https://github.com/riscv-non-isa/rvv-intrinsic-doc>`_
+
+ These come in unmasked variants:
+
+ .. code-block:: llvm
+
+ %c = call @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %passthru,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> %b,
+ i64 %avl
+ )
+
+ As well as masked variants:
+
+ .. code-block:: llvm
+
+ %c = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %passthru,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i1> %mask,
+ i64 %avl,
+ i64 0 ; policy (must be an immediate)
+ )
+
+ Both allow setting the AVL as well as controlling the inactive/tail elements via the passthru operand, but the masked variant also provides operands for the mask and ``vta``/``vma`` policy bits.
+
+ The only valid types are scalable vector types.
+
+3. :ref:`Vector predication (VP) intrinsics <int_vp>`
+
+ .. code-block:: llvm
+
+ %c = call @llvm.vp.add.nxv4i32(
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i1> %m,
+ i32 %evl
+ )
+
+ Unlike RISC-V intrinsics, VP intrinsics are target agnostic, so they can be emitted from optimisation passes in the middle-end (like the loop vectorizer). They also support fixed-length vector types.
+
+ VP intrinsics also don't have passthru operands, but tail/mask undisturbed behaviour can be emulated by using the output in a ``@llvm.vp.merge``.
+ It will get lowered as a ``vmerge``, but will be merged back into the underlying instruction's mask via ``RISCVDAGToDAGISel::performCombineVMergeAndVOps``.
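+
+ A rough sketch of that pattern (intrinsic name mangling abbreviated):
+
+ .. code-block:: llvm
+
+ %c = call @llvm.vp.add.nxv4i32(...)
+ ; lanes below %evl where %m is set take %c; all others keep %passthru
+ %c.merged = call @llvm.vp.merge.nxv4i32(
+ <vscale x 4 x i1> %m,
+ <vscale x 4 x i32> %c,
+ <vscale x 4 x i32> %passthru,
+ i32 %evl
+ )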
+
+
+The different properties of the above representations are summarized below:
+
++----------------------+--------------+-----------------+----------+------------------+----------------------+-----------------+
+| | AVL | Masking | Passthru | Scalable vectors | Fixed-length vectors | Target agnostic |
++======================+==============+=================+==========+==================+======================+=================+
+| LLVM IR instructions | Always VLMAX | No | None | Yes | Yes | Yes |
++----------------------+--------------+-----------------+----------+------------------+----------------------+-----------------+
+| RVV intrinsics | Yes | Yes | Yes | Yes | No | No |
++----------------------+--------------+-----------------+----------+------------------+----------------------+-----------------+
+| VP intrinsics | Yes (EVL) | Yes | No | Yes | Yes | Yes |
++----------------------+--------------+-----------------+----------+------------------+----------------------+-----------------+
+
+SelectionDAG lowering
+=====================
+
+Most regular **scalable** vector LLVM IR instructions have corresponding SelectionDAG nodes that are legal on RISC-V and don't require any custom lowering.
+
+.. code-block::
+
+ t5: nxv4i32 = add t2, t4
+
+RISC-V vector intrinsics also don't require any custom lowering.
+
+.. code-block::
+
+ t12: nxv4i32 = llvm.riscv.vadd TargetConstant:i64<10056>, undef:nxv4i32, t2, t4, t6
+
+Fixed-length vectors
+--------------------
+
+Because there are no fixed-length vector patterns, operations on fixed-length vectors need to be custom lowered, with the operation itself performed in a scalable "container" type:
+
+1. The fixed-length vector operands are inserted into scalable containers with ``insert_subvector`` nodes. The container type is chosen such that its minimum size will fit the fixed-length vector (see ``getContainerForFixedLengthVector``).
+2. The operation is then performed on the container type via a **VL (vector length) node**. These are custom nodes defined in ``RISCVInstrInfoVVLPatterns.td`` that mirror target agnostic SelectionDAG nodes, as well as some RVV instructions. They contain an AVL operand, which is set to the number of elements in the fixed-length vector.
+ Some nodes also have a passthru or mask operand, which will usually be set to ``undef`` and all ones when lowering fixed-length vectors.
+3. The result is put back into a fixed-length vector via ``extract_subvector``.
+
+.. code-block::
+
+ t2: nxv2i32,ch = CopyFromReg t0, Register:nxv2i32 %0
+ t6: nxv2i32,ch = CopyFromReg t0, Register:nxv2i32 %1
+ t4: v4i32 = extract_subvector t2, Constant:i64<0>
+ t7: v4i32 = extract_subvector t6, Constant:i64<0>
+ t8: v4i32 = add t4, t7
+
+ // is custom lowered to:
+
+ t2: nxv2i32,ch = CopyFromReg t0, Register:nxv2i32 %0
+ t6: nxv2i32,ch = CopyFromReg t0, Register:nxv2i32 %1
+ t15: nxv2i1 = RISCVISD::VMSET_VL Constant:i64<4>
+ t16: nxv2i32 = RISCVISD::ADD_VL t2, t6, undef:nxv2i32, t15, Constant:i64<4>
+ t17: v4i32 = extract_subvector t16, Constant:i64<0>
+
+The ``insert_subvector`` and ``extract_subvector`` nodes responsible for wrapping and unwrapping will get combined away, and eventually we will lower all fixed-length vector types to scalable. Note that fixed-length vectors at the interface of a function are passed in a scalable vector container.
+
+.. note::
+
+ The only ``insert_subvector`` and ``extract_subvector`` nodes that make it through lowering are those that can be performed as an exact subregister insert or extract. This means that any fixed-length vector ``insert_subvector`` and ``extract_subvector`` nodes that aren't legalized must lie on a register group boundary, so the exact VLEN must be known at compile time (i.e., compiled with ``-mrvv-vector-bits=zvl`` or ``-mllvm -riscv-v-vector-bits-max=VLEN``, or have an exact ``vscale_range`` attribute).
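+
+ For example (a sketch), an exact VLEN of 512 can be conveyed with the ``vscale_range(8,8)`` function attribute, since VLEN = ``vscale`` * 64:
+
+ .. code-block:: llvm
+
+ define void @f(<vscale x 4 x i32> %v) vscale_range(8,8) {
+ ret void
+ }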
+
+Vector predication intrinsics
+-----------------------------
+
+VP intrinsics also get custom lowered via VL nodes.
+
+.. code-block::
+
+ t12: nxv2i32 = vp_add t2, t4, t6, Constant:i64<8>
+
+ // is custom lowered to:
+
+ t18: nxv2i32 = RISCVISD::ADD_VL t2, t4, undef:nxv2i32, t6, Constant:i64<8>
+
+The VP EVL and mask are used for the VL node's AVL and mask respectively, whilst the passthru is set to ``undef``.
+
+Instruction selection
+=====================
+
+``vl`` and ``vtype`` need to be configured correctly, so we can't just directly select the underlying vector ``MachineInstr``. Instead, pseudo instructions are selected, which carry the extra information needed to emit the necessary ``vsetvli``\s later.
+
+.. code-block::
+
+ %c:vrm2 = PseudoVADD_VV_M2 %passthru:vrm2(tied-def 0), %a:vrm2, %b:vrm2, %vl:gpr, 5 /*sew*/, 3 /*policy*/
+
+Each vector instruction has multiple pseudo instructions defined in ``RISCVInstrInfoVPseudos.td``.
+There is a variant of each pseudo for each possible LMUL, as well as a masked variant. So a typical instruction like ``vadd.vv`` would have the following pseudos:
+
+.. code-block::
+
+ %rd:vr = PseudoVADD_VV_MF8 %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, %avl:gpr, sew:imm, policy:imm
+ %rd:vr = PseudoVADD_VV_MF4 %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, %avl:gpr, sew:imm, policy:imm
+ %rd:vr = PseudoVADD_VV_MF2 %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, %avl:gpr, sew:imm, policy:imm
+ %rd:vr = PseudoVADD_VV_M1 %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, %avl:gpr, sew:imm, policy:imm
+ %rd:vrm2 = PseudoVADD_VV_M2 %passthru:vrm2(tied-def 0), %rs2:vrm2, %rs1:vrm2, %avl:gpr, sew:imm, policy:imm
+ %rd:vrm4 = PseudoVADD_VV_M4 %passthru:vrm4(tied-def 0), %rs2:vrm4, %rs1:vrm4, %avl:gpr, sew:imm, policy:imm
+ %rd:vrm8 = PseudoVADD_VV_M8 %passthru:vrm8(tied-def 0), %rs2:vrm8, %rs1:vrm8, %avl:gpr, sew:imm, policy:imm
+ %rd:vr = PseudoVADD_VV_MF8_MASK %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, mask:$v0, %avl:gpr, sew:imm, policy:imm
+ %rd:vr = PseudoVADD_VV_MF4_MASK %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, mask:$v0, %avl:gpr, sew:imm, policy:imm
+ %rd:vr = PseudoVADD_VV_MF2_MASK %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, mask:$v0, %avl:gpr, sew:imm, policy:imm
+ %rd:vr = PseudoVADD_VV_M1_MASK %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, mask:$v0, %avl:gpr, sew:imm, policy:imm
+ %rd:vrm2 = PseudoVADD_VV_M2_MASK %passthru:vrm2(tied-def 0), %rs2:vrm2, %rs1:vrm2, mask:$v0, %avl:gpr, sew:imm, policy:imm
+ %rd:vrm4 = PseudoVADD_VV_M4_MASK %passthru:vrm4(tied-def 0), %rs2:vrm4, %rs1:vrm4, mask:$v0, %avl:gpr, sew:imm, policy:imm
+ %rd:vrm8 = PseudoVADD_VV_M8_MASK %passthru:vrm8(tied-def 0), %rs2:vrm8, %rs1:vrm8, mask:$v0, %avl:gpr, sew:imm, policy:imm
+
+.. note::
+
+ Whilst the SEW can be encoded in an operand, we need to use separate pseudos for each LMUL since different register groups will require different register classes: see :ref:`rvv_register_allocation`.
+
+
+Pseudos have operands for the AVL and SEW (encoded as log2(SEW), e.g. ``5`` for SEW=32), as well as potentially the mask, policy or rounding mode if applicable.
+The passthru operand is tied to the destination register which will determine the inactive/tail elements.
+
+For scalable vectors that should use VLMAX, the AVL is set to a sentinel value of ``-1``.
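+
+For instance (a sketch), an LMUL=1 ``vadd.vv`` over a whole scalable vector would use the sentinel as its AVL:
+
+.. code-block::
+
+ %rd:vr = PseudoVADD_VV_M1 %passthru:vr(tied-def 0), %rs2:vr, %rs1:vr, -1, sew:imm, policy:imm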
+
+There are patterns for target agnostic SelectionDAG nodes in ``RISCVInstrInfoVSDPatterns.td``, VL nodes in ``RISCVInstrInfoVVLPatterns.td`` and RVV intrinsics in ``RISCVInstrInfoVPseudos.td``.
+
+Mask patterns
+-------------
+
+For masked pseudos the mask operand is copied to the physical ``$v0`` register during instruction selection with a glued ``CopyToReg`` node:
+
+.. code-block::
+
+ t23: ch,glue = CopyToReg t0, Register:nxv4i1 $v0, t6
+ t25: nxv4i32 = PseudoVADD_VV_M2_MASK Register:nxv4i32 $noreg, t2, t4, Register:nxv4i1 $v0, TargetConstant:i64<8>, TargetConstant:i64<5>, TargetConstant:i64<1>, t23:1
+
+The patterns in ``RISCVInstrInfoVVLPatterns.td`` only match masked pseudos to reduce the size of the match table, even if the node's mask is all ones and could be an unmasked pseudo.
+``RISCVFoldMasks::convertToUnmasked`` will detect if the mask is all ones and convert it into its unmasked form.
+
+.. code-block::
+
+ $v0 = PseudoVMSET_M_B16 -1, 32
+ %rd:vrm2 = PseudoVADD_VV_M2_MASK %passthru:vrm2(tied-def 0), %rs2:vrm2, %rs1:vrm2, $v0, %avl:gpr, sew:imm, policy:imm
+
+ // gets optimized to:
+
+ %rd:vrm2 = PseudoVADD_VV_M2 %passthru:vrm2(tied-def 0), %rs2:vrm2, %rs1:vrm2, %avl:gpr, sew:imm, policy:imm
+
+.. note::
+
+ Any ``vmset.m`` can be treated as an all ones mask since the tail elements past AVL are ``undef`` and can be replaced with ones.
+
+.. _rvv_register_allocation:
+
+Register allocation
+===================
+
+Register allocation is split between vector and scalar registers, with vector allocation running first:
+
+.. code-block::
+
+ $v8m2 = PseudoVADD_VV_M2 $v8m2(tied-def 0), $v8m2, $v10m2, %vl:gpr, 5, 3
+
+.. note::
+
+ Register allocation is split so that :ref:`RISCVInsertVSETVLI` can run after vector register allocation, but before scalar register allocation. It needs to be run before scalar register allocation as it may need to create a new virtual register to set the AVL to VLMAX.
+
+ Performing ``RISCVInsertVSETVLI`` after vector register allocation imposes fewer constraints on the machine scheduler since it cannot schedule instructions past ``vsetvli``\s, and it allows us to emit further vector pseudos during spilling or constant rematerialization.
+
+There are four register classes for vectors:
+
+- ``VR`` for vector registers (``v0``, ``v1``, ..., ``v31``), used for :math:`\text{LMUL} \leq 1` and mask registers.
+- ``VRM2`` for vector groups of length 2 i.e., :math:`\text{LMUL}=2` (``v0m2``, ``v2m2``, ..., ``v30m2``)
+- ``VRM4`` for vector groups of length 4 i.e., :math:`\text{LMUL}=4` (``v0m4``, ``v4m4``, ..., ``v28m4``)
+- ``VRM8`` for vector groups of length 8 i.e., :math:`\text{LMUL}=8` (``v0m8``, ``v8m8``, ..., ``v24m8``)
+
+:math:`\text{LMUL} \lt 1` types and mask types do not benefit from having a dedicated class, so ``VR`` is used in their case.
+
+Some instructions have a constraint that a register operand cannot be ``V0`` or overlap with ``V0``, so for these cases we also have ``VRNoV0`` variants.
+
+.. _RISCVInsertVSETVLI:
+
+RISCVInsertVSETVLI
+==================
+
+After vector registers are allocated, the ``RISCVInsertVSETVLI`` pass will insert the necessary ``vsetvli``\s for the pseudos.
+
+.. code-block::
+
+ dead $x0 = PseudoVSETVLI %vl:gpr, 209, implicit-def $vl, implicit-def $vtype
+ $v8m2 = PseudoVADD_VV_M2 $v8m2(tied-def 0), $v8m2, $v10m2, $noreg, 5, implicit $vl, implicit $vtype
+
+The physical ``$vl`` and ``$vtype`` registers are implicitly defined by the ``PseudoVSETVLI``, and are implicitly used by the ``PseudoVADD``.
+The ``vtype`` operand (``209`` in this example) is encoded as per the specification via ``RISCVVType::encodeVTYPE``.
+
+``RISCVInsertVSETVLI`` performs dataflow analysis to emit as few ``vsetvli``\s as possible. It will also try to minimize the number of ``vsetvli``\s that set VL, i.e., it will emit ``vsetvli x0, x0`` if only ``vtype`` needs to change but ``vl`` doesn't.
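+
+For example, a transition that changes only SEW/LMUL can use the ``vl``-preserving form:
+
+.. code-block:: nasm
+
+ vsetvli x0, x0, e64, m2, ta, ma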
+
+Pseudo expansion and printing
+=============================
+
+After scalar register allocation, the ``RISCVExpandPseudoInsts.cpp`` pass expands the ``PseudoVSETVLI`` instructions.
+
+.. code-block::
+
+ dead $x0 = VSETVLI $x1, 209, implicit-def $vtype, implicit-def $vl
+ renamable $v8m2 = PseudoVADD_VV_M2 $v8m2(tied-def 0), $v8m2, $v10m2, $noreg, 5, implicit $vl, implicit $vtype
+
+Note that the vector pseudo remains as it's needed to encode the register class for the LMUL. Its AVL and SEW operands are no longer used.
+
+``RISCVAsmPrinter`` will then lower the pseudo instructions into real ``MCInst``\s.
+
+.. code-block:: nasm
+
+ vsetvli a0, zero, e32, m2, ta, ma
+ vadd.vv v8, v8, v10
+
+
+
+See also
+========
+
+- `[llvm-dev] [RFC] Code generation for RISC-V V-extension <https://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html>`_
+- `2023 LLVM Dev Mtg - Vector codegen in the RISC-V backend <https://youtu.be/-ox8iJmbp0c?feature=shared>`_
+- `2023 LLVM Dev Mtg - How to add an C intrinsic and code-gen it, using the RISC-V vector C intrinsics <https://youtu.be/t17O_bU1jks?feature=shared>`_
+- `2021 LLVM Dev Mtg “Optimizing code for scalable vector architectures” <https://youtu.be/daWLCyhwrZ8?feature=shared>`_
diff --git a/llvm/docs/UserGuides.rst b/llvm/docs/UserGuides.rst
index 18d273a51daf6..bf7cdda89a009 100644
--- a/llvm/docs/UserGuides.rst
+++ b/llvm/docs/UserGuides.rst
@@ -64,6 +64,7 @@ intermediate LLVM representation.
Remarks
RemoveDIsDebugInfo
RISCVUsage
+ RISCV/RISCVVectorExtension
SourceLevelDebugging
SPIRVUsage
StackSafetyAnalysis
@@ -284,3 +285,6 @@ Additional Topics
:doc:`RISCVUsage`
This document describes using the RISCV-V target.
+
+:doc:`RISCV/RISCVVectorExtension`
+ This document describes how the RISC-V Vector extension can be expressed in LLVM IR and how code is generated for it in the backend.
>From 6a992bc89f5ca25d132abd044d78ecf27ae6e162 Mon Sep 17 00:00:00 2001
From: Chen Zheng <czhengsz at cn.ibm.com>
Date: Wed, 3 Jul 2024 00:19:18 -0400
Subject: [PATCH 067/246] [PowerPC] refactor CPU info in PPCTargetParser.def,
NFC
CPU features will be handled in follow-up patches.
---
clang/lib/Basic/Targets/PPC.cpp | 23 +--
clang/lib/CodeGen/CGBuiltin.cpp | 49 ++---
.../llvm/TargetParser/PPCTargetParser.def | 171 +++++++++---------
llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 2 +-
llvm/tools/clang | 1 +
5 files changed, 126 insertions(+), 120 deletions(-)
create mode 120000 llvm/tools/clang
diff --git a/clang/lib/Basic/Targets/PPC.cpp b/clang/lib/Basic/Targets/PPC.cpp
index a1e5f20f7dbe2..89c5566f7ad09 100644
--- a/clang/lib/Basic/Targets/PPC.cpp
+++ b/clang/lib/Basic/Targets/PPC.cpp
@@ -928,17 +928,18 @@ bool PPCTargetInfo::validateCpuSupports(StringRef FeatureStr) const {
bool PPCTargetInfo::validateCpuIs(StringRef CPUName) const {
llvm::Triple Triple = getTriple();
- if (Triple.isOSAIX()) {
-#define PPC_AIX_CPU(NAME, SUPPORT_METHOD, INDEX, OP, VALUE) .Case(NAME, true)
- return llvm::StringSwitch<bool>(CPUName)
-#include "llvm/TargetParser/PPCTargetParser.def"
- .Default(false);
- }
-
- assert(Triple.isOSLinux() &&
+ assert((Triple.isOSAIX() || Triple.isOSLinux()) &&
"__builtin_cpu_is() is only supported for AIX and Linux.");
-#define PPC_LNX_CPU(NAME, NUM) .Case(NAME, true)
- return llvm::StringSwitch<bool>(CPUName)
+
+#define PPC_CPU(NAME, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, \
+ AIXID) \
+ .Case(NAME, {Linux_SUPPORT_METHOD, AIX_SUPPORT_METHOD})
+
+ std::pair<unsigned, unsigned> SupportMethod =
+ llvm::StringSwitch<std::pair<unsigned, unsigned>>(CPUName)
#include "llvm/TargetParser/PPCTargetParser.def"
- .Default(false);
+ .Default({BUILTIN_PPC_UNSUPPORTED, BUILTIN_PPC_UNSUPPORTED});
+ return Triple.isOSLinux()
+ ? (SupportMethod.first != BUILTIN_PPC_UNSUPPORTED)
+ : (SupportMethod.second != BUILTIN_PPC_UNSUPPORTED);
}
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index ed37267efe715..5b92f1837980c 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -16748,10 +16748,10 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
auto GenAIXPPCBuiltinCpuExpr = [&](unsigned SupportMethod, unsigned FieldIdx,
unsigned Mask, CmpInst::Predicate CompOp,
unsigned OpValue) -> Value * {
- if (SupportMethod == AIX_BUILTIN_PPC_FALSE)
+ if (SupportMethod == BUILTIN_PPC_FALSE)
return llvm::ConstantInt::getFalse(ConvertType(E->getType()));
- if (SupportMethod == AIX_BUILTIN_PPC_TRUE)
+ if (SupportMethod == BUILTIN_PPC_TRUE)
return llvm::ConstantInt::getTrue(ConvertType(E->getType()));
assert(SupportMethod <= SYS_CALL && "Invalid value for SupportMethod.");
@@ -16803,34 +16803,39 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
llvm::Triple Triple = getTarget().getTriple();
- if (Triple.isOSAIX()) {
- unsigned SupportMethod, FieldIdx, CpuIdValue;
- CmpInst::Predicate CompareOp;
- typedef std::tuple<unsigned, unsigned, CmpInst::Predicate, unsigned>
- CPUType;
- std::tie(SupportMethod, FieldIdx, CompareOp, CpuIdValue) =
- static_cast<CPUType>(StringSwitch<CPUType>(CPUStr)
-#define PPC_AIX_CPU(NAME, SUPPORT_METHOD, INDEX, COMPARE_OP, VALUE) \
- .Case(NAME, {SUPPORT_METHOD, INDEX, COMPARE_OP, VALUE})
+ unsigned LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue;
+ typedef std::tuple<unsigned, unsigned, unsigned, unsigned> CPUInfo;
+
+ std::tie(LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue) =
+ static_cast<CPUInfo>(StringSwitch<CPUInfo>(CPUStr)
+#define PPC_CPU(NAME, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, \
+ AIXID) \
+ .Case(NAME, {Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, AIXID})
#include "llvm/TargetParser/PPCTargetParser.def"
- .Default({AIX_BUILTIN_PPC_FALSE, 0,
- CmpInst::Predicate(), 0}));
- return GenAIXPPCBuiltinCpuExpr(SupportMethod, FieldIdx, 0, CompareOp,
- CpuIdValue);
+ .Default({BUILTIN_PPC_UNSUPPORTED, 0,
+ BUILTIN_PPC_UNSUPPORTED, 0}));
+
+ if (Triple.isOSAIX()) {
+ assert((AIXSupportMethod != BUILTIN_PPC_UNSUPPORTED) &&
+ "Invalid CPU name. Missed by SemaChecking?");
+ return GenAIXPPCBuiltinCpuExpr(AIXSupportMethod, AIX_SYSCON_IMPL_IDX, 0,
+ ICmpInst::ICMP_EQ, AIXIDValue);
}
assert(Triple.isOSLinux() &&
"__builtin_cpu_is() is only supported for AIX and Linux.");
- unsigned NumCPUID = StringSwitch<unsigned>(CPUStr)
-#define PPC_LNX_CPU(Name, NumericID) .Case(Name, NumericID)
-#include "llvm/TargetParser/PPCTargetParser.def"
- .Default(-1U);
- assert(NumCPUID < -1U && "Invalid CPU name. Missed by SemaChecking?");
+
+ assert((LinuxSupportMethod != BUILTIN_PPC_UNSUPPORTED) &&
+ "Invalid CPU name. Missed by SemaChecking?");
+
+ if (LinuxSupportMethod == BUILTIN_PPC_FALSE)
+ return llvm::ConstantInt::getFalse(ConvertType(E->getType()));
+
Value *Op0 = llvm::ConstantInt::get(Int32Ty, PPC_FAWORD_CPUID);
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_fixed_addr_ld);
Value *TheCall = Builder.CreateCall(F, {Op0}, "cpu_is");
return Builder.CreateICmpEQ(TheCall,
- llvm::ConstantInt::get(Int32Ty, NumCPUID));
+ llvm::ConstantInt::get(Int32Ty, LinuxIDValue));
}
case Builtin::BI__builtin_cpu_supports: {
llvm::Triple Triple = getTarget().getTriple();
@@ -16848,7 +16853,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
VALUE) \
.Case(NAME, {SUPPORT_METHOD, INDEX, MASK, COMP_OP, VALUE})
#include "llvm/TargetParser/PPCTargetParser.def"
- .Default({AIX_BUILTIN_PPC_FALSE, 0, 0,
+ .Default({BUILTIN_PPC_FALSE, 0, 0,
CmpInst::Predicate(), 0}));
return GenAIXPPCBuiltinCpuExpr(SupportMethod, FieldIdx, Mask, CompOp,
Value);
diff --git a/llvm/include/llvm/TargetParser/PPCTargetParser.def b/llvm/include/llvm/TargetParser/PPCTargetParser.def
index d462c9c9ffe69..44e97d56a059c 100644
--- a/llvm/include/llvm/TargetParser/PPCTargetParser.def
+++ b/llvm/include/llvm/TargetParser/PPCTargetParser.def
@@ -14,7 +14,7 @@
#ifdef PPC_TGT_PARSER_UNDEF_MACROS
#undef PPC_LNX_FEATURE
-#undef PPC_LNX_CPU
+#undef PPC_CPU
#undef PPC_FAWORD_HWCAP
#undef PPC_FAWORD_HWCAP2
#undef PPC_FAWORD_CPUID
@@ -30,12 +30,22 @@
#undef PPC_CPUID_OFFSET_LE64
#undef PPC_CPUID_OFFSET_BE32
#undef PPC_CPUID_OFFSET_BE64
+#undef BUILTIN_PPC_TRUE
+#undef BUILTIN_PPC_FALSE
+#undef USE_SYS_CONF
+#undef SYS_CALL
+#undef BUILTIN_PPC_UNSUPPORTED
+#undef AIX_SYSCON_IMPL_IDX
+#undef AIX_PPC7_VALUE
+#undef AIX_PPC8_VALUE
+#undef AIX_PPC9_VALUE
+#undef AIX_PPC10_VALUE
#else
#ifndef PPC_LNX_FEATURE
#define PPC_LNX_FEATURE(NAME, DESC, ENUMNAME, ENUMVAL, HWCAPN)
#endif
-#ifndef PPC_LNX_CPU
-#define PPC_LNX_CPU(NAME, NUM)
+#ifndef PPC_CPU
+#define PPC_CPU(Name, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, AIXID)
#endif
#ifndef PPC_FAWORD_HWCAP
#define PPC_FAWORD_HWCAP 1
@@ -47,6 +57,56 @@
#define PPC_FAWORD_CPUID 3
#endif
+// PPC CPUs
+//
+// The value of SUPPORT_METHOD can be:
+// BUILTIN_PPC_TRUE : feature supported
+// BUILTIN_PPC_FALSE : feature not supported
+// USE_SYS_CONF : return value depends on comparing VALUE with the specified
+// data member of _system_configuration at INDEX, where the
+// data member is masked by Mask.
+// SYS_CALL : return value depends on comparing a VALUE with the return value
+// of calling `getsystemcfg` with the parameter INDEX, which is
+// then masked by Mask.
+//
+// USE_SYS_CONF is a method supported only on AIX.
+//
+// Supported SUPPORT_METHOD values.
+#define BUILTIN_PPC_TRUE 1
+#define BUILTIN_PPC_FALSE 0
+#define USE_SYS_CONF 2
+#define SYS_CALL 3
+#define BUILTIN_PPC_UNSUPPORTED 4
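+
+// For example (illustrative), a consumer expands PPC_CPU into a StringSwitch
+// over the CPU names, keeping both per-OS support methods:
+//
+// #define PPC_CPU(NAME, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, \
+// AIXID) \
+// .Case(NAME, {Linux_SUPPORT_METHOD, AIX_SUPPORT_METHOD})
+// #include "llvm/TargetParser/PPCTargetParser.def"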
+
+#define AIX_SYSCON_IMPL_IDX 1
+
+#define AIX_PPC7_VALUE 0x00008000
+#define AIX_PPC8_VALUE 0x00010000
+#define AIX_PPC9_VALUE 0x00020000
+#define AIX_PPC10_VALUE 0x00040000
+
+// __builtin_cpu_is() and __builtin_cpu_supports() are supported only on Power7 and up on AIX.
+// PPC_CPU(Name, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, AIXID)
+PPC_CPU("power4",SYS_CALL,32,BUILTIN_PPC_FALSE,0)
+PPC_CPU("ppc970",SYS_CALL,33,BUILTIN_PPC_FALSE,0)
+PPC_CPU("power5",SYS_CALL,34,BUILTIN_PPC_FALSE,0)
+PPC_CPU("power5+",SYS_CALL,35,BUILTIN_PPC_FALSE,0)
+PPC_CPU("power6",SYS_CALL,36,BUILTIN_PPC_FALSE,0)
+PPC_CPU("ppc-cell-be",SYS_CALL,37,BUILTIN_PPC_FALSE,0)
+PPC_CPU("power6x",SYS_CALL,38,BUILTIN_PPC_FALSE,0)
+PPC_CPU("power7",SYS_CALL,39,USE_SYS_CONF,AIX_PPC7_VALUE)
+PPC_CPU("ppca2",SYS_CALL,40,BUILTIN_PPC_FALSE,0)
+PPC_CPU("ppc405",SYS_CALL,41,BUILTIN_PPC_FALSE,0)
+PPC_CPU("ppc440",SYS_CALL,42,BUILTIN_PPC_FALSE,0)
+PPC_CPU("ppc464",SYS_CALL,43,BUILTIN_PPC_FALSE,0)
+PPC_CPU("ppc476",SYS_CALL,44,BUILTIN_PPC_FALSE,0)
+PPC_CPU("power8",SYS_CALL,45,USE_SYS_CONF,AIX_PPC8_VALUE)
+PPC_CPU("power9",SYS_CALL,46,USE_SYS_CONF,AIX_PPC9_VALUE)
+PPC_CPU("power10",SYS_CALL,47,USE_SYS_CONF,AIX_PPC10_VALUE)
+#undef PPC_CPU
+
+// PPC features on Linux:
+//
// PPC_LNX_FEATURE(Name, Description, EnumName, BitMask, PPC_FAWORD_WORD)
PPC_LNX_FEATURE("4xxmac","4xx CPU has a Multiply Accumulator",PPCF_4XXMAC,0x02000000,PPC_FAWORD_HWCAP)
PPC_LNX_FEATURE("altivec","CPU has a SIMD/Vector Unit",PPCF_ALTIVEC,0x10000000,PPC_FAWORD_HWCAP)
@@ -92,23 +152,6 @@ PPC_LNX_FEATURE("ucache","CPU has unified I/D cache",PPCF_UCACHE,0x01000000,PPC_
PPC_LNX_FEATURE("vcrypto","CPU supports the vector cryptography instructions",PPCF_VCRYPTO,0x02000000,PPC_FAWORD_HWCAP2)
PPC_LNX_FEATURE("vsx","CPU supports the vector-scalar extension",PPCF_VSX,0x00000080,PPC_FAWORD_HWCAP)
-// PPC_LNX_CPU(Name, NumericID)
-PPC_LNX_CPU("power4",32)
-PPC_LNX_CPU("ppc970",33)
-PPC_LNX_CPU("power5",34)
-PPC_LNX_CPU("power5+",35)
-PPC_LNX_CPU("power6",36)
-PPC_LNX_CPU("ppc-cell-be",37)
-PPC_LNX_CPU("power6x",38)
-PPC_LNX_CPU("power7",39)
-PPC_LNX_CPU("ppca2",40)
-PPC_LNX_CPU("ppc405",41)
-PPC_LNX_CPU("ppc440",42)
-PPC_LNX_CPU("ppc464",43)
-PPC_LNX_CPU("ppc476",44)
-PPC_LNX_CPU("power8",45)
-PPC_LNX_CPU("power9",46)
-PPC_LNX_CPU("power10",47)
#ifdef PPC_LNX_DEFINE_OFFSETS
# define PPC_HWCAP_OFFSET_LE32 -0x703C
# define PPC_HWCAP_OFFSET_LE64 -0x7064
@@ -125,13 +168,13 @@ PPC_LNX_CPU("power10",47)
#endif
#undef PPC_LNX_DEFINE_OFFSETS
#undef PPC_LNX_FEATURE
-#undef PPC_LNX_CPU
+// PPC features on AIX
+//
// Definition of the following values are found in the AIX header
// file: </usr/include/sys/systemcfg.h>.
#ifndef AIX_POWERPC_USE_SYS_CONF
#define AIX_POWERPC_USE_SYS_CONF
- #define AIX_SYSCON_IMPL_IDX 1
#define AIX_SYSCON_CACHE_IDX 5
#define AIX_SYSCON_SMT_IDX 44
#define AIX_SYSCON_VMX_IDX 46
@@ -139,89 +182,45 @@ PPC_LNX_CPU("power10",47)
#define SYS_CALL_TM_VER 59
#define SYS_CALL_MMA_VER 62
-
- #define AIX_PPC7_VALUE 0x00008000
- #define AIX_PPC8_VALUE 0x00010000
- #define AIX_PPC9_VALUE 0x00020000
- #define AIX_PPC10_VALUE 0x00040000
-
- // Supported SUPPORT_METHOD values.
- #define AIX_BUILTIN_PPC_TRUE 1
- #define AIX_BUILTIN_PPC_FALSE 0
- #define USE_SYS_CONF 2
- #define SYS_CALL 3
-#endif
-
-// The value of SUPPORT_METHOD can be:
-// AIX_BUILTIN_PPC_TRUE : feature supported
-// AIX_BUILTIN_PPC_FALSE : feature not supported
-// USE_SYS_CONF : return value depends on comparing VALUE with the specified
-// data member of _system_configuration at INDEX, where the
-// data member is masked by Mask.
-// SYS_CALL : return value depends on comparing a VALUE with the return value
-// of calling `getsystemcfg` with the parameter INDEX, which is
-// then masked by Mask.
-
-#ifndef PPC_AIX_CPU
- #define PPC_AIX_CPU(NAME, SUPPORT_METHOD, INDEX, COMPARE_OP, VALUE)
#endif
-// __builtin_cpu_is() and __builtin_cpu_supports() are supported only on Power7 and up.
-PPC_AIX_CPU("power4",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("ppc970",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("power5",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("power5+",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("power6",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("ppc-cell-be",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("power6x",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("ppca2",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("ppc405",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("ppc440",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("ppc464",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("ppc476",AIX_BUILTIN_PPC_FALSE,0,CmpInst::Predicate(),0)
-PPC_AIX_CPU("power7",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,ICmpInst::ICMP_EQ,AIX_PPC7_VALUE)
-PPC_AIX_CPU("power8",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,ICmpInst::ICMP_EQ,AIX_PPC8_VALUE)
-PPC_AIX_CPU("power9",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,ICmpInst::ICMP_EQ,AIX_PPC9_VALUE)
-PPC_AIX_CPU("power10",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,ICmpInst::ICMP_EQ,AIX_PPC10_VALUE)
-#undef PPC_AIX_CPU
-
#ifndef PPC_AIX_FEATURE
#define PPC_AIX_FEATURE(NAME,DESC,SUPPORT_METHOD,INDEX,MASK,COMPARE_OP,VALUE)
#endif
-PPC_AIX_FEATURE("4xxmac","4xx CPU has a Multiply Accumulator",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("4xxmac","4xx CPU has a Multiply Accumulator",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
PPC_AIX_FEATURE("altivec","CPU has a SIMD/Vector Unit",USE_SYS_CONF,AIX_SYSCON_VMX_IDX,0,ICmpInst::ICMP_UGT,0)
-PPC_AIX_FEATURE("arch_2_05","CPU supports ISA 205 (eg, POWER6)",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("arch_2_05","CPU supports ISA 205 (eg, POWER6)",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
PPC_AIX_FEATURE("arch_2_06","CPU supports ISA 206 (eg, POWER7)",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,0,ICmpInst::ICMP_UGE,AIX_PPC7_VALUE)
PPC_AIX_FEATURE("arch_2_07","CPU supports ISA 207 (eg, POWER8)",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,0,ICmpInst::ICMP_UGE,AIX_PPC8_VALUE)
PPC_AIX_FEATURE("arch_3_00","CPU supports ISA 30 (eg, POWER9)", USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,0,ICmpInst::ICMP_UGE,AIX_PPC9_VALUE)
PPC_AIX_FEATURE("arch_3_1","CPU supports ISA 31 (eg, POWER10)", USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,0,ICmpInst::ICMP_UGE,AIX_PPC10_VALUE)
-PPC_AIX_FEATURE("booke","CPU supports the Embedded ISA category",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("cellbe","CPU has a CELL broadband engine",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("booke","CPU supports the Embedded ISA category",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("cellbe","CPU has a CELL broadband engine",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
PPC_AIX_FEATURE("darn","CPU supports the darn (deliver a random number) instruction",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,0,ICmpInst::ICMP_UGE,AIX_PPC9_VALUE)
PPC_AIX_FEATURE("dfp","CPU has a decimal floating point unit",USE_SYS_CONF,AIX_SYSCON_DFP_IDX,0,ICmpInst::ICMP_NE,0)
PPC_AIX_FEATURE("dscr","CPU supports the data stream control register",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,0,ICmpInst::ICMP_UGE,AIX_PPC8_VALUE)
PPC_AIX_FEATURE("ebb","CPU supports event base branching",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,0,ICmpInst::ICMP_UGE,AIX_PPC8_VALUE)
-PPC_AIX_FEATURE("efpsingle","CPU has a SPE single precision floating point unit",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("efpdouble","CPU has a SPE double precision floating point unit",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("fpu","CPU has a floating point unit",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("efpsingle","CPU has a SPE single precision floating point unit",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("efpdouble","CPU has a SPE double precision floating point unit",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("fpu","CPU has a floating point unit",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
PPC_AIX_FEATURE("htm","CPU has hardware transaction memory instructions",SYS_CALL,SYS_CALL_TM_VER,0,ICmpInst::ICMP_UGT,0)
-PPC_AIX_FEATURE("isel","CPU supports the integer select instruction",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("isel","CPU supports the integer select instruction",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
PPC_AIX_FEATURE("mma","CPU supports the matrix-multiply assist instructions",SYS_CALL,SYS_CALL_MMA_VER,0,ICmpInst::ICMP_UGT,0)
-PPC_AIX_FEATURE("mmu","CPU has a memory management unit",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("pa6t","CPU supports the PA Semi 6T CORE ISA",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("power4","CPU supports ISA 200 (eg, POWER4)",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("power5","CPU supports ISA 202 (eg, POWER5)",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("power5+","CPU supports ISA 203 (eg, POWER5+)",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("power6x","CPU supports ISA 205 (eg, POWER6)",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("ppc32","CPU supports 32-bit mode execution",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("ppc601","CPU supports the old POWER ISA (eg, 601)",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("ppc64","CPU supports 64-bit mode execution",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
-PPC_AIX_FEATURE("ppcle","CPU supports a little-endian mode that uses address swizzling",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("mmu","CPU has a memory management unit",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("pa6t","CPU supports the PA Semi 6T CORE ISA",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("power4","CPU supports ISA 200 (eg, POWER4)",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("power5","CPU supports ISA 202 (eg, POWER5)",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("power5+","CPU supports ISA 203 (eg, POWER5+)",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("power6x","CPU supports ISA 205 (eg, POWER6)",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("ppc32","CPU supports 32-bit mode execution",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("ppc601","CPU supports the old POWER ISA (eg, 601)",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("ppc64","CPU supports 64-bit mode execution",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("ppcle","CPU supports a little-endian mode that uses address swizzling",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
PPC_AIX_FEATURE("smt","CPU supports simultaneous multi-threading",USE_SYS_CONF,AIX_SYSCON_SMT_IDX,0x3,ICmpInst::ICMP_EQ,0x3)
-PPC_AIX_FEATURE("spe","CPU has a signal processing extension unit",AIX_BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("spe","CPU has a signal processing extension unit",BUILTIN_PPC_FALSE,0,0,CmpInst::Predicate(),0)
PPC_AIX_FEATURE("tar","CPU supports the target address register",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,0,ICmpInst::ICMP_UGE,AIX_PPC8_VALUE)
-PPC_AIX_FEATURE("true_le","CPU supports true little-endian mode",AIX_BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
+PPC_AIX_FEATURE("true_le","CPU supports true little-endian mode",BUILTIN_PPC_TRUE,0,0,CmpInst::Predicate(),0)
PPC_AIX_FEATURE("ucache","CPU has unified I/D cache",USE_SYS_CONF,AIX_SYSCON_CACHE_IDX,0x00000002,ICmpInst::ICMP_EQ,0x00000002)
PPC_AIX_FEATURE("vsx","CPU supports the vector-scalar extension",USE_SYS_CONF,AIX_SYSCON_VMX_IDX,0,ICmpInst::ICMP_UGT,1)
#undef PPC_AIX_FEATURE
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index e3f59f3bd28df..d2195cfbdc5c9 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -3124,7 +3124,7 @@ bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
MI.setDesc(get(PPC::LWZ));
uint64_t FAType = MI.getOperand(1).getImm();
#undef PPC_LNX_FEATURE
-#undef PPC_LNX_CPU
+#undef PPC_CPU
#define PPC_LNX_DEFINE_OFFSETS
#include "llvm/TargetParser/PPCTargetParser.def"
bool IsLE = Subtarget.isLittleEndian();
diff --git a/llvm/tools/clang b/llvm/tools/clang
new file mode 120000
index 0000000000000..7700edcd10231
--- /dev/null
+++ b/llvm/tools/clang
@@ -0,0 +1 @@
+../../clang
\ No newline at end of file
>From a9c44fd66a1a2f79c96cb5509634ca67a04b061a Mon Sep 17 00:00:00 2001
From: Vitaly Buka <vitalybuka at google.com>
Date: Tue, 2 Jul 2024 21:26:10 -0700
Subject: [PATCH 068/246] [asan] Disable flaky test on Android (#97516)
Issue #97515
---
compiler-rt/test/asan/TestCases/printf-5.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/compiler-rt/test/asan/TestCases/printf-5.c b/compiler-rt/test/asan/TestCases/printf-5.c
index 19ff182acce67..180ac6e037b9b 100644
--- a/compiler-rt/test/asan/TestCases/printf-5.c
+++ b/compiler-rt/test/asan/TestCases/printf-5.c
@@ -7,6 +7,9 @@
// FIXME: printf is not intercepted on Windows yet.
// XFAIL: target={{.*windows-(msvc.*|gnu)}}
+// FIXME: The test is flaky after build bot upgrade. #97515
+// UNSUPPORTED: android
+
#include <stdio.h>
#include <string.h>
int main() {
>From dd220853081400db6b4f85027030645115229ba0 Mon Sep 17 00:00:00 2001
From: Slava Zakharin <szakharin at nvidia.com>
Date: Tue, 2 Jul 2024 21:30:37 -0700
Subject: [PATCH 069/246] [flang][runtime] Split MATMUL[_TRANSPOSE] into
separate entries. (#97406)
Device compilation is much faster for separate MATMUL[_TRANSPOSE]
entries than for a single one that covers all data types.
The lowering changes and the removal of the generic entries will follow.
---
.../flang/Runtime/matmul-instances.inc | 261 ++++++++++++++++++
.../include/flang/Runtime/matmul-transpose.h | 17 ++
flang/include/flang/Runtime/matmul.h | 17 ++
flang/runtime/matmul-transpose.cpp | 42 +++
flang/runtime/matmul.cpp | 50 +++-
flang/unittests/Runtime/Matmul.cpp | 121 ++++++++
flang/unittests/Runtime/MatmulTranspose.cpp | 140 ++++++++++
7 files changed, 646 insertions(+), 2 deletions(-)
create mode 100644 flang/include/flang/Runtime/matmul-instances.inc
diff --git a/flang/include/flang/Runtime/matmul-instances.inc b/flang/include/flang/Runtime/matmul-instances.inc
new file mode 100644
index 0000000000000..970b03339cd5e
--- /dev/null
+++ b/flang/include/flang/Runtime/matmul-instances.inc
@@ -0,0 +1,261 @@
+//===-- include/flang/Runtime/matmul-instances.inc --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Helper macros to instantiate MATMUL/MATMUL_TRANSPOSE definitions
+// for different data types of the input arguments.
+//===----------------------------------------------------------------------===//
+
+#ifndef MATMUL_INSTANCE
+#error "Define MATMUL_INSTANCE before including this file"
+#endif
+
+#ifndef MATMUL_DIRECT_INSTANCE
+#error "Define MATMUL_DIRECT_INSTANCE before including this file"
+#endif
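+
+// Example usage (illustrative; parameter names are placeholders, and the real
+// expansions live in the runtime sources, e.g. flang/runtime/matmul.cpp):
+//
+// #define MATMUL_INSTANCE(XCAT, XKIND, YCAT, YKIND) /* one entry per pair */
+// #define MATMUL_DIRECT_INSTANCE(XCAT, XKIND, YCAT, YKIND) /* likewise */
+// #include "flang/Runtime/matmul-instances.inc"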
+
+// clang-format off
+
+#define FOREACH_MATMUL_TYPE_PAIR(macro) \
+ macro(Integer, 1, Integer, 1) \
+ macro(Integer, 1, Integer, 2) \
+ macro(Integer, 1, Integer, 4) \
+ macro(Integer, 1, Integer, 8) \
+ macro(Integer, 2, Integer, 1) \
+ macro(Integer, 2, Integer, 2) \
+ macro(Integer, 2, Integer, 4) \
+ macro(Integer, 2, Integer, 8) \
+ macro(Integer, 4, Integer, 1) \
+ macro(Integer, 4, Integer, 2) \
+ macro(Integer, 4, Integer, 4) \
+ macro(Integer, 4, Integer, 8) \
+ macro(Integer, 8, Integer, 1) \
+ macro(Integer, 8, Integer, 2) \
+ macro(Integer, 8, Integer, 4) \
+ macro(Integer, 8, Integer, 8) \
+ macro(Integer, 1, Real, 4) \
+ macro(Integer, 1, Real, 8) \
+ macro(Integer, 2, Real, 4) \
+ macro(Integer, 2, Real, 8) \
+ macro(Integer, 4, Real, 4) \
+ macro(Integer, 4, Real, 8) \
+ macro(Integer, 8, Real, 4) \
+ macro(Integer, 8, Real, 8) \
+ macro(Integer, 1, Complex, 4) \
+ macro(Integer, 1, Complex, 8) \
+ macro(Integer, 2, Complex, 4) \
+ macro(Integer, 2, Complex, 8) \
+ macro(Integer, 4, Complex, 4) \
+ macro(Integer, 4, Complex, 8) \
+ macro(Integer, 8, Complex, 4) \
+ macro(Integer, 8, Complex, 8) \
+ macro(Real, 4, Integer, 1) \
+ macro(Real, 4, Integer, 2) \
+ macro(Real, 4, Integer, 4) \
+ macro(Real, 4, Integer, 8) \
+ macro(Real, 8, Integer, 1) \
+ macro(Real, 8, Integer, 2) \
+ macro(Real, 8, Integer, 4) \
+ macro(Real, 8, Integer, 8) \
+ macro(Real, 4, Real, 4) \
+ macro(Real, 4, Real, 8) \
+ macro(Real, 8, Real, 4) \
+ macro(Real, 8, Real, 8) \
+ macro(Real, 4, Complex, 4) \
+ macro(Real, 4, Complex, 8) \
+ macro(Real, 8, Complex, 4) \
+ macro(Real, 8, Complex, 8) \
+ macro(Complex, 4, Integer, 1) \
+ macro(Complex, 4, Integer, 2) \
+ macro(Complex, 4, Integer, 4) \
+ macro(Complex, 4, Integer, 8) \
+ macro(Complex, 8, Integer, 1) \
+ macro(Complex, 8, Integer, 2) \
+ macro(Complex, 8, Integer, 4) \
+ macro(Complex, 8, Integer, 8) \
+ macro(Complex, 4, Real, 4) \
+ macro(Complex, 4, Real, 8) \
+ macro(Complex, 8, Real, 4) \
+ macro(Complex, 8, Real, 8) \
+ macro(Complex, 4, Complex, 4) \
+ macro(Complex, 4, Complex, 8) \
+ macro(Complex, 8, Complex, 4) \
+ macro(Complex, 8, Complex, 8) \
+
+FOREACH_MATMUL_TYPE_PAIR(MATMUL_INSTANCE)
+FOREACH_MATMUL_TYPE_PAIR(MATMUL_DIRECT_INSTANCE)
+
+#if defined __SIZEOF_INT128__ && !AVOID_NATIVE_UINT128_T
+#define FOREACH_MATMUL_TYPE_PAIR_WITH_INT16(macro) \
+ macro(Integer, 16, Integer, 1) \
+ macro(Integer, 16, Integer, 2) \
+ macro(Integer, 16, Integer, 4) \
+ macro(Integer, 16, Integer, 8) \
+ macro(Integer, 16, Integer, 16) \
+ macro(Integer, 16, Real, 4) \
+ macro(Integer, 16, Real, 8) \
+ macro(Integer, 16, Complex, 4) \
+ macro(Integer, 16, Complex, 8) \
+ macro(Real, 4, Integer, 16) \
+ macro(Real, 8, Integer, 16) \
+ macro(Complex, 4, Integer, 16) \
+ macro(Complex, 8, Integer, 16) \
+
+FOREACH_MATMUL_TYPE_PAIR_WITH_INT16(MATMUL_INSTANCE)
+FOREACH_MATMUL_TYPE_PAIR_WITH_INT16(MATMUL_DIRECT_INSTANCE)
+
+#if LDBL_MANT_DIG == 64
+MATMUL_INSTANCE(Integer, 16, Real, 10)
+MATMUL_INSTANCE(Integer, 16, Complex, 10)
+MATMUL_INSTANCE(Real, 10, Integer, 16)
+MATMUL_INSTANCE(Complex, 10, Integer, 16)
+MATMUL_DIRECT_INSTANCE(Integer, 16, Real, 10)
+MATMUL_DIRECT_INSTANCE(Integer, 16, Complex, 10)
+MATMUL_DIRECT_INSTANCE(Real, 10, Integer, 16)
+MATMUL_DIRECT_INSTANCE(Complex, 10, Integer, 16)
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+MATMUL_INSTANCE(Integer, 16, Real, 16)
+MATMUL_INSTANCE(Integer, 16, Complex, 16)
+MATMUL_INSTANCE(Real, 16, Integer, 16)
+MATMUL_INSTANCE(Complex, 16, Integer, 16)
+MATMUL_DIRECT_INSTANCE(Integer, 16, Real, 16)
+MATMUL_DIRECT_INSTANCE(Integer, 16, Complex, 16)
+MATMUL_DIRECT_INSTANCE(Real, 16, Integer, 16)
+MATMUL_DIRECT_INSTANCE(Complex, 16, Integer, 16)
+#endif
+#endif // defined __SIZEOF_INT128__ && !AVOID_NATIVE_UINT128_T
+
+#if LDBL_MANT_DIG == 64
+#define FOREACH_MATMUL_TYPE_PAIR_WITH_REAL10(macro) \
+ macro(Integer, 1, Real, 10) \
+ macro(Integer, 1, Complex, 10) \
+ macro(Integer, 2, Real, 10) \
+ macro(Integer, 2, Complex, 10) \
+ macro(Integer, 4, Real, 10) \
+ macro(Integer, 4, Complex, 10) \
+ macro(Integer, 8, Real, 10) \
+ macro(Integer, 8, Complex, 10) \
+ macro(Real, 4, Real, 10) \
+ macro(Real, 4, Complex, 10) \
+ macro(Real, 8, Real, 10) \
+ macro(Real, 8, Complex, 10) \
+ macro(Real, 10, Integer, 1) \
+ macro(Real, 10, Integer, 2) \
+ macro(Real, 10, Integer, 4) \
+ macro(Real, 10, Integer, 8) \
+ macro(Real, 10, Real, 4) \
+ macro(Real, 10, Real, 8) \
+ macro(Real, 10, Real, 10) \
+ macro(Real, 10, Complex, 4) \
+ macro(Real, 10, Complex, 8) \
+ macro(Real, 10, Complex, 10) \
+ macro(Complex, 4, Real, 10) \
+ macro(Complex, 4, Complex, 10) \
+ macro(Complex, 8, Real, 10) \
+ macro(Complex, 8, Complex, 10) \
+ macro(Complex, 10, Integer, 1) \
+ macro(Complex, 10, Integer, 2) \
+ macro(Complex, 10, Integer, 4) \
+ macro(Complex, 10, Integer, 8) \
+ macro(Complex, 10, Real, 4) \
+ macro(Complex, 10, Real, 8) \
+ macro(Complex, 10, Real, 10) \
+ macro(Complex, 10, Complex, 4) \
+ macro(Complex, 10, Complex, 8) \
+ macro(Complex, 10, Complex, 10) \
+
+FOREACH_MATMUL_TYPE_PAIR_WITH_REAL10(MATMUL_INSTANCE)
+FOREACH_MATMUL_TYPE_PAIR_WITH_REAL10(MATMUL_DIRECT_INSTANCE)
+
+#if HAS_FLOAT128
+MATMUL_INSTANCE(Real, 10, Real, 16)
+MATMUL_INSTANCE(Real, 10, Complex, 16)
+MATMUL_INSTANCE(Real, 16, Real, 10)
+MATMUL_INSTANCE(Real, 16, Complex, 10)
+MATMUL_INSTANCE(Complex, 10, Real, 16)
+MATMUL_INSTANCE(Complex, 10, Complex, 16)
+MATMUL_INSTANCE(Complex, 16, Real, 10)
+MATMUL_INSTANCE(Complex, 16, Complex, 10)
+MATMUL_DIRECT_INSTANCE(Real, 10, Real, 16)
+MATMUL_DIRECT_INSTANCE(Real, 10, Complex, 16)
+MATMUL_DIRECT_INSTANCE(Real, 16, Real, 10)
+MATMUL_DIRECT_INSTANCE(Real, 16, Complex, 10)
+MATMUL_DIRECT_INSTANCE(Complex, 10, Real, 16)
+MATMUL_DIRECT_INSTANCE(Complex, 10, Complex, 16)
+MATMUL_DIRECT_INSTANCE(Complex, 16, Real, 10)
+MATMUL_DIRECT_INSTANCE(Complex, 16, Complex, 10)
+#endif
+#endif // LDBL_MANT_DIG == 64
+
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+#define FOREACH_MATMUL_TYPE_PAIR_WITH_REAL16(macro) \
+ macro(Integer, 1, Real, 16) \
+ macro(Integer, 1, Complex, 16) \
+ macro(Integer, 2, Real, 16) \
+ macro(Integer, 2, Complex, 16) \
+ macro(Integer, 4, Real, 16) \
+ macro(Integer, 4, Complex, 16) \
+ macro(Integer, 8, Real, 16) \
+ macro(Integer, 8, Complex, 16) \
+ macro(Real, 4, Real, 16) \
+ macro(Real, 4, Complex, 16) \
+ macro(Real, 8, Real, 16) \
+ macro(Real, 8, Complex, 16) \
+ macro(Real, 16, Integer, 1) \
+ macro(Real, 16, Integer, 2) \
+ macro(Real, 16, Integer, 4) \
+ macro(Real, 16, Integer, 8) \
+ macro(Real, 16, Real, 4) \
+ macro(Real, 16, Real, 8) \
+ macro(Real, 16, Real, 16) \
+ macro(Real, 16, Complex, 4) \
+ macro(Real, 16, Complex, 8) \
+ macro(Real, 16, Complex, 16) \
+ macro(Complex, 4, Real, 16) \
+ macro(Complex, 4, Complex, 16) \
+ macro(Complex, 8, Real, 16) \
+ macro(Complex, 8, Complex, 16) \
+ macro(Complex, 16, Integer, 1) \
+ macro(Complex, 16, Integer, 2) \
+ macro(Complex, 16, Integer, 4) \
+ macro(Complex, 16, Integer, 8) \
+ macro(Complex, 16, Real, 4) \
+ macro(Complex, 16, Real, 8) \
+ macro(Complex, 16, Real, 16) \
+ macro(Complex, 16, Complex, 4) \
+ macro(Complex, 16, Complex, 8) \
+ macro(Complex, 16, Complex, 16) \
+
+FOREACH_MATMUL_TYPE_PAIR_WITH_REAL16(MATMUL_INSTANCE)
+FOREACH_MATMUL_TYPE_PAIR_WITH_REAL16(MATMUL_DIRECT_INSTANCE)
+#endif // LDBL_MANT_DIG == 113 || HAS_FLOAT128
+
+#define FOREACH_MATMUL_LOGICAL_TYPE_PAIR(macro) \
+ macro(Logical, 1, Logical, 1) \
+ macro(Logical, 1, Logical, 2) \
+ macro(Logical, 1, Logical, 4) \
+ macro(Logical, 1, Logical, 8) \
+ macro(Logical, 2, Logical, 1) \
+ macro(Logical, 2, Logical, 2) \
+ macro(Logical, 2, Logical, 4) \
+ macro(Logical, 2, Logical, 8) \
+ macro(Logical, 4, Logical, 1) \
+ macro(Logical, 4, Logical, 2) \
+ macro(Logical, 4, Logical, 4) \
+ macro(Logical, 4, Logical, 8) \
+ macro(Logical, 8, Logical, 1) \
+ macro(Logical, 8, Logical, 2) \
+ macro(Logical, 8, Logical, 4) \
+ macro(Logical, 8, Logical, 8) \
+
+FOREACH_MATMUL_LOGICAL_TYPE_PAIR(MATMUL_INSTANCE)
+FOREACH_MATMUL_LOGICAL_TYPE_PAIR(MATMUL_DIRECT_INSTANCE)
+
+#undef MATMUL_INSTANCE
+#undef MATMUL_DIRECT_INSTANCE
+
+// clang-format on
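(Aside, not part of the patch: the .inc file above uses the classic X-macro
pattern. Each FOREACH_* list applies a caller-supplied macro to every
supported type pair, so one list drives both the declarations in the headers
and the definitions in the runtime. A minimal sketch of the expansion, with
hypothetical names:

  // Reduced illustration only; the real lists live in
  // flang/include/flang/Runtime/matmul-instances.inc.
  #define FOREACH_PAIR(macro) \
    macro(Integer, 4, Integer, 4) \
    macro(Real, 8, Real, 8)

  // Each includer defines MATMUL_INSTANCE before including the .inc file.
  #define MATMUL_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
    void Matmul##XCAT##XKIND##YCAT##YKIND();

  FOREACH_PAIR(MATMUL_INSTANCE)
  // Expands to:
  //   void MatmulInteger4Integer4();
  //   void MatmulReal8Real8();
  #undef MATMUL_INSTANCE

The headers and the .cpp files below each redefine MATMUL_INSTANCE and
MATMUL_DIRECT_INSTANCE to emit declarations and definitions respectively.)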
diff --git a/flang/include/flang/Runtime/matmul-transpose.h b/flang/include/flang/Runtime/matmul-transpose.h
index 5eb5896972e0f..d0a5005a1a8bd 100644
--- a/flang/include/flang/Runtime/matmul-transpose.h
+++ b/flang/include/flang/Runtime/matmul-transpose.h
@@ -10,6 +10,8 @@
#ifndef FORTRAN_RUNTIME_MATMUL_TRANSPOSE_H_
#define FORTRAN_RUNTIME_MATMUL_TRANSPOSE_H_
+#include "flang/Common/float128.h"
+#include "flang/Common/uint128.h"
#include "flang/Runtime/entry-names.h"
namespace Fortran::runtime {
class Descriptor;
@@ -25,6 +27,21 @@ void RTDECL(MatmulTranspose)(Descriptor &, const Descriptor &,
// and have a valid base address.
void RTDECL(MatmulTransposeDirect)(const Descriptor &, const Descriptor &,
const Descriptor &, const char *sourceFile = nullptr, int line = 0);
+
+// MATMUL(TRANSPOSE()) versions specialized by the categories of the operand
+// types. The KIND and shape information is taken from the argument's
+// descriptors.
+#define MATMUL_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
+ void RTDECL(MatmulTranspose##XCAT##XKIND##YCAT##YKIND)(Descriptor & result, \
+ const Descriptor &x, const Descriptor &y, const char *sourceFile, \
+ int line);
+#define MATMUL_DIRECT_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
+ void RTDECL(MatmulTransposeDirect##XCAT##XKIND##YCAT##YKIND)( \
+ Descriptor & result, const Descriptor &x, const Descriptor &y, \
+ const char *sourceFile, int line);
+
+#include "matmul-instances.inc"
+
} // extern "C"
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_MATMUL_TRANSPOSE_H_
diff --git a/flang/include/flang/Runtime/matmul.h b/flang/include/flang/Runtime/matmul.h
index 40581d44de9e2..1a5e39eb8813f 100644
--- a/flang/include/flang/Runtime/matmul.h
+++ b/flang/include/flang/Runtime/matmul.h
@@ -10,6 +10,8 @@
#ifndef FORTRAN_RUNTIME_MATMUL_H_
#define FORTRAN_RUNTIME_MATMUL_H_
+#include "flang/Common/float128.h"
+#include "flang/Common/uint128.h"
#include "flang/Runtime/entry-names.h"
namespace Fortran::runtime {
class Descriptor;
@@ -24,6 +26,21 @@ void RTDECL(Matmul)(Descriptor &, const Descriptor &, const Descriptor &,
// and have a valid base address.
void RTDECL(MatmulDirect)(const Descriptor &, const Descriptor &,
const Descriptor &, const char *sourceFile = nullptr, int line = 0);
+
+// MATMUL versions specialized by the categories of the operand types.
+// The KIND and shape information is taken from the argument's
+// descriptors.
+#define MATMUL_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
+ void RTDECL(Matmul##XCAT##XKIND##YCAT##YKIND)(Descriptor & result, \
+ const Descriptor &x, const Descriptor &y, const char *sourceFile, \
+ int line);
+#define MATMUL_DIRECT_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
+ void RTDECL(MatmulDirect##XCAT##XKIND##YCAT##YKIND)(Descriptor & result, \
+ const Descriptor &x, const Descriptor &y, const char *sourceFile, \
+ int line);
+
+#include "matmul-instances.inc"
+
} // extern "C"
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_MATMUL_H_
diff --git a/flang/runtime/matmul-transpose.cpp b/flang/runtime/matmul-transpose.cpp
index a12d188266f7c..1c998fa8cf6c1 100644
--- a/flang/runtime/matmul-transpose.cpp
+++ b/flang/runtime/matmul-transpose.cpp
@@ -384,6 +384,30 @@ template <bool IS_ALLOCATING> struct MatmulTranspose {
x, y, terminator, yCatKind->first, yCatKind->second);
}
};
+
+template <bool IS_ALLOCATING, TypeCategory XCAT, int XKIND, TypeCategory YCAT,
+ int YKIND>
+struct MatmulTransposeHelper {
+ using ResultDescriptor =
+ std::conditional_t<IS_ALLOCATING, Descriptor, const Descriptor>;
+ RT_API_ATTRS void operator()(ResultDescriptor &result, const Descriptor &x,
+ const Descriptor &y, const char *sourceFile, int line) const {
+ Terminator terminator{sourceFile, line};
+ auto xCatKind{x.type().GetCategoryAndKind()};
+ auto yCatKind{y.type().GetCategoryAndKind()};
+ RUNTIME_CHECK(terminator, xCatKind.has_value() && yCatKind.has_value());
+ RUNTIME_CHECK(terminator, xCatKind->first == XCAT);
+ RUNTIME_CHECK(terminator, yCatKind->first == YCAT);
+ if constexpr (constexpr auto resultType{
+ GetResultType(XCAT, XKIND, YCAT, YKIND)}) {
+ return DoMatmulTranspose<IS_ALLOCATING, resultType->first,
+ resultType->second, CppTypeFor<XCAT, XKIND>, CppTypeFor<YCAT, YKIND>>(
+ result, x, y, terminator);
+ }
+ terminator.Crash("MATMUL-TRANSPOSE: bad operand types (%d(%d), %d(%d))",
+ static_cast<int>(XCAT), XKIND, static_cast<int>(YCAT), YKIND);
+ }
+};
} // namespace
namespace Fortran::runtime {
@@ -399,6 +423,24 @@ void RTDEF(MatmulTransposeDirect)(const Descriptor &result, const Descriptor &x,
MatmulTranspose<false>{}(result, x, y, sourceFile, line);
}
+#define MATMUL_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
+ void RTDEF(MatmulTranspose##XCAT##XKIND##YCAT##YKIND)(Descriptor & result, \
+ const Descriptor &x, const Descriptor &y, const char *sourceFile, \
+ int line) { \
+ MatmulTransposeHelper<true, TypeCategory::XCAT, XKIND, TypeCategory::YCAT, \
+ YKIND>{}(result, x, y, sourceFile, line); \
+ }
+
+#define MATMUL_DIRECT_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
+ void RTDEF(MatmulTransposeDirect##XCAT##XKIND##YCAT##YKIND)( \
+ Descriptor & result, const Descriptor &x, const Descriptor &y, \
+ const char *sourceFile, int line) { \
+ MatmulTransposeHelper<false, TypeCategory::XCAT, XKIND, \
+ TypeCategory::YCAT, YKIND>{}(result, x, y, sourceFile, line); \
+ }
+
+#include "flang/Runtime/matmul-instances.inc"
+
RT_EXT_API_GROUP_END
} // extern "C"
} // namespace Fortran::runtime
diff --git a/flang/runtime/matmul.cpp b/flang/runtime/matmul.cpp
index 8f9b50a549e1f..504d1aa4dc4a4 100644
--- a/flang/runtime/matmul.cpp
+++ b/flang/runtime/matmul.cpp
@@ -28,7 +28,8 @@
#include "flang/Runtime/descriptor.h"
#include <cstring>
-namespace Fortran::runtime {
+namespace {
+using namespace Fortran::runtime;
// Suppress the warnings about calling __host__-only std::complex operators,
// defined in C++ STD header files, from __device__ code.
@@ -455,7 +456,8 @@ template <bool IS_ALLOCATING> struct Matmul {
Terminator &terminator) const {
if constexpr (constexpr auto resultType{
GetResultType(XCAT, XKIND, YCAT, YKIND)}) {
- if constexpr (common::IsNumericTypeCategory(resultType->first) ||
+ if constexpr (Fortran::common::IsNumericTypeCategory(
+ resultType->first) ||
resultType->first == TypeCategory::Logical) {
return DoMatmul<IS_ALLOCATING, resultType->first,
resultType->second, CppTypeFor<XCAT, XKIND>,
@@ -483,6 +485,32 @@ template <bool IS_ALLOCATING> struct Matmul {
}
};
+template <bool IS_ALLOCATING, TypeCategory XCAT, int XKIND, TypeCategory YCAT,
+ int YKIND>
+struct MatmulHelper {
+ using ResultDescriptor =
+ std::conditional_t<IS_ALLOCATING, Descriptor, const Descriptor>;
+ RT_API_ATTRS void operator()(ResultDescriptor &result, const Descriptor &x,
+ const Descriptor &y, const char *sourceFile, int line) const {
+ Terminator terminator{sourceFile, line};
+ auto xCatKind{x.type().GetCategoryAndKind()};
+ auto yCatKind{y.type().GetCategoryAndKind()};
+ RUNTIME_CHECK(terminator, xCatKind.has_value() && yCatKind.has_value());
+ RUNTIME_CHECK(terminator, xCatKind->first == XCAT);
+ RUNTIME_CHECK(terminator, yCatKind->first == YCAT);
+ if constexpr (constexpr auto resultType{
+ GetResultType(XCAT, XKIND, YCAT, YKIND)}) {
+ return DoMatmul<IS_ALLOCATING, resultType->first, resultType->second,
+ CppTypeFor<XCAT, XKIND>, CppTypeFor<YCAT, YKIND>>(
+ result, x, y, terminator);
+ }
+ terminator.Crash("MATMUL: bad operand types (%d(%d), %d(%d))",
+ static_cast<int>(XCAT), XKIND, static_cast<int>(YCAT), YKIND);
+ }
+};
+} // namespace
+
+namespace Fortran::runtime {
extern "C" {
RT_EXT_API_GROUP_BEGIN
@@ -495,6 +523,24 @@ void RTDEF(MatmulDirect)(const Descriptor &result, const Descriptor &x,
Matmul<false>{}(result, x, y, sourceFile, line);
}
+#define MATMUL_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
+ void RTDEF(Matmul##XCAT##XKIND##YCAT##YKIND)(Descriptor & result, \
+ const Descriptor &x, const Descriptor &y, const char *sourceFile, \
+ int line) { \
+ MatmulHelper<true, TypeCategory::XCAT, XKIND, TypeCategory::YCAT, \
+ YKIND>{}(result, x, y, sourceFile, line); \
+ }
+
+#define MATMUL_DIRECT_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
+ void RTDEF(MatmulDirect##XCAT##XKIND##YCAT##YKIND)(Descriptor & result, \
+ const Descriptor &x, const Descriptor &y, const char *sourceFile, \
+ int line) { \
+ MatmulHelper<false, TypeCategory::XCAT, XKIND, TypeCategory::YCAT, \
+ YKIND>{}(result, x, y, sourceFile, line); \
+ }
+
+#include "flang/Runtime/matmul-instances.inc"
+
RT_EXT_API_GROUP_END
} // extern "C"
} // namespace Fortran::runtime
diff --git a/flang/unittests/Runtime/Matmul.cpp b/flang/unittests/Runtime/Matmul.cpp
index 1d6c5ccc609b4..226dbc5ae9eeb 100644
--- a/flang/unittests/Runtime/Matmul.cpp
+++ b/flang/unittests/Runtime/Matmul.cpp
@@ -63,6 +63,29 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
+ RTNAME(MatmulInteger4Integer2)(result, *x, *y, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+
+ std::memset(
+ result.raw().base_addr, 0, result.Elements() * result.ElementBytes());
+ result.GetDimension(0).SetLowerBound(0);
+ result.GetDimension(1).SetLowerBound(2);
+ RTNAME(MatmulDirectInteger4Integer2)(result, *x, *y, __FILE__, __LINE__);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+ result.Destroy();
+
RTNAME(Matmul)(result, *v, *x, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -73,6 +96,16 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -14);
result.Destroy();
+ RTNAME(MatmulInteger8Integer4)(result, *v, *x, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 1);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 3);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -2);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -8);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -14);
+ result.Destroy();
+
RTNAME(Matmul)(result, *y, *v, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -83,6 +116,16 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
result.Destroy();
+ RTNAME(MatmulInteger2Integer8)(result, *y, *v, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 1);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 3);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -24);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -27);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
+ result.Destroy();
+
// Test non-contiguous sections.
static constexpr int sectionRank{2};
StaticDescriptor<sectionRank> sectionStaticDescriptorX2;
@@ -129,6 +172,19 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
+ RTNAME(MatmulInteger4Integer2)(result, sectionX2, *y, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+ result.Destroy();
+
RTNAME(Matmul)(result, *x, sectionY2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -142,6 +198,19 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
+ RTNAME(MatmulInteger4Integer2)(result, *x, sectionY2, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+ result.Destroy();
+
RTNAME(Matmul)(result, sectionX2, sectionY2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -155,6 +224,20 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
+ RTNAME(MatmulInteger4Integer2)
+ (result, sectionX2, sectionY2, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+ result.Destroy();
+
RTNAME(Matmul)(result, *v, sectionX2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -165,6 +248,16 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -14);
result.Destroy();
+ RTNAME(MatmulInteger8Integer4)(result, *v, sectionX2, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 1);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 3);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -2);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -8);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -14);
+ result.Destroy();
+
RTNAME(Matmul)(result, sectionY2, *v, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -175,6 +268,16 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
result.Destroy();
+ RTNAME(MatmulInteger2Integer8)(result, sectionY2, *v, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 1);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 3);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -24);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -27);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
+ result.Destroy();
+
// X F F T Y F T
// F T T F T
// F F
@@ -197,4 +300,22 @@ TEST(Matmul, Basic) {
static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(2)));
EXPECT_TRUE(
static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(3)));
+ result.Destroy();
+
+ RTNAME(MatmulLogical1Logical2)(result, *xLog, *yLog, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Logical, 2}));
+ EXPECT_FALSE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(0)));
+ EXPECT_FALSE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(1)));
+ EXPECT_FALSE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(2)));
+ EXPECT_TRUE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(3)));
+ result.Destroy();
}
diff --git a/flang/unittests/Runtime/MatmulTranspose.cpp b/flang/unittests/Runtime/MatmulTranspose.cpp
index fe946f6d5a201..391c2e1b144ea 100644
--- a/flang/unittests/Runtime/MatmulTranspose.cpp
+++ b/flang/unittests/Runtime/MatmulTranspose.cpp
@@ -69,6 +69,30 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
+ RTNAME(MatmulTransposeInteger4Integer2)(result, *x, *y, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+
+ std::memset(
+ result.raw().base_addr, 0, result.Elements() * result.ElementBytes());
+ result.GetDimension(0).SetLowerBound(0);
+ result.GetDimension(1).SetLowerBound(2);
+ RTNAME(MatmulTransposeDirectInteger4Integer2)
+ (result, *x, *y, __FILE__, __LINE__);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+ result.Destroy();
+
RTNAME(MatmulTranspose)(result, *z, *v, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -79,6 +103,16 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
result.Destroy();
+ RTNAME(MatmulTransposeInteger2Integer8)(result, *z, *v, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 1);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 3);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -24);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -27);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
+ result.Destroy();
+
RTNAME(MatmulTranspose)(result, *m, *z, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
ASSERT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -100,6 +134,27 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(11), 19);
result.Destroy();
+ RTNAME(MatmulTransposeInteger2Integer2)(result, *m, *z, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ ASSERT_EQ(result.GetDimension(0).LowerBound(), 1);
+ ASSERT_EQ(result.GetDimension(0).UpperBound(), 4);
+ ASSERT_EQ(result.GetDimension(1).LowerBound(), 1);
+ ASSERT_EQ(result.GetDimension(1).UpperBound(), 3);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 2}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(0), 0);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(1), 9);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(2), 6);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(3), 15);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(4), 0);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(5), 10);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(6), 7);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(7), 17);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(8), 0);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(9), 11);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(10), 8);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(11), 19);
+ result.Destroy();
+
// Test non-contiguous sections.
static constexpr int sectionRank{2};
StaticDescriptor<sectionRank> sectionStaticDescriptorX2;
@@ -162,6 +217,20 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
+ RTNAME(MatmulTransposeInteger4Integer2)
+ (result, sectionX2, *y, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+ result.Destroy();
+
RTNAME(MatmulTranspose)(result, *x, sectionY2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -175,6 +244,20 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
+ RTNAME(MatmulTransposeInteger4Integer2)
+ (result, *x, sectionY2, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+ result.Destroy();
+
RTNAME(MatmulTranspose)(result, sectionX2, sectionY2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -188,6 +271,20 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
+ RTNAME(MatmulTransposeInteger4Integer2)
+ (result, sectionX2, sectionY2, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
+ result.Destroy();
+
RTNAME(MatmulTranspose)(result, sectionZ2, *v, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -198,6 +295,17 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
result.Destroy();
+ RTNAME(MatmulTransposeInteger2Integer8)
+ (result, sectionZ2, *v, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 1);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 3);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -24);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -27);
+ EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
+ result.Destroy();
+
// X F F Y F T V T F T
// T F F T
// T T F F
@@ -222,6 +330,25 @@ TEST(MatmulTranspose, Basic) {
static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(2)));
EXPECT_FALSE(
static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(3)));
+ result.Destroy();
+
+ RTNAME(MatmulTransposeLogical1Logical2)
+ (result, *xLog, *yLog, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Logical, 2}));
+ EXPECT_FALSE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(0)));
+ EXPECT_FALSE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(1)));
+ EXPECT_TRUE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(2)));
+ EXPECT_FALSE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(3)));
+ result.Destroy();
RTNAME(MatmulTranspose)(result, *yLog, *vLog, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
@@ -232,4 +359,17 @@ TEST(MatmulTranspose, Basic) {
static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(0)));
EXPECT_TRUE(
static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(1)));
+ result.Destroy();
+
+ RTNAME(MatmulTransposeLogical2Logical1)
+ (result, *yLog, *vLog, __FILE__, __LINE__);
+ ASSERT_EQ(result.rank(), 1);
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(0).Extent(), 2);
+ ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Logical, 2}));
+ EXPECT_FALSE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(0)));
+ EXPECT_TRUE(
+ static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(1)));
+ result.Destroy();
}
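(Usage sketch, not part of the patch: given descriptors x, y and a result
descriptor prepared as in the tests above, the generic and the specialized
entry points are interchangeable for these operand types.

  // Generic entry point: operand categories/kinds are read from the
  // descriptors and dispatched at run time.
  RTNAME(Matmul)(result, *x, *y, __FILE__, __LINE__);
  result.Destroy();
  // Specialized entry point: categories and kinds are fixed in the
  // symbol name; only KIND/shape checks remain at run time.
  RTNAME(MatmulInteger4Integer2)(result, *x, *y, __FILE__, __LINE__);
  result.Destroy();

This lets lowering emit a direct call when the operand types are known
statically, skipping the runtime type dispatch.)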
>From 03579455bd941da6278f883ed8827ef0fbeb5e50 Mon Sep 17 00:00:00 2001
From: agozillon <Andrew.Gozillon at amd.com>
Date: Wed, 3 Jul 2024 07:07:53 +0200
Subject: [PATCH 070/246] [Flang][OpenMP] More elegantly handle declare target
in unnamed program (#95834)
This PR is related to the following issue:
https://github.com/llvm/llvm-project/issues/63362
It addresses the crash reported there (the symptom has shifted slightly
since the issue was filed; apologies for missing the original ping and
letting it languish).
The crash occurs when we try to access the symbol of an unnamed main
program while looking up a declare target symbol that has not been
specified explicitly (but can be assumed based on its residence in a
function or interface).
The fix checks whether we are about to retrieve a main program symbol
and, if so, verifies that the symbol exists (i.e. the program is named)
before attempting the retrieval. This avoids the crash.
However, that is only part of the problem in the example above; the
other part is the deeply nested directives, which we are still some way
from handling. I have added a reduced variation of the test from the
issue as a reproducer, with fewer levels of nesting. Pushing the issue
along further will likely mean working through a number of
nested-directive variations combining target and parallel.
With this change, the failure encountered in the issue above becomes
identical to the following:
https://github.com/llvm/llvm-project/issues/67231
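For reference, the guard in isolation (a simplified restatement of the
change below, not the verbatim diff):

  // Only fall through to the implicit capture when the owning procedure
  // either is not the main program or, if it is, actually has a symbol
  // to capture (i.e. the program is named).
  const auto *proc = eval.getOwningProcedure();
  if (clauses.empty() &&
      (!proc->isMainProgram() || proc->getMainProgramSymbol())) {
    // ... implicitly capture the enclosing function as declare target
  }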
---
flang/lib/Lower/OpenMP/OpenMP.cpp | 4 +-
.../OpenMP/declare-target-unnamed-main.f90 | 41 +++++++++++++++++++
2 files changed, 44 insertions(+), 1 deletion(-)
create mode 100644 flang/test/Lower/OpenMP/declare-target-unnamed-main.f90
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index d8679fb693659..9ad092a1a00bd 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -296,7 +296,9 @@ static void getDeclareTargetInfo(
} else if (const auto *clauseList{
parser::Unwrap<parser::OmpClauseList>(spec.u)}) {
List<Clause> clauses = makeClauses(*clauseList, semaCtx);
- if (clauses.empty()) {
+ if (clauses.empty() &&
+ (!eval.getOwningProcedure()->isMainProgram() ||
+ eval.getOwningProcedure()->getMainProgramSymbol())) {
// Case: declare target, implicit capture of function
symbolAndClause.emplace_back(
mlir::omp::DeclareTargetCaptureClause::to,
diff --git a/flang/test/Lower/OpenMP/declare-target-unnamed-main.f90 b/flang/test/Lower/OpenMP/declare-target-unnamed-main.f90
new file mode 100644
index 0000000000000..b7d6d2fa232ad
--- /dev/null
+++ b/flang/test/Lower/OpenMP/declare-target-unnamed-main.f90
@@ -0,0 +1,41 @@
+!RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
+!RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-is-device %s -o - | FileCheck %s
+
+! This test is a reduced version of the example in issue 63362.
+! It aims to test that no crash occurs when declare target is
+! utilised within an unnamed main program and that we still
+! appropriately mark the function as declare target, even when
+! unused within the target region.
+
+!CHECK: func.func @_QPfoo(%{{.*}}: !fir.ref<f32>{{.*}}) -> f32 attributes {{{.*}}omp.declare_target = #omp.declaretarget<device_type = (any), capture_clause = (to)>{{.*}}}
+
+interface
+real function foo (x)
+ !$omp declare target
+ real, intent(in) :: x
+end function foo
+end interface
+integer, parameter :: n = 1000
+integer, parameter :: c = 100
+integer :: i, j
+real :: a(n)
+do i = 1, n
+a(i) = i
+end do
+do i = 1, n, c
+ !$omp target map(a(i:i+c-1))
+ !$omp parallel do
+ do j = i, i + c - 1
+ a(j) = a(j)
+ end do
+ !$omp end target
+end do
+do i = 1, n
+if (a(i) /= i + 1) stop 1
+end do
+end
+real function foo (x)
+!$omp declare target
+real, intent(in) :: x
+foo = x + 1
+end function foo
>From c785eaec373aecf669807d29478ddefd56c82228 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 2 Jul 2024 22:10:31 -0700
Subject: [PATCH 071/246] [RISCV] Add wrapper function for getStringError in
RISCVISAInfo. NFC (#97478)
We use the same error code for all errors. Add a wrapper so we don't
have to repeat it.
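Schematically (before/after, taken from the pattern in the diff below):

  // Before: every failure path spells out the error code.
  return createStringError(errc::invalid_argument,
                           "duplicate extension '" + ExtName + "'");

  // After: the wrapper fixes the code; call sites pass only the message.
  static Error getError(const Twine &Message) {
    return createStringError(errc::invalid_argument, Message);
  }
  return getError("duplicate extension '" + ExtName + "'");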
---
llvm/lib/TargetParser/RISCVISAInfo.cpp | 166 ++++++++++---------------
1 file changed, 65 insertions(+), 101 deletions(-)
diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp
index b4fd067a1ed7a..133c6852bc85e 100644
--- a/llvm/lib/TargetParser/RISCVISAInfo.cpp
+++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp
@@ -282,15 +282,17 @@ std::vector<std::string> RISCVISAInfo::toFeatures(bool AddAllExtensions,
return Features;
}
-static Error getStringErrorForInvalidExt(StringRef ExtName) {
+static Error getError(const Twine &Message) {
+ return createStringError(errc::invalid_argument, Message);
+}
+
+static Error getErrorForInvalidExt(StringRef ExtName) {
if (ExtName.size() == 1) {
- return createStringError(errc::invalid_argument,
- "unsupported standard user-level extension '" +
- ExtName + "'");
+ return getError("unsupported standard user-level extension '" + ExtName +
+ "'");
}
- return createStringError(errc::invalid_argument,
- "unsupported " + getExtensionTypeDesc(ExtName) +
- " '" + ExtName + "'");
+ return getError("unsupported " + getExtensionTypeDesc(ExtName) + " '" +
+ ExtName + "'");
}
// Extensions may have a version number, and may be separated by
@@ -315,21 +317,18 @@ static Error getExtensionVersion(StringRef Ext, StringRef In, unsigned &Major,
// Expected 'p' to be followed by minor version number.
if (MinorStr.empty()) {
- return createStringError(
- errc::invalid_argument,
- "minor version number missing after 'p' for extension '" + Ext + "'");
+ return getError("minor version number missing after 'p' for extension '" +
+ Ext + "'");
}
}
if (!MajorStr.empty() && MajorStr.getAsInteger(10, Major))
- return createStringError(
- errc::invalid_argument,
- "Failed to parse major version number for extension '" + Ext + "'");
+ return getError("Failed to parse major version number for extension '" +
+ Ext + "'");
if (!MinorStr.empty() && MinorStr.getAsInteger(10, Minor))
- return createStringError(
- errc::invalid_argument,
- "Failed to parse minor version number for extension '" + Ext + "'");
+ return getError("Failed to parse minor version number for extension '" +
+ Ext + "'");
ConsumeLength = MajorStr.size();
@@ -340,24 +339,21 @@ static Error getExtensionVersion(StringRef Ext, StringRef In, unsigned &Major,
// subsequent characters (i.e. must either end string or be followed by
// an underscore).
if (Ext.size() > 1 && In.size())
- return createStringError(
- errc::invalid_argument,
+ return getError(
"multi-character extensions must be separated by underscores");
// If experimental extension, require use of current version number
if (auto ExperimentalExtension = isExperimentalExtension(Ext)) {
if (!EnableExperimentalExtension)
- return createStringError(errc::invalid_argument,
- "requires '-menable-experimental-extensions' "
- "for experimental extension '" +
- Ext + "'");
+ return getError("requires '-menable-experimental-extensions' "
+ "for experimental extension '" +
+ Ext + "'");
if (ExperimentalExtensionVersionCheck &&
(MajorStr.empty() && MinorStr.empty()))
- return createStringError(
- errc::invalid_argument,
+ return getError(
"experimental extension requires explicit version number `" + Ext +
- "`");
+ "`");
auto SupportedVers = *ExperimentalExtension;
if (ExperimentalExtensionVersionCheck &&
@@ -368,7 +364,7 @@ static Error getExtensionVersion(StringRef Ext, StringRef In, unsigned &Major,
Error += " for experimental extension '" + Ext.str() +
"' (this compiler supports " + utostr(SupportedVers.Major) +
"." + utostr(SupportedVers.Minor) + ")";
- return createStringError(errc::invalid_argument, Error);
+ return getError(Error);
}
return Error::success();
}
@@ -392,13 +388,13 @@ static Error getExtensionVersion(StringRef Ext, StringRef In, unsigned &Major,
return Error::success();
if (!RISCVISAInfo::isSupportedExtension(Ext))
- return getStringErrorForInvalidExt(Ext);
+ return getErrorForInvalidExt(Ext);
- std::string Error = "unsupported version number " + std::string(MajorStr);
+ std::string Error = "unsupported version number " + MajorStr.str();
if (!MinorStr.empty())
Error += "." + MinorStr.str();
Error += " for extension '" + Ext.str() + "'";
- return createStringError(errc::invalid_argument, Error);
+ return getError(Error);
}
llvm::Expected<std::unique_ptr<RISCVISAInfo>>
@@ -439,8 +435,7 @@ RISCVISAInfo::parseNormalizedArchString(StringRef Arch) {
// RISC-V ISA strings must be [a-z0-9_]
if (!llvm::all_of(
Arch, [](char C) { return isDigit(C) || isLower(C) || C == '_'; }))
- return createStringError(errc::invalid_argument,
- "string may only contain [a-z0-9_]");
+ return getError("string may only contain [a-z0-9_]");
// Must start with a valid base ISA name.
unsigned XLen = 0;
@@ -450,8 +445,7 @@ RISCVISAInfo::parseNormalizedArchString(StringRef Arch) {
XLen = 64;
if (XLen == 0 || Arch.empty() || (Arch[0] != 'i' && Arch[0] != 'e'))
- return createStringError(errc::invalid_argument,
- "arch string must begin with valid base ISA");
+ return getError("arch string must begin with valid base ISA");
std::unique_ptr<RISCVISAInfo> ISAInfo(new RISCVISAInfo(XLen));
@@ -461,8 +455,7 @@ RISCVISAInfo::parseNormalizedArchString(StringRef Arch) {
while (!Arch.empty()) {
if (Arch[0] == '_') {
if (Arch.size() == 1 || Arch[1] == '_')
- return createStringError(errc::invalid_argument,
- "extension name missing after separator '_'");
+ return getError("extension name missing after separator '_'");
Arch = Arch.drop_front();
}
@@ -473,12 +466,10 @@ RISCVISAInfo::parseNormalizedArchString(StringRef Arch) {
StringRef Prefix, MinorVersionStr;
std::tie(Prefix, MinorVersionStr) = Ext.rsplit('p');
if (MinorVersionStr.empty())
- return createStringError(errc::invalid_argument,
- "extension lacks version in expected format");
+ return getError("extension lacks version in expected format");
unsigned MajorVersion, MinorVersion;
if (MinorVersionStr.getAsInteger(10, MinorVersion))
- return createStringError(errc::invalid_argument,
- "failed to parse minor version number");
+ return getError("failed to parse minor version number");
// Split Prefix into the extension name and the major version number
// (the trailing digits of Prefix).
@@ -489,32 +480,27 @@ RISCVISAInfo::parseNormalizedArchString(StringRef Arch) {
--VersionStart;
}
if (VersionStart == Prefix.size())
- return createStringError(errc::invalid_argument,
- "extension lacks version in expected format");
+ return getError("extension lacks version in expected format");
if (VersionStart == 0)
- return createStringError(errc::invalid_argument,
- "missing extension name");
+ return getError("missing extension name");
StringRef ExtName = Prefix.slice(0, VersionStart);
StringRef MajorVersionStr = Prefix.slice(VersionStart, StringRef::npos);
if (MajorVersionStr.getAsInteger(10, MajorVersion))
- return createStringError(errc::invalid_argument,
- "failed to parse major version number");
+ return getError("failed to parse major version number");
if ((ExtName[0] == 'z' || ExtName[0] == 's' || ExtName[0] == 'x') &&
(ExtName.size() == 1 || isDigit(ExtName[1])))
- return createStringError(errc::invalid_argument,
- "'" + Twine(ExtName[0]) +
- "' must be followed by a letter");
+ return getError("'" + Twine(ExtName[0]) +
+ "' must be followed by a letter");
if (!ISAInfo->Exts
.emplace(
ExtName.str(),
RISCVISAUtils::ExtensionVersion{MajorVersion, MinorVersion})
.second)
- return createStringError(errc::invalid_argument,
- "duplicate extension '" + ExtName + "'");
+ return getError("duplicate extension '" + ExtName + "'");
}
ISAInfo->updateImpliedLengths();
return std::move(ISAInfo);
@@ -526,8 +512,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
// RISC-V ISA strings must be [a-z0-9_]
if (!llvm::all_of(
Arch, [](char C) { return isDigit(C) || isLower(C) || C == '_'; }))
- return createStringError(errc::invalid_argument,
- "string may only contain [a-z0-9_]");
+ return getError("string may only contain [a-z0-9_]");
// ISA string must begin with rv32, rv64, or a profile.
unsigned XLen = 0;
@@ -548,10 +533,9 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
FoundProfile = (I != std::begin(SupportedExperimentalProfiles) &&
Arch.starts_with(std::prev(I)->Name));
if (FoundProfile && !EnableExperimentalExtension) {
- return createStringError(errc::invalid_argument,
- "requires '-menable-experimental-extensions' "
- "for profile '" +
- std::prev(I)->Name + "'");
+ return getError("requires '-menable-experimental-extensions' "
+ "for profile '" +
+ std::prev(I)->Name + "'");
}
}
if (FoundProfile) {
@@ -560,9 +544,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
StringRef ArchWithoutProfile = Arch.drop_front(I->Name.size());
if (!ArchWithoutProfile.empty()) {
if (ArchWithoutProfile.front() != '_')
- return createStringError(
- errc::invalid_argument,
- "additional extensions must be after separator '_'");
+ return getError("additional extensions must be after separator '_'");
NewArch += ArchWithoutProfile.str();
}
return parseArchString(NewArch, EnableExperimentalExtension,
@@ -571,8 +553,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
}
if (XLen == 0 || Arch.empty())
- return createStringError(
- errc::invalid_argument,
+ return getError(
"string must begin with rv32{i,e,g}, rv64{i,e,g}, or a supported "
"profile name");
@@ -592,9 +573,8 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
// First letter should be 'e', 'i' or 'g'.
switch (Baseline) {
default:
- return createStringError(errc::invalid_argument,
- "first letter after \'rv" + Twine(XLen) +
- "\' should be 'e', 'i' or 'g'");
+ return getError("first letter after \'rv" + Twine(XLen) +
+ "\' should be 'e', 'i' or 'g'");
case 'e':
case 'i':
// Baseline is `i` or `e`
@@ -609,8 +589,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
case 'g':
// g expands to extensions in RISCVGImplications.
if (Arch.size() > 1 && isDigit(Arch[1]))
- return createStringError(errc::invalid_argument,
- "version not supported for 'g'");
+ return getError("version not supported for 'g'");
// Versions for g are disallowed, and this was checked for previously.
ConsumeLength = 0;
@@ -634,8 +613,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
while (!Exts.empty()) {
if (Exts.front() == '_') {
if (Exts.size() == 1 || Exts[1] == '_')
- return createStringError(errc::invalid_argument,
- "extension name missing after separator '_'");
+ return getError("extension name missing after separator '_'");
Exts = Exts.drop_front();
}
@@ -669,12 +647,10 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
assert(!Type.empty() && "Empty type?");
if (Name.size() == Type.size())
- return createStringError(errc::invalid_argument,
- Desc + " name missing after '" + Type + "'");
+ return getError(Desc + " name missing after '" + Type + "'");
} else {
- return createStringError(errc::invalid_argument,
- "invalid standard user-level extension '" +
- Twine(Ext.front()) + "'");
+ return getError("invalid standard user-level extension '" +
+ Twine(Ext.front()) + "'");
}
unsigned Major, Minor, ConsumeLength;
@@ -688,8 +664,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
// Check if duplicated extension.
if (SeenExtMap.contains(Name.str()))
- return createStringError(errc::invalid_argument,
- "duplicated " + Desc + " '" + Name + "'");
+ return getError("duplicated " + Desc + " '" + Name + "'");
SeenExtMap[Name.str()] = {Major, Minor};
} while (!Ext.empty());
@@ -700,7 +675,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
const std::string &ExtName = SeenExtAndVers.first;
if (!RISCVISAInfo::isSupportedExtension(ExtName))
- return getStringErrorForInvalidExt(ExtName);
+ return getErrorForInvalidExt(ExtName);
ISAInfo->Exts[ExtName] = SeenExtAndVers.second;
}
@@ -718,60 +693,49 @@ Error RISCVISAInfo::checkDependency() {
bool HasZcmt = Exts.count("zcmt") != 0;
if (HasI && HasE)
- return createStringError(errc::invalid_argument,
- "'I' and 'E' extensions are incompatible");
+ return getError("'I' and 'E' extensions are incompatible");
if (HasF && HasZfinx)
- return createStringError(errc::invalid_argument,
- "'f' and 'zfinx' extensions are incompatible");
+ return getError("'f' and 'zfinx' extensions are incompatible");
if (HasZvl && !HasVector)
- return createStringError(
- errc::invalid_argument,
+ return getError(
"'zvl*b' requires 'v' or 'zve*' extension to also be specified");
if (Exts.count("zvbb") && !HasVector)
- return createStringError(
- errc::invalid_argument,
+ return getError(
"'zvbb' requires 'v' or 'zve*' extension to also be specified");
if (Exts.count("zvbc") && !Exts.count("zve64x"))
- return createStringError(
- errc::invalid_argument,
+ return getError(
"'zvbc' requires 'v' or 'zve64*' extension to also be specified");
if ((Exts.count("zvkb") || Exts.count("zvkg") || Exts.count("zvkned") ||
Exts.count("zvknha") || Exts.count("zvksed") || Exts.count("zvksh")) &&
!HasVector)
- return createStringError(
- errc::invalid_argument,
+ return getError(
"'zvk*' requires 'v' or 'zve*' extension to also be specified");
if (Exts.count("zvknhb") && !Exts.count("zve64x"))
- return createStringError(
- errc::invalid_argument,
+ return getError(
"'zvknhb' requires 'v' or 'zve64*' extension to also be specified");
if ((HasZcmt || Exts.count("zcmp")) && Exts.count("d") &&
(HasC || Exts.count("zcd")))
- return createStringError(
- errc::invalid_argument,
- Twine("'") + (HasZcmt ? "zcmt" : "zcmp") +
- "' extension is incompatible with '" + (HasC ? "c" : "zcd") +
- "' extension when 'd' extension is enabled");
+ return getError(Twine("'") + (HasZcmt ? "zcmt" : "zcmp") +
+ "' extension is incompatible with '" +
+ (HasC ? "c" : "zcd") +
+ "' extension when 'd' extension is enabled");
if (XLen != 32 && Exts.count("zcf"))
- return createStringError(errc::invalid_argument,
- "'zcf' is only supported for 'rv32'");
+ return getError("'zcf' is only supported for 'rv32'");
if (Exts.count("zacas") && !(Exts.count("a") || Exts.count("zaamo")))
- return createStringError(
- errc::invalid_argument,
+ return getError(
"'zacas' requires 'a' or 'zaamo' extension to also be specified");
if (Exts.count("zabha") && !(Exts.count("a") || Exts.count("zaamo")))
- return createStringError(
- errc::invalid_argument,
+ return getError(
"'zabha' requires 'a' or 'zaamo' extension to also be specified");
return Error::success();
>From 0cfd03ac0d3f9713090a581bda07584754c73a49 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Tue, 2 Jul 2024 22:58:26 -0700
Subject: [PATCH 072/246] [ProfileData] Use ArrayRef in PatchItem (NFC)
(#97379)
Packaging an array and its size as ArrayRef in PatchItem allows us to
get rid of things like std::size(Header) and HeaderOffsets.size().
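Schematically, the pointer-plus-length pair becomes an ArrayRef, so a
range-for replaces the indexed loop:

  // Before
  struct PatchItem { uint64_t Pos; uint64_t *D; int N; };
  for (int I = 0; I < K.N; I++)
    write(K.D[I]);

  // After
  struct PatchItem { uint64_t Pos; ArrayRef<uint64_t> D; };
  for (uint64_t Elem : K.D)
    write(Elem);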
---
llvm/lib/ProfileData/InstrProfWriter.cpp | 31 ++++++++++++------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/ProfileData/InstrProfWriter.cpp b/llvm/lib/ProfileData/InstrProfWriter.cpp
index 1ba229147c1f9..1a3721bf10350 100644
--- a/llvm/lib/ProfileData/InstrProfWriter.cpp
+++ b/llvm/lib/ProfileData/InstrProfWriter.cpp
@@ -39,9 +39,8 @@ using namespace llvm;
// A struct to define how the data stream should be patched. For Indexed
// profiling, only uint64_t data type is needed.
struct PatchItem {
- uint64_t Pos; // Where to patch.
- uint64_t *D; // Pointer to an array of source data.
- int N; // Number of elements in \c D array.
+ uint64_t Pos; // Where to patch.
+ ArrayRef<uint64_t> D; // An array of source data.
};
namespace llvm {
@@ -71,8 +70,8 @@ class ProfOStream {
const uint64_t LastPos = FDOStream.tell();
for (const auto &K : P) {
FDOStream.seek(K.Pos);
- for (int I = 0; I < K.N; I++)
- write(K.D[I]);
+ for (uint64_t Elem : K.D)
+ write(Elem);
}
// Reset the stream to the last position after patching so that users
// don't accidentally overwrite data. This makes it consistent with
@@ -82,7 +81,7 @@ class ProfOStream {
raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
std::string &Data = SOStream.str(); // with flush
for (const auto &K : P) {
- for (int I = 0; I < K.N; I++) {
+ for (int I = 0, E = K.D.size(); I != E; I++) {
uint64_t Bytes =
endian::byte_swap<uint64_t, llvm::endianness::little>(K.D[I]);
Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t),
@@ -612,7 +611,7 @@ static Error writeMemProfV0(ProfOStream &OS,
uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);
uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
- OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
+ OS.patch({{HeaderUpdatePos, Header}});
return Error::success();
}
@@ -647,7 +646,7 @@ static Error writeMemProfV1(ProfOStream &OS,
uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);
uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
- OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
+ OS.patch({{HeaderUpdatePos, Header}});
return Error::success();
}
@@ -697,7 +696,7 @@ static Error writeMemProfV2(ProfOStream &OS,
RecordTableOffset, FramePayloadOffset, FrameTableOffset,
CallStackPayloadOffset, CallStackTableOffset,
};
- OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
+ OS.patch({{HeaderUpdatePos, Header}});
return Error::success();
}
@@ -751,7 +750,7 @@ static Error writeMemProfV3(ProfOStream &OS,
RecordPayloadOffset,
RecordTableOffset,
};
- OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
+ OS.patch({{HeaderUpdatePos, Header}});
return Error::success();
}
@@ -989,12 +988,14 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
PatchItem PatchItems[] = {
// Patch the Header fields
- {BackPatchStartOffset, HeaderOffsets.data(), (int)HeaderOffsets.size()},
+ {BackPatchStartOffset, HeaderOffsets},
// Patch the summary data.
- {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
- (int)(SummarySize / sizeof(uint64_t))},
- {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
- (int)CSSummarySize}};
+ {SummaryOffset,
+ ArrayRef<uint64_t>(reinterpret_cast<uint64_t *>(TheSummary.get()),
+ SummarySize / sizeof(uint64_t))},
+ {CSSummaryOffset,
+ ArrayRef<uint64_t>(reinterpret_cast<uint64_t *>(TheCSSummary.get()),
+ CSSummarySize)}};
OS.patch(PatchItems);
>From 4e78d3a6b1560fb5debf1b518b4bd62924e900e5 Mon Sep 17 00:00:00 2001
From: srcarroll <50210727+srcarroll at users.noreply.github.com>
Date: Wed, 3 Jul 2024 01:27:19 -0500
Subject: [PATCH 073/246] Revert "Refactor LoopFuseSiblingOp and support
parallel fusion (#94391)" (#97523)
This reverts commit 6820b0871807abff07df118659e0de2ca741cb0b.
---
mlir/include/mlir/Dialect/SCF/IR/SCFOps.td | 3 +-
mlir/include/mlir/Dialect/SCF/Utils/Utils.h | 20 --
.../mlir/Interfaces/LoopLikeInterface.h | 20 --
mlir/lib/Dialect/SCF/IR/SCF.cpp | 38 ---
.../SCF/TransformOps/SCFTransformOps.cpp | 140 +++++++--
.../SCF/Transforms/ParallelLoopFusion.cpp | 80 ++++-
mlir/lib/Dialect/SCF/Utils/Utils.cpp | 279 ++++++------------
mlir/lib/Interfaces/LoopLikeInterface.cpp | 55 ----
.../SCF/transform-loop-fuse-sibling.mlir | 234 +--------------
9 files changed, 283 insertions(+), 586 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
index bf95fbe6721cf..f35ea962bea16 100644
--- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
@@ -303,8 +303,7 @@ def ForallOp : SCF_Op<"forall", [
DeclareOpInterfaceMethods<LoopLikeOpInterface,
["getInitsMutable", "getRegionIterArgs", "getLoopInductionVars",
"getLoopLowerBounds", "getLoopUpperBounds", "getLoopSteps",
- "replaceWithAdditionalYields", "promoteIfSingleIteration",
- "yieldTiledValuesAndReplace"]>,
+ "promoteIfSingleIteration", "yieldTiledValuesAndReplace"]>,
RecursiveMemoryEffects,
SingleBlockImplicitTerminator<"scf::InParallelOp">,
DeclareOpInterfaceMethods<RegionBranchOpInterface>,
diff --git a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
index 6a40304e2eeba..de807c3e4e1f8 100644
--- a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
@@ -181,16 +181,6 @@ Loops tilePerfectlyNested(scf::ForOp rootForOp, ArrayRef<Value> sizes);
void getPerfectlyNestedLoops(SmallVectorImpl<scf::ForOp> &nestedLoops,
scf::ForOp root);
-//===----------------------------------------------------------------------===//
-// Fusion related helpers
-//===----------------------------------------------------------------------===//
-
-/// Check structural compatibility between two loops such as iteration space
-/// and dominance.
-bool checkFusionStructuralLegality(LoopLikeOpInterface target,
- LoopLikeOpInterface source,
- Diagnostic &diag);
-
/// Given two scf.forall loops, `target` and `source`, fuses `target` into
/// `source`. Assumes that the given loops are siblings and are independent of
/// each other.
@@ -212,16 +202,6 @@ scf::ForallOp fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForOp fuseIndependentSiblingForLoops(scf::ForOp target, scf::ForOp source,
RewriterBase &rewriter);
-/// Given two scf.parallel loops, `target` and `source`, fuses `target` into
-/// `source`. Assumes that the given loops are siblings and are independent of
-/// each other.
-///
-/// This function does not perform any legality checks and simply fuses the
-/// loops. The caller is responsible for ensuring that the loops are legal to
-/// fuse.
-scf::ParallelOp fuseIndependentSiblingParallelLoops(scf::ParallelOp target,
- scf::ParallelOp source,
- RewriterBase &rewriter);
} // namespace mlir
#endif // MLIR_DIALECT_SCF_UTILS_UTILS_H_
diff --git a/mlir/include/mlir/Interfaces/LoopLikeInterface.h b/mlir/include/mlir/Interfaces/LoopLikeInterface.h
index d08e097a9b4af..9925fc6ce6ca9 100644
--- a/mlir/include/mlir/Interfaces/LoopLikeInterface.h
+++ b/mlir/include/mlir/Interfaces/LoopLikeInterface.h
@@ -90,24 +90,4 @@ struct JamBlockGatherer {
/// Include the generated interface declarations.
#include "mlir/Interfaces/LoopLikeInterface.h.inc"
-namespace mlir {
-/// A function that rewrites `target`'s terminator as a terminator obtained by
-/// fusing `source` into `target`.
-using FuseTerminatorFn =
- function_ref<void(RewriterBase &rewriter, LoopLikeOpInterface source,
- LoopLikeOpInterface &target, IRMapping mapping)>;
-
-/// Returns a fused `LoopLikeOpInterface` created by fusing `source` to
-/// `target`. The `NewYieldValuesFn` callback is used to pass to the
-/// `replaceWithAdditionalYields` interface method to replace the loop with a
-/// new loop with (possibly) additional yields, while the `FuseTerminatorFn`
-/// callback is responsible for updating the fused loop terminator.
-LoopLikeOpInterface createFused(LoopLikeOpInterface target,
- LoopLikeOpInterface source,
- RewriterBase &rewriter,
- NewYieldValuesFn newYieldValuesFn,
- FuseTerminatorFn fuseTerminatorFn);
-
-} // namespace mlir
-
#endif // MLIR_INTERFACES_LOOPLIKEINTERFACE_H_
diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index cb15e0ecebf05..907d7f794593d 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -618,44 +618,6 @@ void ForOp::getSuccessorRegions(RegionBranchPoint point,
SmallVector<Region *> ForallOp::getLoopRegions() { return {&getRegion()}; }
-FailureOr<LoopLikeOpInterface> ForallOp::replaceWithAdditionalYields(
- RewriterBase &rewriter, ValueRange newInitOperands,
- bool replaceInitOperandUsesInLoop,
- const NewYieldValuesFn &newYieldValuesFn) {
- // Create a new loop before the existing one, with the extra operands.
- OpBuilder::InsertionGuard g(rewriter);
- rewriter.setInsertionPoint(getOperation());
- SmallVector<Value> inits(getOutputs());
- llvm::append_range(inits, newInitOperands);
- scf::ForallOp newLoop = rewriter.create<scf::ForallOp>(
- getLoc(), getMixedLowerBound(), getMixedUpperBound(), getMixedStep(),
- inits, getMapping(),
- /*bodyBuilderFn =*/[](OpBuilder &, Location, ValueRange) {});
-
- // Move the loop body to the new op.
- rewriter.mergeBlocks(getBody(), newLoop.getBody(),
- newLoop.getBody()->getArguments().take_front(
- getBody()->getNumArguments()));
-
- if (replaceInitOperandUsesInLoop) {
- // Replace all uses of `newInitOperands` with the corresponding basic block
- // arguments.
- for (auto &&[newOperand, oldOperand] :
- llvm::zip(newInitOperands, newLoop.getBody()->getArguments().take_back(
- newInitOperands.size()))) {
- rewriter.replaceUsesWithIf(newOperand, oldOperand, [&](OpOperand &use) {
- Operation *user = use.getOwner();
- return newLoop->isProperAncestor(user);
- });
- }
- }
-
- // Replace the old loop.
- rewriter.replaceOp(getOperation(),
- newLoop->getResults().take_front(getNumResults()));
- return cast<LoopLikeOpInterface>(newLoop.getOperation());
-}
-
/// Promotes the loop body of a forallOp to its containing block if it can be
/// determined that the loop has a single iteration.
LogicalResult scf::ForallOp::promoteIfSingleIteration(RewriterBase &rewriter) {
diff --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
index 41834fea3bb84..56ff2709a589e 100644
--- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
+++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
@@ -261,10 +261,8 @@ loopScheduling(scf::ForOp forOp,
return 1;
};
- std::optional<int64_t> ubConstant =
- getConstantIntValue(forOp.getUpperBound());
- std::optional<int64_t> lbConstant =
- getConstantIntValue(forOp.getLowerBound());
+ std::optional<int64_t> ubConstant = getConstantIntValue(forOp.getUpperBound());
+ std::optional<int64_t> lbConstant = getConstantIntValue(forOp.getLowerBound());
DenseMap<Operation *, unsigned> opCycles;
std::map<unsigned, std::vector<Operation *>> wrappedSchedule;
for (Operation &op : forOp.getBody()->getOperations()) {
@@ -449,6 +447,113 @@ void transform::TakeAssumedBranchOp::getEffects(
// LoopFuseSiblingOp
//===----------------------------------------------------------------------===//
+/// Check if `target` and `source` are siblings, in the context that `target`
+/// is being fused into `source`.
+///
+/// This checks that both operations are in the same block and performs
+/// additional dominance checks to ensure that the fused IR remains valid.
+static DiagnosedSilenceableFailure isOpSibling(Operation *target,
+ Operation *source) {
+ // Check if both operations are same.
+ if (target == source)
+ return emitSilenceableFailure(source)
+ << "target and source need to be different loops";
+
+ // Check if both operations are in the same block.
+ if (target->getBlock() != source->getBlock())
+ return emitSilenceableFailure(source)
+ << "target and source are not in the same block";
+
+ // Check if fusion will violate dominance.
+ DominanceInfo domInfo(source);
+ if (target->isBeforeInBlock(source)) {
+ // Since `target` is before `source`, all users of results of `target`
+ // need to be dominated by `source`.
+ for (Operation *user : target->getUsers()) {
+ if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
+ return emitSilenceableFailure(target)
+ << "user of results of target should be properly dominated by "
+ "source";
+ }
+ }
+ } else {
+ // Since `target` is after `source`, all values used by `target` need
+ // to dominate `source`.
+
+ // Check if operands of `target` are dominated by `source`.
+ for (Value operand : target->getOperands()) {
+ Operation *operandOp = operand.getDefiningOp();
+ // Operands without defining operations are block arguments. When `target`
+ // and `source` occur in the same block, these operands dominate `source`.
+ if (!operandOp)
+ continue;
+
+ // Operand's defining operation should properly dominate `source`.
+ if (!domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false))
+ return emitSilenceableFailure(target)
+ << "operands of target should be properly dominated by source";
+ }
+
+ // Check if values used by `target` are dominated by `source`.
+ bool failed = false;
+ OpOperand *failedValue = nullptr;
+ visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
+ Operation *operandOp = operand->get().getDefiningOp();
+ if (operandOp && !domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false)) {
+ // `operand` is not an argument of an enclosing block and the defining
+ // op of `operand` is outside `target` but does not dominate `source`.
+ failed = true;
+ failedValue = operand;
+ }
+ });
+
+ if (failed)
+ return emitSilenceableFailure(failedValue->getOwner())
+ << "values used inside regions of target should be properly "
+ "dominated by source";
+ }
+
+ return DiagnosedSilenceableFailure::success();
+}
+
+/// Check if `target` scf.forall can be fused into `source` scf.forall.
+///
+/// This simply checks if both loops have the same bounds, steps and mapping.
+/// No attempt is made at checking that the side effects of `target` and
+/// `source` are independent of each other.
+static bool isForallWithIdenticalConfiguration(Operation *target,
+ Operation *source) {
+ auto targetOp = dyn_cast<scf::ForallOp>(target);
+ auto sourceOp = dyn_cast<scf::ForallOp>(source);
+ if (!targetOp || !sourceOp)
+ return false;
+
+ return targetOp.getMixedLowerBound() == sourceOp.getMixedLowerBound() &&
+ targetOp.getMixedUpperBound() == sourceOp.getMixedUpperBound() &&
+ targetOp.getMixedStep() == sourceOp.getMixedStep() &&
+ targetOp.getMapping() == sourceOp.getMapping();
+}
+
+/// Check if `target` scf.for can be fused into `source` scf.for.
+///
+/// This simply checks if both loops have the same bounds and steps. No attempt
+/// is made at checking that the side effects of `target` and `source` are
+/// independent of each other.
+static bool isForWithIdenticalConfiguration(Operation *target,
+ Operation *source) {
+ auto targetOp = dyn_cast<scf::ForOp>(target);
+ auto sourceOp = dyn_cast<scf::ForOp>(source);
+ if (!targetOp || !sourceOp)
+ return false;
+
+ return targetOp.getLowerBound() == sourceOp.getLowerBound() &&
+ targetOp.getUpperBound() == sourceOp.getUpperBound() &&
+ targetOp.getStep() == sourceOp.getStep();
+}
+
DiagnosedSilenceableFailure
transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
transform::TransformResults &results,
@@ -464,32 +569,25 @@ transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
<< "source handle (got " << llvm::range_size(sourceOps) << ")";
}
- auto target = dyn_cast<LoopLikeOpInterface>(*targetOps.begin());
- auto source = dyn_cast<LoopLikeOpInterface>(*sourceOps.begin());
- if (!target || !source)
- return emitSilenceableFailure(target->getLoc())
- << "target or source is not a loop op";
+ Operation *target = *targetOps.begin();
+ Operation *source = *sourceOps.begin();
- // Check if loops can be fused
- Diagnostic diag(target.getLoc(), DiagnosticSeverity::Error);
- if (!mlir::checkFusionStructuralLegality(target, source, diag))
- return DiagnosedSilenceableFailure::silenceableFailure(std::move(diag));
+ // Check if the target and source are siblings.
+ DiagnosedSilenceableFailure diag = isOpSibling(target, source);
+ if (!diag.succeeded())
+ return diag;
Operation *fusedLoop;
- // TODO: Support fusion for loop-like ops besides scf.for, scf.forall
- // and scf.parallel.
- if (isa<scf::ForOp>(target) && isa<scf::ForOp>(source)) {
+ /// TODO: Support fusion for loop-like ops besides scf.for and scf.forall.
+ if (isForWithIdenticalConfiguration(target, source)) {
fusedLoop = fuseIndependentSiblingForLoops(
cast<scf::ForOp>(target), cast<scf::ForOp>(source), rewriter);
- } else if (isa<scf::ForallOp>(target) && isa<scf::ForallOp>(source)) {
+ } else if (isForallWithIdenticalConfiguration(target, source)) {
fusedLoop = fuseIndependentSiblingForallLoops(
cast<scf::ForallOp>(target), cast<scf::ForallOp>(source), rewriter);
- } else if (isa<scf::ParallelOp>(target) && isa<scf::ParallelOp>(source)) {
- fusedLoop = fuseIndependentSiblingParallelLoops(
- cast<scf::ParallelOp>(target), cast<scf::ParallelOp>(source), rewriter);
} else
return emitSilenceableFailure(target->getLoc())
- << "unsupported loop type for fusion";
+ << "operations cannot be fused";
assert(fusedLoop && "failed to fuse operations");
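The check above reduces to a direction-dependent dominance requirement:
whichever position the fused loop materializes at must dominate all remaining
uses. A condensed sketch of the same logic, without the diagnostics and
without the walk over values captured from enclosing regions (`canFuseInto`
is a hypothetical name, not code from this patch):

  #include "mlir/IR/Dominance.h"
  #include "mlir/IR/Operation.h"

  using namespace mlir;

  // Returns true if fusing `target` into `source` (materializing the fused
  // loop at source's position) cannot break SSA dominance.
  static bool canFuseInto(Operation *target, Operation *source) {
    if (target == source || target->getBlock() != source->getBlock())
      return false;
    DominanceInfo domInfo(source);
    if (target->isBeforeInBlock(source)) {
      // The fused loop sits at source's position, so every user of target's
      // results must be properly dominated by source.
      for (Operation *user : target->getUsers())
        if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false))
          return false;
    } else {
      // Target moves up to source's position, so each of its operands must
      // already dominate source. Block arguments of the shared block do.
      for (Value operand : target->getOperands())
        if (Operation *def = operand.getDefiningOp())
          if (!domInfo.properlyDominates(def, source,
                                         /*enclosingOpOk=*/false))
            return false;
    }
    return true;
  }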
diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
index b775f988576e3..5934d85373b03 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
@@ -16,7 +16,6 @@
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SCF/Transforms/Transforms.h"
-#include "mlir/Dialect/SCF/Utils/Utils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
@@ -38,6 +37,24 @@ static bool hasNestedParallelOp(ParallelOp ploop) {
return walkResult.wasInterrupted();
}
+/// Verify equal iteration spaces.
+static bool equalIterationSpaces(ParallelOp firstPloop,
+ ParallelOp secondPloop) {
+ if (firstPloop.getNumLoops() != secondPloop.getNumLoops())
+ return false;
+
+ auto matchOperands = [&](const OperandRange &lhs,
+ const OperandRange &rhs) -> bool {
+ // TODO: Extend this to support aliases and equal constants.
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin());
+ };
+ return matchOperands(firstPloop.getLowerBound(),
+ secondPloop.getLowerBound()) &&
+ matchOperands(firstPloop.getUpperBound(),
+ secondPloop.getUpperBound()) &&
+ matchOperands(firstPloop.getStep(), secondPloop.getStep());
+}
+
/// Checks if the parallel loops have mixed access to the same buffers. Returns
/// `true` if the first parallel loop writes to the same indices that the second
/// loop reads.
@@ -136,10 +153,9 @@ verifyDependencies(ParallelOp firstPloop, ParallelOp secondPloop,
static bool isFusionLegal(ParallelOp firstPloop, ParallelOp secondPloop,
const IRMapping &firstToSecondPloopIndices,
llvm::function_ref<bool(Value, Value)> mayAlias) {
- Diagnostic diag(firstPloop.getLoc(), DiagnosticSeverity::Remark);
return !hasNestedParallelOp(firstPloop) &&
!hasNestedParallelOp(secondPloop) &&
- checkFusionStructuralLegality(firstPloop, secondPloop, diag) &&
+ equalIterationSpaces(firstPloop, secondPloop) &&
succeeded(verifyDependencies(firstPloop, secondPloop,
firstToSecondPloopIndices, mayAlias));
}
@@ -158,9 +174,61 @@ static void fuseIfLegal(ParallelOp firstPloop, ParallelOp &secondPloop,
mayAlias))
return;
- IRRewriter rewriter(builder);
- secondPloop = mlir::fuseIndependentSiblingParallelLoops(
- firstPloop, secondPloop, rewriter);
+ DominanceInfo dom;
+ // We are fusing the first loop into the second; make sure there are no
+ // users of the first loop's results between the two loops.
+ for (Operation *user : firstPloop->getUsers())
+ if (!dom.properlyDominates(secondPloop, user, /*enclosingOpOk*/ false))
+ return;
+
+ ValueRange inits1 = firstPloop.getInitVals();
+ ValueRange inits2 = secondPloop.getInitVals();
+
+ SmallVector<Value> newInitVars(inits1.begin(), inits1.end());
+ newInitVars.append(inits2.begin(), inits2.end());
+
+ IRRewriter b(builder);
+ b.setInsertionPoint(secondPloop);
+ auto newSecondPloop = b.create<ParallelOp>(
+ secondPloop.getLoc(), secondPloop.getLowerBound(),
+ secondPloop.getUpperBound(), secondPloop.getStep(), newInitVars);
+
+ Block *newBlock = newSecondPloop.getBody();
+ auto term1 = cast<ReduceOp>(block1->getTerminator());
+ auto term2 = cast<ReduceOp>(block2->getTerminator());
+
+ b.inlineBlockBefore(block2, newBlock, newBlock->begin(),
+ newBlock->getArguments());
+ b.inlineBlockBefore(block1, newBlock, newBlock->begin(),
+ newBlock->getArguments());
+
+ ValueRange results = newSecondPloop.getResults();
+ if (!results.empty()) {
+ b.setInsertionPointToEnd(newBlock);
+
+ ValueRange reduceArgs1 = term1.getOperands();
+ ValueRange reduceArgs2 = term2.getOperands();
+ SmallVector<Value> newReduceArgs(reduceArgs1.begin(), reduceArgs1.end());
+ newReduceArgs.append(reduceArgs2.begin(), reduceArgs2.end());
+
+ auto newReduceOp = b.create<scf::ReduceOp>(term2.getLoc(), newReduceArgs);
+
+ for (auto &&[i, reg] : llvm::enumerate(llvm::concat<Region>(
+ term1.getReductions(), term2.getReductions()))) {
+ Block &oldRedBlock = reg.front();
+ Block &newRedBlock = newReduceOp.getReductions()[i].front();
+ b.inlineBlockBefore(&oldRedBlock, &newRedBlock, newRedBlock.begin(),
+ newRedBlock.getArguments());
+ }
+
+ firstPloop.replaceAllUsesWith(results.take_front(inits1.size()));
+ secondPloop.replaceAllUsesWith(results.take_back(inits2.size()));
+ }
+ term1->erase();
+ term2->erase();
+ firstPloop.erase();
+ secondPloop.erase();
+ secondPloop = newSecondPloop;
}
void mlir::scf::naivelyFuseParallelOps(
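Note that `equalIterationSpaces` above compares bounds by SSA value identity
rather than by constant value, which is what its TODO is about. A hedged
standalone analogue of the operand comparison (`sameOperands` is a
hypothetical name):

  #include "mlir/IR/ValueRange.h"
  #include <algorithm>

  using namespace mlir;

  // Distinct arith.constant ops with equal values produce different SSA
  // values, so loops built from separate constants are not considered
  // equal by this predicate even when numerically identical.
  static bool sameOperands(OperandRange lhs, OperandRange rhs) {
    return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
  }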
diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index abfc9a1b4d444..c0ee9d2afe91c 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -17,7 +17,6 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/BuiltinOps.h"
-#include "mlir/IR/Dominance.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/PatternMatch.h"
@@ -1263,131 +1262,54 @@ TileLoops mlir::extractFixedOuterLoops(scf::ForOp rootForOp,
return tileLoops;
}
-//===----------------------------------------------------------------------===//
-// Fusion related helpers
-//===----------------------------------------------------------------------===//
-
-/// Check if `target` and `source` are siblings, in the context that `target`
-/// is being fused into `source`.
-///
-/// This is a simple check that just checks if both operations are in the same
-/// block and some checks to ensure that the fused IR does not violate
-/// dominance.
-static bool isOpSibling(Operation *target, Operation *source,
- Diagnostic &diag) {
- // Check if both operations are same.
- if (target == source) {
- diag << "target and source need to be different loops";
- return false;
- }
-
- // Check if both operations are in the same block.
- if (target->getBlock() != source->getBlock()) {
- diag << "target and source are not in the same block";
- return false;
- }
-
- // Check if fusion will violate dominance.
- DominanceInfo domInfo(source);
- if (target->isBeforeInBlock(source)) {
- // Since `target` is before `source`, all users of results of `target`
- // need to be dominated by `source`.
- for (Operation *user : target->getUsers()) {
- if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
- diag << "user of results of target should "
- "be properly dominated by source";
- return false;
- }
- }
- } else {
- // Since `target` is after `source`, all values used by `target` need
- // to dominate `source`.
-
- // Check if operands of `target` are dominated by `source`.
- for (Value operand : target->getOperands()) {
- Operation *operandOp = operand.getDefiningOp();
- // Operands without defining operations are block arguments. When `target`
- // and `source` occur in the same block, these operands dominate `source`.
- if (!operandOp)
- continue;
-
- // Operand's defining operation should properly dominate `source`.
- if (!domInfo.properlyDominates(operandOp, source,
- /*enclosingOpOk=*/false)) {
- diag << "operands of target should be properly dominated by source";
- return false;
- }
- }
-
- // Check if values used by `target` are dominated by `source`.
- bool failed = false;
- OpOperand *failedValue = nullptr;
- visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
- Operation *operandOp = operand->get().getDefiningOp();
- if (operandOp && !domInfo.properlyDominates(operandOp, source,
- /*enclosingOpOk=*/false)) {
- // `operand` is not an argument of an enclosing block and the defining
- // op of `operand` is outside `target` but does not dominate `source`.
- failed = true;
- failedValue = operand;
- }
- });
-
- if (failed) {
- diag << "values used inside regions of target should be properly "
- "dominated by source";
- diag.attachNote(failedValue->getOwner()->getLoc()) << "see operation";
- return false;
- }
- }
-
- return true;
-}
-
-bool mlir::checkFusionStructuralLegality(LoopLikeOpInterface target,
- LoopLikeOpInterface source,
- Diagnostic &diag) {
- if (target->getName() != source->getName()) {
- diag << "target and source must be same loop type";
- return false;
- }
-
- bool iterSpaceEq =
- target.getLoopLowerBounds() == source.getLoopLowerBounds() &&
- target.getLoopUpperBounds() == source.getLoopUpperBounds() &&
- target.getLoopSteps() == source.getLoopSteps();
- // TODO: Decouple checks on concrete loop types and move this function
- // somewhere for general utility for `LoopLikeOpInterface`
- if (auto forAllTarget = dyn_cast<scf::ForallOp>(*target))
- iterSpaceEq = iterSpaceEq && forAllTarget.getMapping() ==
- cast<scf::ForallOp>(*source).getMapping();
- if (!iterSpaceEq) {
- diag << "target and source iteration spaces must be equal";
- return false;
- }
- return isOpSibling(target, source, diag);
-}
-
scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForallOp source,
RewriterBase &rewriter) {
- scf::ForallOp fusedLoop = cast<scf::ForallOp>(createFused(
- target, source, rewriter,
- [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
- // `ForallOp` does not have yields, rather an `InParallelOp` terminator.
- return ValueRange{};
- },
- [&](RewriterBase &b, LoopLikeOpInterface source,
- LoopLikeOpInterface &target, IRMapping mapping) {
- auto sourceForall = cast<scf::ForallOp>(source);
- auto targetForall = cast<scf::ForallOp>(target);
- scf::InParallelOp fusedTerm = targetForall.getTerminator();
- b.setInsertionPointToEnd(fusedTerm.getBody());
- for (Operation &op : sourceForall.getTerminator().getYieldingOps())
- b.clone(op, mapping);
- }));
- rewriter.replaceOp(source,
- fusedLoop.getResults().take_back(source.getNumResults()));
+ unsigned numTargetOuts = target.getNumResults();
+ unsigned numSourceOuts = source.getNumResults();
+
+ // Create fused shared_outs.
+ SmallVector<Value> fusedOuts;
+ llvm::append_range(fusedOuts, target.getOutputs());
+ llvm::append_range(fusedOuts, source.getOutputs());
+
+ // Create a new scf.forall op after the source loop.
+ rewriter.setInsertionPointAfter(source);
+ scf::ForallOp fusedLoop = rewriter.create<scf::ForallOp>(
+ source.getLoc(), source.getMixedLowerBound(), source.getMixedUpperBound(),
+ source.getMixedStep(), fusedOuts, source.getMapping());
+
+ // Map control operands.
+ IRMapping mapping;
+ mapping.map(target.getInductionVars(), fusedLoop.getInductionVars());
+ mapping.map(source.getInductionVars(), fusedLoop.getInductionVars());
+
+ // Map shared outs.
+ mapping.map(target.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
+ mapping.map(source.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
+
+ // Append everything except the terminator into the fused operation.
+ rewriter.setInsertionPointToStart(fusedLoop.getBody());
+ for (Operation &op : target.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+ for (Operation &op : source.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+
+ // Fuse the old terminator in_parallel ops into the new one.
+ scf::InParallelOp targetTerm = target.getTerminator();
+ scf::InParallelOp sourceTerm = source.getTerminator();
+ scf::InParallelOp fusedTerm = fusedLoop.getTerminator();
+ rewriter.setInsertionPointToStart(fusedTerm.getBody());
+ for (Operation &op : targetTerm.getYieldingOps())
+ rewriter.clone(op, mapping);
+ for (Operation &op : sourceTerm.getYieldingOps())
+ rewriter.clone(op, mapping);
+
+ // Replace old loops by substituting their uses by results of the fused loop.
+ rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
+ rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
return fusedLoop;
}
@@ -1395,74 +1317,49 @@ scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForOp mlir::fuseIndependentSiblingForLoops(scf::ForOp target,
scf::ForOp source,
RewriterBase &rewriter) {
- scf::ForOp fusedLoop = cast<scf::ForOp>(createFused(
- target, source, rewriter,
- [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
- return source.getYieldedValues();
- },
- [&](RewriterBase &b, LoopLikeOpInterface source,
- LoopLikeOpInterface &target, IRMapping mapping) {
- auto targetFor = cast<scf::ForOp>(target);
- auto newTerm = b.clone(*targetFor.getBody()->getTerminator(), mapping);
- b.replaceOp(targetFor.getBody()->getTerminator(), newTerm);
- }));
- rewriter.replaceOp(source,
- fusedLoop.getResults().take_back(source.getNumResults()));
- return fusedLoop;
-}
-
-// TODO: Finish refactoring this a la the above, but likely requires additional
-// interface methods.
-scf::ParallelOp mlir::fuseIndependentSiblingParallelLoops(
- scf::ParallelOp target, scf::ParallelOp source, RewriterBase &rewriter) {
- OpBuilder::InsertionGuard guard(rewriter);
- Block *block1 = target.getBody();
- Block *block2 = source.getBody();
- auto term1 = cast<scf::ReduceOp>(block1->getTerminator());
- auto term2 = cast<scf::ReduceOp>(block2->getTerminator());
-
- ValueRange inits1 = target.getInitVals();
- ValueRange inits2 = source.getInitVals();
-
- SmallVector<Value> newInitVars(inits1.begin(), inits1.end());
- newInitVars.append(inits2.begin(), inits2.end());
-
- rewriter.setInsertionPoint(source);
- auto fusedLoop = rewriter.create<scf::ParallelOp>(
- rewriter.getFusedLoc(target.getLoc(), source.getLoc()),
- source.getLowerBound(), source.getUpperBound(), source.getStep(),
- newInitVars);
- Block *newBlock = fusedLoop.getBody();
- rewriter.inlineBlockBefore(block2, newBlock, newBlock->begin(),
- newBlock->getArguments());
- rewriter.inlineBlockBefore(block1, newBlock, newBlock->begin(),
- newBlock->getArguments());
-
- ValueRange results = fusedLoop.getResults();
- if (!results.empty()) {
- rewriter.setInsertionPointToEnd(newBlock);
-
- ValueRange reduceArgs1 = term1.getOperands();
- ValueRange reduceArgs2 = term2.getOperands();
- SmallVector<Value> newReduceArgs(reduceArgs1.begin(), reduceArgs1.end());
- newReduceArgs.append(reduceArgs2.begin(), reduceArgs2.end());
-
- auto newReduceOp = rewriter.create<scf::ReduceOp>(
- rewriter.getFusedLoc(term1.getLoc(), term2.getLoc()), newReduceArgs);
-
- for (auto &&[i, reg] : llvm::enumerate(llvm::concat<Region>(
- term1.getReductions(), term2.getReductions()))) {
- Block &oldRedBlock = reg.front();
- Block &newRedBlock = newReduceOp.getReductions()[i].front();
- rewriter.inlineBlockBefore(&oldRedBlock, &newRedBlock,
- newRedBlock.begin(),
- newRedBlock.getArguments());
- }
- }
- rewriter.replaceOp(target, results.take_front(inits1.size()));
- rewriter.replaceOp(source, results.take_back(inits2.size()));
- rewriter.eraseOp(term1);
- rewriter.eraseOp(term2);
+ unsigned numTargetOuts = target.getNumResults();
+ unsigned numSourceOuts = source.getNumResults();
+
+ // Create fused init_args, with target's init_args before source's init_args.
+ SmallVector<Value> fusedInitArgs;
+ llvm::append_range(fusedInitArgs, target.getInitArgs());
+ llvm::append_range(fusedInitArgs, source.getInitArgs());
+
+ // Create a new scf.for op after the source loop. The builder adds an
+ // implicit argument-less scf.yield terminator only if init_args is empty.
+ rewriter.setInsertionPointAfter(source);
+ scf::ForOp fusedLoop = rewriter.create<scf::ForOp>(
+ source.getLoc(), source.getLowerBound(), source.getUpperBound(),
+ source.getStep(), fusedInitArgs);
+
+ // Map original induction variables and operands to those of the fused loop.
+ IRMapping mapping;
+ mapping.map(target.getInductionVar(), fusedLoop.getInductionVar());
+ mapping.map(target.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
+ mapping.map(source.getInductionVar(), fusedLoop.getInductionVar());
+ mapping.map(source.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
+
+ // Merge target's body into the new (fused) for loop and then source's body.
+ rewriter.setInsertionPointToStart(fusedLoop.getBody());
+ for (Operation &op : target.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+ for (Operation &op : source.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+
+ // Build fused yield results by appropriately mapping original yield operands.
+ SmallVector<Value> yieldResults;
+ for (Value operand : target.getBody()->getTerminator()->getOperands())
+ yieldResults.push_back(mapping.lookupOrDefault(operand));
+ for (Value operand : source.getBody()->getTerminator()->getOperands())
+ yieldResults.push_back(mapping.lookupOrDefault(operand));
+ if (!yieldResults.empty())
+ rewriter.create<scf::YieldOp>(source.getLoc(), yieldResults);
+
+ // Replace old loops by substituting their uses by results of the fused loop.
+ rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
+ rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
return fusedLoop;
}
diff --git a/mlir/lib/Interfaces/LoopLikeInterface.cpp b/mlir/lib/Interfaces/LoopLikeInterface.cpp
index 6f0ebec0519be..1e0e87b64e811 100644
--- a/mlir/lib/Interfaces/LoopLikeInterface.cpp
+++ b/mlir/lib/Interfaces/LoopLikeInterface.cpp
@@ -8,8 +8,6 @@
#include "mlir/Interfaces/LoopLikeInterface.h"
-#include "mlir/IR/IRMapping.h"
-#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/FunctionInterfaces.h"
#include "llvm/ADT/DenseSet.h"
@@ -115,56 +113,3 @@ LogicalResult detail::verifyLoopLikeOpInterface(Operation *op) {
return success();
}
-
-LoopLikeOpInterface mlir::createFused(LoopLikeOpInterface target,
- LoopLikeOpInterface source,
- RewriterBase &rewriter,
- NewYieldValuesFn newYieldValuesFn,
- FuseTerminatorFn fuseTerminatorFn) {
- auto targetIterArgs = target.getRegionIterArgs();
- std::optional<SmallVector<Value>> targetInductionVar =
- target.getLoopInductionVars();
- SmallVector<Value> targetYieldOperands(target.getYieldedValues());
- auto sourceIterArgs = source.getRegionIterArgs();
- std::optional<SmallVector<Value>> sourceInductionVar =
- *source.getLoopInductionVars();
- SmallVector<Value> sourceYieldOperands(source.getYieldedValues());
- auto sourceRegion = source.getLoopRegions().front();
-
- FailureOr<LoopLikeOpInterface> maybeFusedLoop =
- target.replaceWithAdditionalYields(rewriter, source.getInits(),
- /*replaceInitOperandUsesInLoop=*/false,
- newYieldValuesFn);
- if (failed(maybeFusedLoop))
- llvm_unreachable("failed to replace loop");
- LoopLikeOpInterface fusedLoop = *maybeFusedLoop;
-
- // Map control operands.
- IRMapping mapping;
- std::optional<SmallVector<Value>> fusedInductionVar =
- fusedLoop.getLoopInductionVars();
- if (fusedInductionVar) {
- if (!targetInductionVar || !sourceInductionVar)
- llvm_unreachable("expected target and source loops to have induction vars");
- mapping.map(*targetInductionVar, *fusedInductionVar);
- mapping.map(*sourceInductionVar, *fusedInductionVar);
- }
- mapping.map(targetIterArgs,
- fusedLoop.getRegionIterArgs().take_front(targetIterArgs.size()));
- mapping.map(targetYieldOperands,
- fusedLoop.getYieldedValues().take_front(targetIterArgs.size()));
- mapping.map(sourceIterArgs,
- fusedLoop.getRegionIterArgs().take_back(sourceIterArgs.size()));
- mapping.map(sourceYieldOperands,
- fusedLoop.getYieldedValues().take_back(sourceIterArgs.size()));
- // Append everything except the terminator into the fused operation.
- rewriter.setInsertionPoint(
- fusedLoop.getLoopRegions().front()->front().getTerminator());
- for (Operation &op : sourceRegion->front().without_terminator())
- rewriter.clone(op, mapping);
-
- // TODO: Replace with corresponding interface method if added
- fuseTerminatorFn(rewriter, source, fusedLoop, mapping);
-
- return fusedLoop;
-}
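For reference, the deleted `createFused` hook was driven with two callbacks,
as in the `fuseIndependentSiblingForallLoops` body removed from Utils.cpp
above. A condensed reconstruction of that call pattern, shown only to record
the contract this revert drops (`fuseForallsViaCreateFused` is a hypothetical
name):

  #include "mlir/Dialect/SCF/IR/SCF.h"
  #include "mlir/IR/IRMapping.h"
  #include "mlir/IR/PatternMatch.h"
  #include "mlir/Interfaces/LoopLikeInterface.h"

  using namespace mlir;

  // createFused built the fused loop via replaceWithAdditionalYields, then
  // let the second callback stitch source's terminator into it.
  static scf::ForallOp fuseForallsViaCreateFused(scf::ForallOp target,
                                                 scf::ForallOp source,
                                                 RewriterBase &rewriter) {
    scf::ForallOp fused = cast<scf::ForallOp>(createFused(
        target, source, rewriter,
        /*newYieldValuesFn=*/
        [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
          // scf.forall has no yields; results come from scf.in_parallel.
          return ValueRange{};
        },
        /*fuseTerminatorFn=*/
        [&](RewriterBase &b, LoopLikeOpInterface src,
            LoopLikeOpInterface &tgt, IRMapping mapping) {
          scf::InParallelOp fusedTerm =
              cast<scf::ForallOp>(tgt).getTerminator();
          b.setInsertionPointToEnd(fusedTerm.getBody());
          for (Operation &op :
               cast<scf::ForallOp>(src).getTerminator().getYieldingOps())
            b.clone(op, mapping);
        }));
    rewriter.replaceOp(source,
                       fused.getResults().take_back(source.getNumResults()));
    return fused;
  }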
diff --git a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
index 91ed2a5269d74..54dd2bdf953ca 100644
--- a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
+++ b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
@@ -47,169 +47,6 @@ module attributes {transform.with_named_sequence} {
// -----
-// CHECK-LABEL: func @fuse_two_parallel
-// CHECK-SAME: ([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}) {
-func.func @fuse_two_parallel(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
-// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
-// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
-// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
-// CHECK-DAG: [[C1FP:%.*]] = arith.constant 1.
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c1fp = arith.constant 1.0 : f32
-// CHECK: [[SUM:%.*]] = memref.alloc()
- %sum = memref.alloc() : memref<2x2xf32>
-// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]])
-// CHECK-SAME: to ([[C2]], [[C2]]) step ([[C1]], [[C1]]) {
-// CHECK: [[B_ELEM:%.*]] = memref.load [[B]]{{\[}}[[I]], [[J]]]
-// CHECK: [[SUM_ELEM:%.*]] = arith.addf [[B_ELEM]], [[C1FP]]
-// CHECK: memref.store [[SUM_ELEM]], [[SUM]]{{\[}}[[I]], [[J]]]
-// CHECK-NOT: scf.parallel
-// CHECK: [[SUM_ELEM_:%.*]] = memref.load [[SUM]]{{\[}}[[I]], [[J]]]
-// CHECK: [[A_ELEM:%.*]] = memref.load [[A]]{{\[}}[[I]], [[J]]]
-// CHECK: [[PRODUCT_ELEM:%.*]] = arith.mulf [[SUM_ELEM_]], [[A_ELEM]]
-// CHECK: memref.store [[PRODUCT_ELEM]], [[B]]{{\[}}[[I]], [[J]]]
-// CHECK: scf.reduce
-// CHECK: }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
- %sum_elem = arith.addf %B_elem, %c1fp : f32
- memref.store %sum_elem, %sum[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
- %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
- %product_elem = arith.mulf %sum_elem, %A_elem : f32
- memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
-// CHECK: memref.dealloc [[SUM]]
- memref.dealloc %sum : memref<2x2xf32>
- return
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-// CHECK-LABEL: func @fuse_two_parallel_reverse
-// CHECK-SAME: ([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}) {
-func.func @fuse_two_parallel_reverse(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
-// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
-// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
-// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
-// CHECK-DAG: [[C1FP:%.*]] = arith.constant 1.
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c1fp = arith.constant 1.0 : f32
-// CHECK: [[SUM:%.*]] = memref.alloc()
- %sum = memref.alloc() : memref<2x2xf32>
-// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]])
-// CHECK-SAME: to ([[C2]], [[C2]]) step ([[C1]], [[C1]]) {
-// CHECK: [[SUM_ELEM_:%.*]] = memref.load [[SUM]]{{\[}}[[I]], [[J]]]
-// CHECK: [[A_ELEM:%.*]] = memref.load [[A]]{{\[}}[[I]], [[J]]]
-// CHECK: [[PRODUCT_ELEM:%.*]] = arith.mulf [[SUM_ELEM_]], [[A_ELEM]]
-// CHECK: memref.store [[PRODUCT_ELEM]], [[B]]{{\[}}[[I]], [[J]]]
-// CHECK-NOT: scf.parallel
-// CHECK: [[B_ELEM:%.*]] = memref.load [[B]]{{\[}}[[I]], [[J]]]
-// CHECK: [[SUM_ELEM:%.*]] = arith.addf [[B_ELEM]], [[C1FP]]
-// CHECK: memref.store [[SUM_ELEM]], [[SUM]]{{\[}}[[I]], [[J]]]
-// CHECK: scf.reduce
-// CHECK: }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
- %sum_elem = arith.addf %B_elem, %c1fp : f32
- memref.store %sum_elem, %sum[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
- %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
- %product_elem = arith.mulf %sum_elem, %A_elem : f32
- memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
-// CHECK: memref.dealloc [[SUM]]
- memref.dealloc %sum : memref<2x2xf32>
- return
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused = transform.loop.fuse_sibling %parallel#1 into %parallel#0 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-// CHECK-LABEL: func @fuse_reductions_two
-// CHECK-SAME: (%[[A:.*]]: memref<2x2xf32>, %[[B:.*]]: memref<2x2xf32>) -> (f32, f32)
-func.func @fuse_reductions_two(%A: memref<2x2xf32>, %B: memref<2x2xf32>) -> (f32, f32) {
-// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: %[[INIT1:.*]] = arith.constant 1.000000e+00 : f32
-// CHECK-DAG: %[[INIT2:.*]] = arith.constant 2.000000e+00 : f32
-// CHECK: %[[RES:.*]]:2 = scf.parallel (%[[I:.*]], %[[J:.*]]) = (%[[C0]], %[[C0]])
-// CHECK-SAME: to (%[[C2]], %[[C2]]) step (%[[C1]], %[[C1]])
-// CHECK-SAME: init (%[[INIT1]], %[[INIT2]]) -> (f32, f32)
-// CHECK: %[[VAL_A:.*]] = memref.load %[[A]][%[[I]], %[[J]]]
-// CHECK: %[[VAL_B:.*]] = memref.load %[[B]][%[[I]], %[[J]]]
-// CHECK: scf.reduce(%[[VAL_A]], %[[VAL_B]] : f32, f32) {
-// CHECK: ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32):
-// CHECK: %[[R:.*]] = arith.addf %[[LHS]], %[[RHS]] : f32
-// CHECK: scf.reduce.return %[[R]] : f32
-// CHECK: }
-// CHECK: ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32):
-// CHECK: %[[R:.*]] = arith.mulf %[[LHS]], %[[RHS]] : f32
-// CHECK: scf.reduce.return %[[R]] : f32
-// CHECK: }
-// CHECK: return %[[RES]]#0, %[[RES]]#1 : f32, f32
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %init1 = arith.constant 1.0 : f32
- %init2 = arith.constant 2.0 : f32
- %res1 = scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) init(%init1) -> f32 {
- %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
- scf.reduce(%A_elem : f32) {
- ^bb0(%lhs: f32, %rhs: f32):
- %1 = arith.addf %lhs, %rhs : f32
- scf.reduce.return %1 : f32
- }
- }
- %res2 = scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) init(%init2) -> f32 {
- %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
- scf.reduce(%B_elem : f32) {
- ^bb0(%lhs: f32, %rhs: f32):
- %1 = arith.mulf %lhs, %rhs : f32
- scf.reduce.return %1 : f32
- }
- }
- return %res1, %res2 : f32, f32
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
// CHECK: func.func @fuse_2nd_for_into_1st([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
func.func @fuse_2nd_for_into_1st(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
@@ -445,9 +282,8 @@ func.func @target_for_region_uses_result_of_source_for_err(%A: tensor<128xf32>,
%6 = vector.transfer_write %5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
scf.yield %6 : tensor<128xf32>
}
- // expected-error @below {{values used inside regions of target should be properly dominated by source}}
%dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
- // expected-note @below {{see operation}}
+ // expected-error @below {{values used inside regions of target should be properly dominated by source}}
%dup2 = vector.transfer_read %1[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
%dup3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
%dup5 = arith.addf %dup3, %dup2 : vector<16xf32>
@@ -492,74 +328,6 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
-
-// -----
-
-func.func @non_matching_iteration_spaces_err(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c1fp = arith.constant 1.0 : f32
- %sum = memref.alloc() : memref<2x2xf32>
- // expected-error @below {{target and source iteration spaces must be equal}}
- scf.parallel (%i) = (%c0) to (%c2) step (%c1) {
- %B_elem = memref.load %B[%i, %c0] : memref<2x2xf32>
- %sum_elem = arith.addf %B_elem, %c1fp : f32
- memref.store %sum_elem, %sum[%i, %c0] : memref<2x2xf32>
- scf.reduce
- }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
- %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
- %product_elem = arith.mulf %sum_elem, %A_elem : f32
- memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
- memref.dealloc %sum : memref<2x2xf32>
- return
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-func.func @non_matching_loop_types_err(%A: memref<2xf32>, %B: memref<2xf32>) {
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c1fp = arith.constant 1.0 : f32
- %sum = memref.alloc() : memref<2xf32>
- // expected-error @below {{target and source must be same loop type}}
- scf.for %i = %c0 to %c2 step %c1 {
- %B_elem = memref.load %B[%i] : memref<2xf32>
- %sum_elem = arith.addf %B_elem, %c1fp : f32
- memref.store %sum_elem, %sum[%i] : memref<2xf32>
- }
- scf.parallel (%i) = (%c0) to (%c2) step (%c1) {
- %sum_elem = memref.load %sum[%i] : memref<2xf32>
- %A_elem = memref.load %A[%i] : memref<2xf32>
- %product_elem = arith.mulf %sum_elem, %A_elem : f32
- memref.store %product_elem, %B[%i] : memref<2xf32>
- scf.reduce
- }
- memref.dealloc %sum : memref<2xf32>
- return
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %fused = transform.loop.fuse_sibling %0 into %1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
// -----
// CHECK: func.func @foreach_loop_pair_fuse([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
From e1094dd889c516da0c3181bf2be44ad631a84255 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.ritter at amd.com>
Date: Wed, 3 Jul 2024 08:32:35 +0200
Subject: [PATCH 074/246] [AMDGPU][DAG] Enable ganging up of memcpy
loads/stores for AMDGPU (#96185)
In the SelectionDAG lowering of the memcpy intrinsic, this optimization
introduces additional chains between fixed-size groups of loads and the
corresponding stores. While initially introduced to ensure that wider
load/store-pair instructions are generated on AArch64, this optimization
also improves code generation for AMDGPU: Ganged loads are scheduled
into a clause; stores only await completion of their corresponding load.
The chosen value of 16 performed well in microbenchmarks; values of 8,
32, or 64 would perform similarly.
The testcase updates are autogenerated by
utils/update_llc_test_checks.py.
See also:
- PR introducing this optimization: https://reviews.llvm.org/D46477
Part of SWDEV-455845.
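For other targets, the enablement is a single knob on TargetLowering, set in
the target's constructor; the hunk below does this for AMDGPU. A minimal
sketch under hypothetical names (`MyTargetLowering` is not a real class;
`MaxGluedStoresPerMemcpy` is the actual field this patch sets):

  #include "llvm/CodeGen/TargetLowering.h"
  #include "llvm/Target/TargetMachine.h"

  using namespace llvm;

  class MyTargetLowering : public TargetLowering {
  public:
    explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {
      // Glue groups of up to 16 loads to their corresponding stores when
      // lowering memcpy in SelectionDAG; the default of 0 disables gluing.
      MaxGluedStoresPerMemcpy = 16;
    }
  };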
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 3 +
.../AMDGPU/gfx-callable-argument-types.ll | 16 +-
.../test/CodeGen/AMDGPU/memcpy-fixed-align.ll | 32 +-
llvm/test/CodeGen/AMDGPU/memcpy-libcall.ll | 4228 ++++----
.../AMDGPU/memcpy-param-combinations.ll | 9296 +++++++++++++++++
.../test/CodeGen/AMDGPU/memcpy-scalar-load.ll | 71 +
.../AMDGPU/memmove-param-combinations.ll | 8698 +++++++++++++++
.../CodeGen/AMDGPU/memmove-scalar-load.ll | 77 +
8 files changed, 20193 insertions(+), 2228 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/memcpy-param-combinations.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/memcpy-scalar-load.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/memmove-param-combinations.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/memmove-scalar-load.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 747cedb111d31..ef30bf6d993fa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -67,6 +67,9 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = ~0U;
MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = ~0U;
+ // Enable ganging up loads and stores in the memcpy DAG lowering.
+ MaxGluedStoresPerMemcpy = 16;
+
// Lower floating point store/load to integer store/load to reduce the number
// of patterns in tablegen.
setOperationAction(ISD::LOAD, MVT::f32, Promote);
diff --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
index a118fa388f86d..645e48f1bb1ab 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
@@ -9074,8 +9074,8 @@ define void @tail_call_byval_align16(<32 x i32> %val, double %tmp) #0 {
; GFX9-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:20
-; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s33 offset:16
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:16
+; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s33 offset:20
; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s33
; GFX9-NEXT: v_writelane_b32 v40, s30, 0
; GFX9-NEXT: v_writelane_b32 v40, s31, 1
@@ -9113,9 +9113,9 @@ define void @tail_call_byval_align16(<32 x i32> %val, double %tmp) #0 {
; GFX9-NEXT: s_mov_b32 s4, byval_align16_f64_arg at abs32@lo
; GFX9-NEXT: v_writelane_b32 v40, s63, 31
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:4
; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
; GFX9-NEXT: v_readlane_b32 s63, v40, 31
; GFX9-NEXT: v_readlane_b32 s62, v40, 30
@@ -9167,17 +9167,17 @@ define void @tail_call_byval_align16(<32 x i32> %val, double %tmp) #0 {
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_mov_b32 exec_lo, s4
; GFX10-NEXT: s_clause 0x2
-; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:20
-; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s33 offset:16
+; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:16
+; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s33 offset:20
; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s33
; GFX10-NEXT: v_writelane_b32 v40, s30, 0
; GFX10-NEXT: s_addk_i32 s32, 0x400
; GFX10-NEXT: s_mov_b32 s5, byval_align16_f64_arg at abs32@hi
; GFX10-NEXT: s_mov_b32 s4, byval_align16_f64_arg at abs32@lo
; GFX10-NEXT: s_waitcnt vmcnt(2)
-; GFX10-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:4
+; GFX10-NEXT: buffer_store_dword v32, off, s[0:3], s32
; GFX10-NEXT: s_waitcnt vmcnt(1)
-; GFX10-NEXT: buffer_store_dword v33, off, s[0:3], s32
+; GFX10-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:4
; GFX10-NEXT: v_writelane_b32 v40, s31, 1
; GFX10-NEXT: v_writelane_b32 v40, s34, 2
; GFX10-NEXT: v_writelane_b32 v40, s35, 3
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll b/llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll
index a5e0ceaa6b329..343925528a520 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll
@@ -8,22 +8,22 @@ define void @memcpy_fixed_align(ptr addrspace(5) %dst, ptr addrspace(1) %src) {
; MUBUF: ; %bb.0:
; MUBUF-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; MUBUF-NEXT: global_load_dwordx2 v[11:12], v[1:2], off offset:32
-; MUBUF-NEXT: global_load_dwordx4 v[3:6], v[1:2], off offset:16
-; MUBUF-NEXT: global_load_dwordx4 v[7:10], v[1:2], off
+; MUBUF-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; MUBUF-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
; MUBUF-NEXT: v_lshrrev_b32_e64 v0, 6, s32
; MUBUF-NEXT: s_waitcnt vmcnt(2)
-; MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:36
; MUBUF-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32
+; MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:36
; MUBUF-NEXT: s_waitcnt vmcnt(3)
-; MUBUF-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28
-; MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24
-; MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20
-; MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16
+; MUBUF-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:12
+; MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:8
+; MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:4
+; MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], s32
; MUBUF-NEXT: s_waitcnt vmcnt(6)
-; MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:12
-; MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:8
-; MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:4
-; MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], s32
+; MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28
+; MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24
+; MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20
+; MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:16
; MUBUF-NEXT: ;;#ASMSTART
; MUBUF-NEXT: ; use v0
; MUBUF-NEXT: ;;#ASMEND
@@ -33,16 +33,16 @@ define void @memcpy_fixed_align(ptr addrspace(5) %dst, ptr addrspace(1) %src) {
; FLATSCR-LABEL: memcpy_fixed_align:
; FLATSCR: ; %bb.0:
; FLATSCR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; FLATSCR-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; FLATSCR-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
; FLATSCR-NEXT: global_load_dwordx2 v[11:12], v[1:2], off offset:32
-; FLATSCR-NEXT: global_load_dwordx4 v[3:6], v[1:2], off offset:16
-; FLATSCR-NEXT: global_load_dwordx4 v[7:10], v[1:2], off
; FLATSCR-NEXT: v_mov_b32_e32 v0, s32
; FLATSCR-NEXT: s_waitcnt vmcnt(2)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[11:12], s32 offset:32
+; FLATSCR-NEXT: scratch_store_dwordx4 off, v[3:6], s32
; FLATSCR-NEXT: s_waitcnt vmcnt(2)
-; FLATSCR-NEXT: scratch_store_dwordx4 off, v[3:6], s32 offset:16
+; FLATSCR-NEXT: scratch_store_dwordx4 off, v[7:10], s32 offset:16
; FLATSCR-NEXT: s_waitcnt vmcnt(2)
-; FLATSCR-NEXT: scratch_store_dwordx4 off, v[7:10], s32
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[11:12], s32 offset:32
; FLATSCR-NEXT: ;;#ASMSTART
; FLATSCR-NEXT: ; use v0
; FLATSCR-NEXT: ;;#ASMEND
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-libcall.ll b/llvm/test/CodeGen/AMDGPU/memcpy-libcall.ll
index 358f42dfe8dd5..ae1f31272a15f 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-libcall.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-libcall.ll
@@ -13,148 +13,105 @@ define amdgpu_kernel void @memcpy_p0_p0_minsize(ptr %dest, ptr readonly %src) #0
; CHECK-NEXT: v_mov_b32_e32 v0, s2
; CHECK-NEXT: v_mov_b32_e32 v1, s3
; CHECK-NEXT: flat_load_ubyte v4, v[0:1]
+; CHECK-NEXT: flat_load_ubyte v5, v[0:1] offset:1
+; CHECK-NEXT: flat_load_ubyte v6, v[0:1] offset:2
+; CHECK-NEXT: flat_load_ubyte v7, v[0:1] offset:3
+; CHECK-NEXT: flat_load_ubyte v8, v[0:1] offset:4
+; CHECK-NEXT: flat_load_ubyte v9, v[0:1] offset:5
+; CHECK-NEXT: flat_load_ubyte v10, v[0:1] offset:6
+; CHECK-NEXT: flat_load_ubyte v11, v[0:1] offset:7
+; CHECK-NEXT: flat_load_ubyte v12, v[0:1] offset:8
+; CHECK-NEXT: flat_load_ubyte v13, v[0:1] offset:9
+; CHECK-NEXT: flat_load_ubyte v14, v[0:1] offset:10
+; CHECK-NEXT: flat_load_ubyte v15, v[0:1] offset:11
+; CHECK-NEXT: flat_load_ubyte v16, v[0:1] offset:12
+; CHECK-NEXT: flat_load_ubyte v17, v[0:1] offset:13
+; CHECK-NEXT: flat_load_ubyte v18, v[0:1] offset:14
; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[2:3], v4
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:1
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:1
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:2
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:2
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:3
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:3
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:4
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:4
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:5
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:5
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:6
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:6
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:7
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:7
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:8
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:8
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:9
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:9
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:10
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:10
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:11
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:11
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:12
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:12
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:13
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:13
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:14
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:14
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:15
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:15
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:16
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:16
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:17
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:17
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:18
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:18
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:19
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:19
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:20
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:20
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:21
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:21
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:22
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:22
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:23
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:23
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:24
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:24
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:25
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:25
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:26
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:26
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:27
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:27
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:28
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:28
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:29
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:29
+; CHECK-NEXT: flat_store_byte v[2:3], v5 offset:1
+; CHECK-NEXT: flat_store_byte v[2:3], v6 offset:2
+; CHECK-NEXT: flat_store_byte v[2:3], v7 offset:3
+; CHECK-NEXT: flat_store_byte v[2:3], v8 offset:4
+; CHECK-NEXT: flat_store_byte v[2:3], v9 offset:5
+; CHECK-NEXT: flat_store_byte v[2:3], v10 offset:6
+; CHECK-NEXT: flat_store_byte v[2:3], v11 offset:7
+; CHECK-NEXT: flat_store_byte v[2:3], v12 offset:8
+; CHECK-NEXT: flat_store_byte v[2:3], v13 offset:9
+; CHECK-NEXT: flat_store_byte v[2:3], v14 offset:10
+; CHECK-NEXT: flat_store_byte v[2:3], v15 offset:11
+; CHECK-NEXT: flat_store_byte v[2:3], v16 offset:12
+; CHECK-NEXT: flat_store_byte v[2:3], v17 offset:13
+; CHECK-NEXT: flat_store_byte v[2:3], v18 offset:14
; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:30
+; CHECK-NEXT: flat_load_ubyte v5, v[0:1] offset:29
+; CHECK-NEXT: flat_load_ubyte v6, v[0:1] offset:28
+; CHECK-NEXT: flat_load_ubyte v7, v[0:1] offset:27
+; CHECK-NEXT: flat_load_ubyte v8, v[0:1] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[0:1] offset:25
+; CHECK-NEXT: flat_load_ubyte v10, v[0:1] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[0:1] offset:23
+; CHECK-NEXT: flat_load_ubyte v12, v[0:1] offset:22
+; CHECK-NEXT: flat_load_ubyte v13, v[0:1] offset:21
+; CHECK-NEXT: flat_load_ubyte v14, v[0:1] offset:20
+; CHECK-NEXT: flat_load_ubyte v15, v[0:1] offset:19
+; CHECK-NEXT: flat_load_ubyte v16, v[0:1] offset:18
+; CHECK-NEXT: flat_load_ubyte v17, v[0:1] offset:17
+; CHECK-NEXT: flat_load_ubyte v18, v[0:1] offset:16
+; CHECK-NEXT: flat_load_ubyte v19, v[0:1] offset:15
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:30
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:31
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:31
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:32
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:33
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:33
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:34
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:34
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:35
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:35
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:36
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:36
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:37
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:37
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:38
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:38
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:39
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:39
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:40
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:40
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:41
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:41
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:42
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:42
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:43
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:43
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:44
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:44
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:45
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:45
-; CHECK-NEXT: flat_load_ubyte v0, v[0:1] offset:46
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v0 offset:46
+; CHECK-NEXT: flat_store_byte v[2:3], v5 offset:29
+; CHECK-NEXT: flat_store_byte v[2:3], v6 offset:28
+; CHECK-NEXT: flat_store_byte v[2:3], v7 offset:27
+; CHECK-NEXT: flat_store_byte v[2:3], v8 offset:26
+; CHECK-NEXT: flat_store_byte v[2:3], v9 offset:25
+; CHECK-NEXT: flat_store_byte v[2:3], v10 offset:24
+; CHECK-NEXT: flat_store_byte v[2:3], v11 offset:23
+; CHECK-NEXT: flat_store_byte v[2:3], v12 offset:22
+; CHECK-NEXT: flat_store_byte v[2:3], v13 offset:21
+; CHECK-NEXT: flat_store_byte v[2:3], v14 offset:20
+; CHECK-NEXT: flat_store_byte v[2:3], v15 offset:19
+; CHECK-NEXT: flat_store_byte v[2:3], v16 offset:18
+; CHECK-NEXT: flat_store_byte v[2:3], v17 offset:17
+; CHECK-NEXT: flat_store_byte v[2:3], v18 offset:16
+; CHECK-NEXT: flat_store_byte v[2:3], v19 offset:15
+; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:46
+; CHECK-NEXT: flat_load_ubyte v5, v[0:1] offset:45
+; CHECK-NEXT: flat_load_ubyte v6, v[0:1] offset:44
+; CHECK-NEXT: flat_load_ubyte v7, v[0:1] offset:43
+; CHECK-NEXT: flat_load_ubyte v8, v[0:1] offset:42
+; CHECK-NEXT: flat_load_ubyte v9, v[0:1] offset:41
+; CHECK-NEXT: flat_load_ubyte v10, v[0:1] offset:40
+; CHECK-NEXT: flat_load_ubyte v11, v[0:1] offset:39
+; CHECK-NEXT: flat_load_ubyte v12, v[0:1] offset:38
+; CHECK-NEXT: flat_load_ubyte v13, v[0:1] offset:37
+; CHECK-NEXT: flat_load_ubyte v14, v[0:1] offset:36
+; CHECK-NEXT: flat_load_ubyte v15, v[0:1] offset:35
+; CHECK-NEXT: flat_load_ubyte v16, v[0:1] offset:34
+; CHECK-NEXT: flat_load_ubyte v17, v[0:1] offset:33
+; CHECK-NEXT: flat_load_ubyte v18, v[0:1] offset:32
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_load_ubyte v0, v[0:1] offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:46
+; CHECK-NEXT: flat_store_byte v[2:3], v5 offset:45
+; CHECK-NEXT: flat_store_byte v[2:3], v6 offset:44
+; CHECK-NEXT: flat_store_byte v[2:3], v7 offset:43
+; CHECK-NEXT: flat_store_byte v[2:3], v8 offset:42
+; CHECK-NEXT: flat_store_byte v[2:3], v9 offset:41
+; CHECK-NEXT: flat_store_byte v[2:3], v10 offset:40
+; CHECK-NEXT: flat_store_byte v[2:3], v11 offset:39
+; CHECK-NEXT: flat_store_byte v[2:3], v12 offset:38
+; CHECK-NEXT: flat_store_byte v[2:3], v13 offset:37
+; CHECK-NEXT: flat_store_byte v[2:3], v14 offset:36
+; CHECK-NEXT: flat_store_byte v[2:3], v15 offset:35
+; CHECK-NEXT: flat_store_byte v[2:3], v16 offset:34
+; CHECK-NEXT: flat_store_byte v[2:3], v17 offset:33
+; CHECK-NEXT: flat_store_byte v[2:3], v18 offset:32
+; CHECK-NEXT: flat_store_byte v[2:3], v0 offset:31
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 47, i1 false)
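
What this hunk shows: the old expansion emitted one flat_load_ubyte, a
full "s_waitcnt vmcnt(0) lgkmcnt(0)" drain, and one flat_store_byte per
byte, while the new code issues up to 16 byte loads into distinct VGPRs,
waits once, and then issues the matching stores. Flat accesses are
tracked by both the vmcnt and lgkmcnt counters (a flat address may hit
LDS), which is why each removed wait used to drain the whole memory
pipeline. For reference, a minimal standalone input reproducing this
test function would look roughly like the sketch below; the minsize
attribute is inferred from the test name, and the parameter attributes
are reconstructed rather than copied from the file:

  declare void @llvm.memcpy.p0.p0.i64(ptr noalias writeonly, ptr noalias readonly, i64, i1 immarg)

  ; 47-byte copy through the generic (flat) address space; with no
  ; alignment information the backend lowers it to byte loads/stores.
  define amdgpu_kernel void @memcpy_p0_p0_minsize(ptr %dest, ptr %src) minsize {
  entry:
    tail call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 47, i1 false)
    ret void
  }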
@@ -165,20 +122,20 @@ define amdgpu_kernel void @memcpy_p1_p1_minsize(ptr addrspace(1) %dest, ptr addr
; CHECK-LABEL: memcpy_p1_p1_minsize:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v12, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_dwordx2 v[0:1], v4, s[2:3] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1] offset:32
-; CHECK-NEXT: global_load_dwordx2 v[0:1], v4, s[2:3] offset:39
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1] offset:39
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:16
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:16
+; CHECK-NEXT: global_load_dwordx2 v[8:9], v12, s[2:3] offset:32
+; CHECK-NEXT: global_load_dwordx2 v[10:11], v12, s[2:3] offset:39
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v12, s[2:3]
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v12, s[2:3] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx2 v12, v[8:9], s[0:1] offset:32
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx2 v12, v[10:11], s[0:1] offset:39
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx4 v12, v[0:3], s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx4 v12, v[4:7], s[0:1] offset:16
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) %dest, ptr addrspace(1) %src, i64 47, i1 false)
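
The vmcnt(N) operand is the number of vector-memory operations still
allowed to be outstanding, and the counter decrements in issue order, so
"s_waitcnt vmcnt(3)" after four back-to-back loads guarantees that the
first load has completed: the store of v[8:9] can go out while the other
three loads are still in flight. Also worth noting for the 47-byte size:
the two dwordx2 pieces at offset 32 (bytes 32-39) and offset 39 (bytes
39-46) deliberately overlap by one byte, the usual way of covering an
odd-sized tail with wide accesses (16 + 16 + 8 + 8 = 48 bytes issued for
47 copied).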
@@ -189,32 +146,32 @@ define amdgpu_kernel void @memcpy_p1_p4_minsize(ptr addrspace(1) %global, ptr ad
; CHECK-LABEL: memcpy_p1_p4_minsize:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:16
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:16
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:32
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:48
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:48
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:64
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:64
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:80
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:80
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:96
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:96
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:112
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:112
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v32, s[2:3]
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v32, s[2:3] offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v32, s[2:3] offset:32
+; CHECK-NEXT: global_load_dwordx4 v[12:15], v32, s[2:3] offset:48
+; CHECK-NEXT: global_load_dwordx4 v[16:19], v32, s[2:3] offset:64
+; CHECK-NEXT: global_load_dwordx4 v[20:23], v32, s[2:3] offset:80
+; CHECK-NEXT: global_load_dwordx4 v[24:27], v32, s[2:3] offset:96
+; CHECK-NEXT: global_load_dwordx4 v[28:31], v32, s[2:3] offset:112
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) %global, ptr addrspace(4) %0, i64 128, i1 false)
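
Worked out for the 128-byte case: 128 bytes is exactly eight dwordx4
(16-byte) transfers, so the copy is fully unrolled with all eight loads
issued before any store. The cost is register pressure, roughly 5 VGPRs
before (v[0:3] plus the shared zero-offset register v4) versus 33 after
(v[0:31] plus v32); the payoff is that every "s_waitcnt vmcnt(7)" only
waits until the next store's source registers have arrived, keeping the
remaining loads in flight instead of draining to vmcnt(0) each time.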
@@ -231,394 +188,372 @@ define amdgpu_kernel void @memcpy_p5_p4_minsize(ptr addrspace(5) %local, ptr add
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_add_u32 s8, s8, s7
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1]
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:1
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:2
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:3
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:4
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:5
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:6
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:7
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:8
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:9
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:10
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:11
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:12
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:13
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:14
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:15
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:15
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:14
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:13
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:12
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:11
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:10
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:9
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:8
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:7
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:6
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:5
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:4
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:3
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:2
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:1
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1]
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:31
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:30
; CHECK-NEXT: s_addc_u32 s9, s9, 0
; CHECK-NEXT: v_mov_b32_e32 v1, s2
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:16
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:17
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:18
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:13
; CHECK-NEXT: s_waitcnt vmcnt(18)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:19
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:10
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:23
; CHECK-NEXT: s_waitcnt vmcnt(19)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:1
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:20
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:9
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:22
; CHECK-NEXT: s_waitcnt vmcnt(20)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:2
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:21
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:8
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:21
; CHECK-NEXT: s_waitcnt vmcnt(21)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:3
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:22
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:7
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:20
; CHECK-NEXT: s_waitcnt vmcnt(22)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:4
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:23
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:6
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:5
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:2
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:47
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:1
; CHECK-NEXT: s_waitcnt vmcnt(23)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:5
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:24
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:4
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:17
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:3
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:27
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:26
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:25
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:24
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:45
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:44
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:43
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:23
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:36
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:22
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:35
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:21
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:34
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:20
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:33
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:19
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:32
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:28
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:29
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:42
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:18
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:63
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:16
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:61
; CHECK-NEXT: s_waitcnt vmcnt(24)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:6
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:25
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:27
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:40
; CHECK-NEXT: s_waitcnt vmcnt(25)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:7
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:26
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:26
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:39
; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:8
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:27
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:25
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:38
; CHECK-NEXT: s_waitcnt vmcnt(27)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:9
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:28
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:24
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:37
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:44
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:57
; CHECK-NEXT: s_waitcnt vmcnt(28)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:10
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:29
-; CHECK-NEXT: s_waitcnt vmcnt(29)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:11
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:30
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:43
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:56
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:45
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:58
; CHECK-NEXT: s_waitcnt vmcnt(30)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:12
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:31
-; CHECK-NEXT: s_waitcnt vmcnt(31)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:13
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(32)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:14
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:33
-; CHECK-NEXT: s_waitcnt vmcnt(33)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:15
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:34
-; CHECK-NEXT: s_waitcnt vmcnt(34)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:16
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:35
-; CHECK-NEXT: s_waitcnt vmcnt(35)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:17
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:36
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:18
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:37
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:19
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:38
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:20
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:39
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:21
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:40
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:22
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:41
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:23
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:42
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:24
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:43
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:25
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:44
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:26
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:45
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:27
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:46
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:28
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:47
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:29
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:48
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:30
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:49
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:31
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:50
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:32
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:51
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:33
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:52
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:34
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:53
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:35
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:54
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:36
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:55
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:37
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:56
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:38
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:57
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:39
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:58
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:40
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:59
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:41
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:60
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:42
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:61
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:43
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:62
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:44
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:63
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:45
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:64
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:46
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:65
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:47
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:66
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:48
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:67
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:49
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:68
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:50
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:69
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:51
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:70
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:52
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:71
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:53
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:72
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:54
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:73
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:55
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:74
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:56
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:75
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:57
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:76
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:58
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:77
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:59
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:78
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:60
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:79
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:61
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:80
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:62
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:81
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:63
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:82
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:64
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:83
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:65
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:84
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:66
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:85
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:67
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:86
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:68
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:87
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:69
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:88
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:70
-; CHECK-NEXT: s_waitcnt vmcnt(35)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:71
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:89
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:90
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:72
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:91
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:73
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:92
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:74
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:93
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:75
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:94
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:76
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:95
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:77
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:96
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:78
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:97
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:79
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:98
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:80
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:99
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:81
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:100
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:82
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:101
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:83
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:102
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:84
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:103
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:85
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:104
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:86
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:105
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:87
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:106
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:88
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:107
-; CHECK-NEXT: s_waitcnt vmcnt(35)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:89
-; CHECK-NEXT: s_waitcnt vmcnt(35)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:90
-; CHECK-NEXT: s_waitcnt vmcnt(34)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:91
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:36
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:49
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:35
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:48
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:46
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:47
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:60
; CHECK-NEXT: s_waitcnt vmcnt(33)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:92
-; CHECK-NEXT: s_waitcnt vmcnt(32)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:93
-; CHECK-NEXT: s_waitcnt vmcnt(31)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:94
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:34
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:79
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:28
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:41
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:42
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:55
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:33
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:32
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:77
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:61
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:74
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:40
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:53
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:39
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:52
; CHECK-NEXT: s_waitcnt vmcnt(30)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:95
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:38
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:51
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:37
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:50
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:57
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:70
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:56
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:69
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:58
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:71
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:49
; CHECK-NEXT: s_waitcnt vmcnt(29)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:96
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:48
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:93
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:46
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:59
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:60
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:73
; CHECK-NEXT: s_waitcnt vmcnt(28)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:97
-; CHECK-NEXT: s_waitcnt vmcnt(27)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:98
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:99
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:41
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:54
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:55
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:68
; CHECK-NEXT: s_waitcnt vmcnt(25)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:100
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:108
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:109
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:110
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:111
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:112
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:113
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:114
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:115
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:116
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:117
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:118
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:119
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:101
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:120
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:102
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:121
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:103
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:122
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:104
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:123
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:105
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:124
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:106
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:125
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:107
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:126
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: global_load_ubyte v21, v0, s[0:1] offset:127
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:108
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:109
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:110
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:111
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:112
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:113
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:114
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:115
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:116
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:117
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:118
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:74
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:87
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:53
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:66
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:52
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:65
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:51
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:64
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:62
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:63
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:76
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:50
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:95
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:77
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:90
; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:119
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:71
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:83
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:70
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:69
; CHECK-NEXT: s_waitcnt vmcnt(25)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:120
-; CHECK-NEXT: s_waitcnt vmcnt(24)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:121
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:59
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:72
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:73
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:85
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:54
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:67
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:68
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:81
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:66
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:111
; CHECK-NEXT: s_waitcnt vmcnt(23)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:122
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:65
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:110
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:64
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:109
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:62
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:75
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:76
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:89
; CHECK-NEXT: s_waitcnt vmcnt(22)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:123
-; CHECK-NEXT: s_waitcnt vmcnt(21)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:124
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:90
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:103
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:72
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:86
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:84
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:82
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:87
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:100
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:67
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:80
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:78
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:94
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:79
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:92
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:95
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:108
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:93
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:106
; CHECK-NEXT: s_waitcnt vmcnt(20)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:125
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:75
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:88
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:89
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:102
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:78
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:91
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:94
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:107
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:92
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:105
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:88
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:101
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:91
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:104
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:86
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:85
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:84
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:83
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:82
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:96
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:97
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:98
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:99
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:120
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:81
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:80
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:111
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:110
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:109
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:108
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:100
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:121
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:122
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:123
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:124
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:125
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:126
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:107
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:127
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:106
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:105
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:103
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:102
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:101
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:116
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:117
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:119
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:114
+; CHECK-NEXT: s_waitcnt vmcnt(34)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:104
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:118
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:115
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:113
+; CHECK-NEXT: global_load_ubyte v21, v0, s[0:1] offset:112
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:99
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:98
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:97
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:96
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:127
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:126
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:125
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:124
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:123
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:122
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:121
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:120
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:119
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:118
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:117
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:116
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:115
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:114
; CHECK-NEXT: s_waitcnt vmcnt(19)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:126
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:113
; CHECK-NEXT: s_waitcnt vmcnt(19)
-; CHECK-NEXT: buffer_store_byte v21, v1, s[8:11], 0 offen offset:127
+; CHECK-NEXT: buffer_store_byte v21, v1, s[8:11], 0 offen offset:112
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %local, ptr addrspace(4) %0, i64 128, i1 false)
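
The same pattern applied to a private-memory destination: addrspace(5)
stores come out as buffer_store_byte through the scratch descriptor in
s[8:11], the addrspace(4) constant source is read with global_load_ubyte,
and the two streams are now software-pipelined so that each wait covers
a group of outstanding loads rather than a single one (the interleaved
"s_nop 0" fillers look like backend hazard padding, not something this
change asks for). A sketch of the corresponding input, with the source
argument renamed for readability (the real test takes it as an unnamed
%0) and the attributes reconstructed as above:

  declare void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noalias writeonly, ptr addrspace(4) noalias readonly, i64, i1 immarg)

  define amdgpu_kernel void @memcpy_p5_p4_minsize(ptr addrspace(5) %local, ptr addrspace(4) %src) minsize {
  entry:
    ; 128 bytes from constant memory into per-thread scratch; with no
    ; alignment information this expansion also stays byte-granular.
    tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %local, ptr addrspace(4) %src, i64 128, i1 false)
    ret void
  }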
@@ -635,367 +570,362 @@ define amdgpu_kernel void @memcpy_p0_p5_minsize(ptr %generic, ptr addrspace(5) %
; CHECK-NEXT: s_addc_u32 s9, s9, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:1
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:2
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:3
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:4
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:5
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:6
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:7
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:8
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:9
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:10
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:11
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:12
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:2
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:14
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:15
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:16
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:30
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: s_waitcnt vmcnt(17)
-; CHECK-NEXT: flat_store_byte v[0:1], v3
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:15
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:1
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:19
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:2
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:20
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:3
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:21
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:4
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:23
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:22
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:21
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:20
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:19
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:18
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:17
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:47
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v18
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:31
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:45
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:23
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:37
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:5
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:23
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:22
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:36
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:6
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:21
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:35
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:7
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:20
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:34
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:19
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:33
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:9
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:27
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:18
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:32
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:10
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:29
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:11
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:30
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:44
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:12
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:17
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:63
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:13
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:31
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:28
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:42
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:14
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:32
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:26
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:40
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:15
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:33
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:25
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:39
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:34
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:24
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:38
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:17
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:35
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:27
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:41
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:18
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:36
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:45
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:59
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:19
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:37
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:20
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:38
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:21
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:39
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:37
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:51
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:22
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:40
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:36
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:50
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:23
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:41
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:35
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:49
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:24
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:42
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:34
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:48
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:25
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:43
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:46
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:26
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:44
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:47
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:61
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:27
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:45
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:29
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:43
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:28
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:46
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:44
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:58
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:29
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:47
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:33
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:79
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:30
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:48
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:31
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:49
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:32
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:42
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:56
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:32
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:50
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:40
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:54
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:33
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:51
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:39
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:53
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:34
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:52
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:38
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:52
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:35
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:53
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:41
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:55
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:36
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:54
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:59
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:73
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:37
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:55
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:38
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:56
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:39
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:57
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:51
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:65
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:40
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:58
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:50
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:64
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:41
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:59
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:62
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:42
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:60
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:63
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:77
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:43
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:61
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:46
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:60
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:44
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:62
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:61
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:75
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:45
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:63
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:46
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:64
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:43
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:57
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:47
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:65
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:58
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:72
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:48
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:66
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:49
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:95
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:49
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:67
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:48
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:56
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:70
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:50
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:68
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:54
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:68
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:51
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:69
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:53
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:67
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:52
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:70
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:52
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:66
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:53
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:71
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:55
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:69
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:54
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:72
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:73
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:87
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:55
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:73
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:56
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:74
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:57
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:75
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:65
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:111
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:58
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:76
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:64
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:110
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:59
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:77
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:62
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:76
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:60
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:78
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:77
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:91
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:61
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:79
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:60
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:74
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:62
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:80
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:75
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:89
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:63
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:81
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:57
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:71
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:64
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:82
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:72
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:86
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:65
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:83
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:70
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:84
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:66
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:84
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:68
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:83
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:67
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:85
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:67
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:81
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:68
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:86
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:66
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:80
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:69
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:87
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:78
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:70
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:88
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:79
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:93
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:71
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:89
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:69
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:82
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:72
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:90
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:87
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:101
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:73
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:74
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:91
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:92
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:75
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:93
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:76
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:90
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:76
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:94
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:91
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:105
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:77
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:95
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:74
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:88
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:78
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:96
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:79
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:97
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:89
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:103
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:80
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:98
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:71
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:85
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:81
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:99
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:86
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:100
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:82
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:100
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:78
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:92
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:83
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:101
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:93
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:107
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:90
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:104
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:84
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:102
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:88
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:102
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:85
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:103
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:85
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:99
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:86
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:104
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:94
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:87
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:105
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:95
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:109
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:88
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:106
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:92
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:106
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:94
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:108
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:84
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:83
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:82
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:81
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:96
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:97
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:98
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:120
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:80
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:111
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:110
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:109
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:99
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:121
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:122
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:123
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:124
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:107
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:105
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:104
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:103
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:106
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:102
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:101
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:100
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:126
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:116
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:117
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:118
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:119
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:127
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:114
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:115
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:108
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:125
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:89
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:107
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:113
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[8:11], 0 offen offset:112
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:90
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:108
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:98
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:97
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:96
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:127
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:126
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:91
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:92
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:93
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:94
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:95
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:96
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:97
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:98
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:99
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:100
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:101
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:109
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:110
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:111
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:112
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:113
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:114
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:115
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:116
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:117
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:118
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:119
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:102
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:120
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:103
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:121
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:104
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:122
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:105
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:123
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:106
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:124
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:107
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:125
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:108
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:126
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: buffer_load_ubyte v21, v2, s[8:11], 0 offen offset:127
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:109
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:110
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:111
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:112
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:113
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:114
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:115
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:116
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:117
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:118
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:119
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:120
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:121
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:122
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:123
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:124
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:125
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:126
-; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:127
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:125
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:124
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:123
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:122
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:121
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:120
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:119
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:118
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:117
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:116
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:115
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:114
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:113
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:112
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p0.p5.i64(ptr %generic, ptr addrspace(5) %src, i64 128, i1 false)
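(For context: the regenerated checks above come from a 128-byte private-to-flat copy. A minimal sketch of the IR shape that exercises this path; the function name and attribute set are assumptions inferred from the file's memcpy_pX_pY_minsize naming pattern, and the intrinsic declaration is included only to keep the sketch self-contained:

define amdgpu_kernel void @memcpy_p0_p5_minsize(ptr %generic, ptr addrspace(5) %src) #0 {
entry:
  ; 128-byte copy from private (addrspace 5) memory to a generic pointer;
  ; the assumed minsize attribute set (#0) steers the backend toward the
  ; byte-wise buffer_load_ubyte/flat_store_byte lowering checked above.
  tail call void @llvm.memcpy.p0.p5.i64(ptr %generic, ptr addrspace(5) %src, i64 128, i1 false)
  ret void
}

declare void @llvm.memcpy.p0.p5.i64(ptr noalias nocapture writeonly, ptr addrspace(5) noalias nocapture readonly, i64, i1 immarg)

attributes #0 = { minsize }
)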
@@ -1008,30 +938,30 @@ define amdgpu_kernel void @memcpy_p3_p4_minsize(ptr addrspace(4) %0) #0 {
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CHECK-NEXT: v_mov_b32_e32 v24, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v24, s[0:1] offset:112
-; CHECK-NEXT: global_load_dwordx4 v[4:7], v24, s[0:1] offset:96
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v24, s[0:1] offset:80
-; CHECK-NEXT: global_load_dwordx4 v[12:15], v24, s[0:1] offset:64
-; CHECK-NEXT: global_load_dwordx4 v[16:19], v24, s[0:1] offset:48
-; CHECK-NEXT: global_load_dwordx4 v[20:23], v24, s[0:1] offset:32
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v24, s[0:1]
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v24, s[0:1] offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v24, s[0:1] offset:32
+; CHECK-NEXT: global_load_dwordx4 v[12:15], v24, s[0:1] offset:48
+; CHECK-NEXT: global_load_dwordx4 v[16:19], v24, s[0:1] offset:64
+; CHECK-NEXT: global_load_dwordx4 v[20:23], v24, s[0:1] offset:80
; CHECK-NEXT: s_waitcnt vmcnt(5)
-; CHECK-NEXT: ds_write2_b64 v24, v[0:1], v[2:3] offset0:14 offset1:15
+; CHECK-NEXT: ds_write2_b64 v24, v[0:1], v[2:3] offset1:1
; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: ds_write2_b64 v24, v[4:5], v[6:7] offset0:12 offset1:13
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v24, s[0:1] offset:16
-; CHECK-NEXT: global_load_dwordx4 v[4:7], v24, s[0:1]
+; CHECK-NEXT: ds_write2_b64 v24, v[4:5], v[6:7] offset0:2 offset1:3
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v24, s[0:1] offset:96
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v24, s[0:1] offset:112
; CHECK-NEXT: s_waitcnt vmcnt(5)
-; CHECK-NEXT: ds_write2_b64 v24, v[8:9], v[10:11] offset0:10 offset1:11
+; CHECK-NEXT: ds_write2_b64 v24, v[8:9], v[10:11] offset0:4 offset1:5
; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: ds_write2_b64 v24, v[12:13], v[14:15] offset0:8 offset1:9
+; CHECK-NEXT: ds_write2_b64 v24, v[12:13], v[14:15] offset0:6 offset1:7
; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: ds_write2_b64 v24, v[16:17], v[18:19] offset0:6 offset1:7
+; CHECK-NEXT: ds_write2_b64 v24, v[16:17], v[18:19] offset0:8 offset1:9
; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: ds_write2_b64 v24, v[20:21], v[22:23] offset0:4 offset1:5
+; CHECK-NEXT: ds_write2_b64 v24, v[20:21], v[22:23] offset0:10 offset1:11
; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: ds_write2_b64 v24, v[0:1], v[2:3] offset0:2 offset1:3
+; CHECK-NEXT: ds_write2_b64 v24, v[0:1], v[2:3] offset0:12 offset1:13
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: ds_write2_b64 v24, v[4:5], v[6:7] offset1:1
+; CHECK-NEXT: ds_write2_b64 v24, v[4:5], v[6:7] offset0:14 offset1:15
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) @shared, ptr addrspace(4) %0, i64 128, i1 false)
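(Reading note on the reordered ds_write2_b64 pairs above: the offset0/offset1 fields are scaled by the 8-byte element size, so the effective byte address is base + offsetN * 8. Under that encoding the old leading pair offset0:14 offset1:15 wrote bytes 112..127, while the new leading pair offset1:1 writes bytes 0..15; the regenerated schedule walks the 128-byte LDS destination in ascending rather than descending order, matching the now-ascending global_load_dwordx4 offsets.)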
@@ -1043,298 +973,278 @@ define amdgpu_kernel void @memcpy_p0_p3_minsize(ptr %generic) #0 {
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:127
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:126
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:125
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:124
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:112
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:113
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:114
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:115
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:116
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:117
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:118
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:119
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:127
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:126
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:123
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:125
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:124
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:122
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:121
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:123
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:112
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:113
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:114
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:115
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:116
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:117
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:118
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:119
; CHECK-NEXT: ds_read_u8 v3, v2 offset:120
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:119
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:122
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:121
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:118
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:121
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:122
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:123
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:124
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:125
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:126
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:127
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:120
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:119
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:117
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:116
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:118
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:115
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:114
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:117
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:116
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:113
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:115
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:114
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:112
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:111
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:113
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:110
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:109
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:112
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:111
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:108
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:110
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:109
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:107
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:106
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:108
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:105
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:104
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:107
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:106
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:103
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:105
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:104
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:102
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:101
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:103
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:100
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:121
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:122
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:123
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:124
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:125
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:126
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:127
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:96
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:97
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:98
; CHECK-NEXT: ds_read_u8 v6, v2 offset:99
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:102
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:101
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:98
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:100
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:100
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:101
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:102
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:103
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:96
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:97
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:98
; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:99
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:97
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:96
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:98
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:95
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:94
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:97
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:96
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:93
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:95
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:94
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:92
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:91
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:93
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:90
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:89
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:92
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:91
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:88
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:90
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:89
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:87
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:86
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:88
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:85
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:84
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:87
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:86
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:83
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:85
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:84
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:82
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:81
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:100
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:101
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:102
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:103
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:104
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:105
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:106
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:107
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:108
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:109
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:110
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:111
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:83
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:104
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:105
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:106
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:107
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:108
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:109
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:110
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:111
; CHECK-NEXT: ds_read_u8 v3, v2 offset:80
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:79
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:82
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:81
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:78
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:81
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:82
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:83
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:84
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:85
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:86
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:87
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:80
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:79
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:77
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:76
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:78
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:75
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:74
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:77
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:76
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:73
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:75
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:74
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:72
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:71
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:73
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:70
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:69
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:72
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:71
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:68
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:70
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:69
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:67
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:66
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:68
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:81
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:82
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:83
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:84
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:85
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:86
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:87
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:88
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:89
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:90
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:91
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:92
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:93
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:94
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:95
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:88
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:89
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:90
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:91
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:92
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:93
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:94
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:95
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:64
; CHECK-NEXT: ds_read_u8 v4, v2 offset:65
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:64
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:66
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:67
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:68
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:69
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:70
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:71
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:67
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:66
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:63
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:64
; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:65
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:64
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:62
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:61
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:66
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:67
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:68
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:69
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:70
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:71
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:72
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:73
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:74
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:75
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:76
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:77
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:78
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:79
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:63
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:60
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:72
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:73
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:74
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:75
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:76
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:77
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:78
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:79
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:48
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:49
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:50
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:51
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:52
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:53
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:54
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:55
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:48
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:49
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:50
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:51
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:52
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:53
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:54
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:55
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:56
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:57
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:58
; CHECK-NEXT: ds_read_u8 v6, v2 offset:59
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:62
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:61
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:58
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:60
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:60
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:61
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:62
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:63
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:56
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:57
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:58
; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:59
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:57
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:56
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:58
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:55
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:54
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:57
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:56
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:53
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:55
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:54
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:52
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:51
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:53
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:50
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:49
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:52
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:51
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:48
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:50
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:49
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:47
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:46
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:48
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:45
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:44
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:47
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:46
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:43
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:45
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:44
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:42
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:41
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:43
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:60
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:61
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:62
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:63
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:32
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:33
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:34
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:35
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:36
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:37
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:38
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:39
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:32
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:33
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:34
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:35
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:36
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:37
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:38
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:39
; CHECK-NEXT: ds_read_u8 v3, v2 offset:40
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:39
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:42
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:41
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:38
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:41
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:42
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:43
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:44
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:45
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:46
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:47
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:40
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:39
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:37
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:36
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:38
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:35
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:34
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:37
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:36
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:33
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:35
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:34
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:32
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:31
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:33
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:30
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:29
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:32
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:31
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:28
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:29
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:27
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:26
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:28
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:25
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:24
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:27
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:26
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:23
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:25
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:24
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:22
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:21
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:23
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:20
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:19
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:22
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:21
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:41
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:42
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:43
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:44
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:45
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:46
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:47
+; CHECK-NEXT: ds_read_u8 v3, v2
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:1
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:2
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:3
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:4
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:5
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:6
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:7
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:8
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:9
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:10
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:11
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:12
+; CHECK-NEXT: ds_read_u8 v16, v2 offset:13
+; CHECK-NEXT: ds_read_u8 v17, v2 offset:14
+; CHECK-NEXT: ds_read_u8 v18, v2 offset:15
+; CHECK-NEXT: ds_read_u8 v19, v2 offset:16
+; CHECK-NEXT: ds_read_u8 v20, v2 offset:17
+; CHECK-NEXT: ds_read_u8 v21, v2 offset:18
+; CHECK-NEXT: ds_read_u8 v22, v2 offset:19
+; CHECK-NEXT: ds_read_u8 v23, v2 offset:20
+; CHECK-NEXT: ds_read_u8 v24, v2 offset:21
+; CHECK-NEXT: ds_read_u8 v25, v2 offset:22
+; CHECK-NEXT: ds_read_u8 v26, v2 offset:23
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:20
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:19
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:16
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:17
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:18
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:8
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:9
-; CHECK-NEXT: ds_read_u8 v7, v2 offset:10
-; CHECK-NEXT: ds_read_u8 v8, v2 offset:11
-; CHECK-NEXT: ds_read_u8 v9, v2 offset:12
-; CHECK-NEXT: ds_read_u8 v10, v2 offset:13
-; CHECK-NEXT: ds_read_u8 v11, v2 offset:14
-; CHECK-NEXT: ds_read_u8 v12, v2 offset:15
-; CHECK-NEXT: ds_read_u8 v13, v2
-; CHECK-NEXT: ds_read_u8 v14, v2 offset:1
-; CHECK-NEXT: ds_read_u8 v15, v2 offset:2
-; CHECK-NEXT: ds_read_u8 v16, v2 offset:3
-; CHECK-NEXT: ds_read_u8 v17, v2 offset:4
-; CHECK-NEXT: ds_read_u8 v18, v2 offset:5
-; CHECK-NEXT: ds_read_u8 v19, v2 offset:6
-; CHECK-NEXT: ds_read_u8 v2, v2 offset:7
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:17
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:16
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:15
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:14
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:13
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:12
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:10
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:9
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:8
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:7
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:6
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:5
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:4
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:2
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:1
-; CHECK-NEXT: flat_store_byte v[0:1], v13
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:19
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:23
+; CHECK-NEXT: ds_read_u8 v19, v2 offset:24
+; CHECK-NEXT: ds_read_u8 v20, v2 offset:25
+; CHECK-NEXT: ds_read_u8 v21, v2 offset:26
+; CHECK-NEXT: ds_read_u8 v22, v2 offset:27
+; CHECK-NEXT: ds_read_u8 v23, v2 offset:28
+; CHECK-NEXT: ds_read_u8 v24, v2 offset:29
+; CHECK-NEXT: ds_read_u8 v25, v2 offset:30
+; CHECK-NEXT: ds_read_u8 v2, v2 offset:31
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:27
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:28
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:31
+; CHECK-NEXT: flat_store_byte v[0:1], v3
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:1
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:2
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:3
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:4
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:5
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:6
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:7
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:9
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:10
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:11
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:12
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:13
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:14
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:15
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p0.p3.i64(ptr %generic, ptr addrspace(3) @shared, i64 128, i1 false)
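(Reading note on the LDS-to-flat hunk above: the regenerated checks batch the ds_read_u8 loads, mostly into aligned groups of eight, issue a single s_waitcnt lgkmcnt(0) per group, and only then emit the matching flat_store_byte instructions, instead of the previous pattern of waiting after every two or three loads. The trade-off visible in the diff is a few more live byte registers: v3..v10 per group, growing to v3..v26 for the 24-byte tail batch.)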
@@ -1349,148 +1259,105 @@ define amdgpu_kernel void @memcpy_p0_p0_optsize(ptr %dest, ptr %src) #1 {
; CHECK-NEXT: v_mov_b32_e32 v0, s2
; CHECK-NEXT: v_mov_b32_e32 v1, s3
; CHECK-NEXT: flat_load_ubyte v4, v[0:1]
+; CHECK-NEXT: flat_load_ubyte v5, v[0:1] offset:1
+; CHECK-NEXT: flat_load_ubyte v6, v[0:1] offset:2
+; CHECK-NEXT: flat_load_ubyte v7, v[0:1] offset:3
+; CHECK-NEXT: flat_load_ubyte v8, v[0:1] offset:4
+; CHECK-NEXT: flat_load_ubyte v9, v[0:1] offset:5
+; CHECK-NEXT: flat_load_ubyte v10, v[0:1] offset:6
+; CHECK-NEXT: flat_load_ubyte v11, v[0:1] offset:7
+; CHECK-NEXT: flat_load_ubyte v12, v[0:1] offset:8
+; CHECK-NEXT: flat_load_ubyte v13, v[0:1] offset:9
+; CHECK-NEXT: flat_load_ubyte v14, v[0:1] offset:10
+; CHECK-NEXT: flat_load_ubyte v15, v[0:1] offset:11
+; CHECK-NEXT: flat_load_ubyte v16, v[0:1] offset:12
+; CHECK-NEXT: flat_load_ubyte v17, v[0:1] offset:13
+; CHECK-NEXT: flat_load_ubyte v18, v[0:1] offset:14
; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:1
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:1
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:2
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:2
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:3
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:3
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:4
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:4
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:5
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:5
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:6
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:6
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:7
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:7
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:8
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:8
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:9
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:9
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:10
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:10
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:11
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:11
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:12
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:12
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:13
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:13
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:14
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:14
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:15
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:15
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:16
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:16
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:17
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:17
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:18
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:18
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:19
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:19
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:20
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:20
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:21
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:21
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:22
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:22
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:23
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:23
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:24
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:24
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:25
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:25
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:26
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:26
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:27
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:27
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:28
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:28
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:29
+; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:29
+; CHECK-NEXT: flat_store_byte v[2:3], v4
+; CHECK-NEXT: flat_store_byte v[2:3], v5 offset:1
+; CHECK-NEXT: flat_store_byte v[2:3], v6 offset:2
+; CHECK-NEXT: flat_store_byte v[2:3], v7 offset:3
+; CHECK-NEXT: flat_store_byte v[2:3], v8 offset:4
+; CHECK-NEXT: flat_store_byte v[2:3], v9 offset:5
+; CHECK-NEXT: flat_store_byte v[2:3], v10 offset:6
+; CHECK-NEXT: flat_store_byte v[2:3], v11 offset:7
+; CHECK-NEXT: flat_store_byte v[2:3], v12 offset:8
+; CHECK-NEXT: flat_store_byte v[2:3], v13 offset:9
+; CHECK-NEXT: flat_store_byte v[2:3], v14 offset:10
+; CHECK-NEXT: flat_store_byte v[2:3], v15 offset:11
+; CHECK-NEXT: flat_store_byte v[2:3], v16 offset:12
+; CHECK-NEXT: flat_store_byte v[2:3], v17 offset:13
+; CHECK-NEXT: flat_store_byte v[2:3], v18 offset:14
; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:30
+; CHECK-NEXT: flat_load_ubyte v5, v[0:1] offset:29
+; CHECK-NEXT: flat_load_ubyte v6, v[0:1] offset:28
+; CHECK-NEXT: flat_load_ubyte v7, v[0:1] offset:27
+; CHECK-NEXT: flat_load_ubyte v8, v[0:1] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[0:1] offset:25
+; CHECK-NEXT: flat_load_ubyte v10, v[0:1] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[0:1] offset:23
+; CHECK-NEXT: flat_load_ubyte v12, v[0:1] offset:22
+; CHECK-NEXT: flat_load_ubyte v13, v[0:1] offset:21
+; CHECK-NEXT: flat_load_ubyte v14, v[0:1] offset:20
+; CHECK-NEXT: flat_load_ubyte v15, v[0:1] offset:19
+; CHECK-NEXT: flat_load_ubyte v16, v[0:1] offset:18
+; CHECK-NEXT: flat_load_ubyte v17, v[0:1] offset:17
+; CHECK-NEXT: flat_load_ubyte v18, v[0:1] offset:16
+; CHECK-NEXT: flat_load_ubyte v19, v[0:1] offset:15
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:30
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:31
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:31
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:32
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:33
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:33
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:34
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:34
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:35
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:35
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:36
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:36
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:37
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:37
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:38
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:38
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:39
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:39
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:40
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:40
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:41
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:41
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:42
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:42
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:43
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:43
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:44
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:44
-; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:45
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:45
-; CHECK-NEXT: flat_load_ubyte v0, v[0:1] offset:46
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[2:3], v0 offset:46
+; CHECK-NEXT: flat_store_byte v[2:3], v5 offset:29
+; CHECK-NEXT: flat_store_byte v[2:3], v6 offset:28
+; CHECK-NEXT: flat_store_byte v[2:3], v7 offset:27
+; CHECK-NEXT: flat_store_byte v[2:3], v8 offset:26
+; CHECK-NEXT: flat_store_byte v[2:3], v9 offset:25
+; CHECK-NEXT: flat_store_byte v[2:3], v10 offset:24
+; CHECK-NEXT: flat_store_byte v[2:3], v11 offset:23
+; CHECK-NEXT: flat_store_byte v[2:3], v12 offset:22
+; CHECK-NEXT: flat_store_byte v[2:3], v13 offset:21
+; CHECK-NEXT: flat_store_byte v[2:3], v14 offset:20
+; CHECK-NEXT: flat_store_byte v[2:3], v15 offset:19
+; CHECK-NEXT: flat_store_byte v[2:3], v16 offset:18
+; CHECK-NEXT: flat_store_byte v[2:3], v17 offset:17
+; CHECK-NEXT: flat_store_byte v[2:3], v18 offset:16
+; CHECK-NEXT: flat_store_byte v[2:3], v19 offset:15
+; CHECK-NEXT: flat_load_ubyte v4, v[0:1] offset:46
+; CHECK-NEXT: flat_load_ubyte v5, v[0:1] offset:45
+; CHECK-NEXT: flat_load_ubyte v6, v[0:1] offset:44
+; CHECK-NEXT: flat_load_ubyte v7, v[0:1] offset:43
+; CHECK-NEXT: flat_load_ubyte v8, v[0:1] offset:42
+; CHECK-NEXT: flat_load_ubyte v9, v[0:1] offset:41
+; CHECK-NEXT: flat_load_ubyte v10, v[0:1] offset:40
+; CHECK-NEXT: flat_load_ubyte v11, v[0:1] offset:39
+; CHECK-NEXT: flat_load_ubyte v12, v[0:1] offset:38
+; CHECK-NEXT: flat_load_ubyte v13, v[0:1] offset:37
+; CHECK-NEXT: flat_load_ubyte v14, v[0:1] offset:36
+; CHECK-NEXT: flat_load_ubyte v15, v[0:1] offset:35
+; CHECK-NEXT: flat_load_ubyte v16, v[0:1] offset:34
+; CHECK-NEXT: flat_load_ubyte v17, v[0:1] offset:33
+; CHECK-NEXT: flat_load_ubyte v18, v[0:1] offset:32
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_load_ubyte v0, v[0:1] offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[2:3], v4 offset:46
+; CHECK-NEXT: flat_store_byte v[2:3], v5 offset:45
+; CHECK-NEXT: flat_store_byte v[2:3], v6 offset:44
+; CHECK-NEXT: flat_store_byte v[2:3], v7 offset:43
+; CHECK-NEXT: flat_store_byte v[2:3], v8 offset:42
+; CHECK-NEXT: flat_store_byte v[2:3], v9 offset:41
+; CHECK-NEXT: flat_store_byte v[2:3], v10 offset:40
+; CHECK-NEXT: flat_store_byte v[2:3], v11 offset:39
+; CHECK-NEXT: flat_store_byte v[2:3], v12 offset:38
+; CHECK-NEXT: flat_store_byte v[2:3], v13 offset:37
+; CHECK-NEXT: flat_store_byte v[2:3], v14 offset:36
+; CHECK-NEXT: flat_store_byte v[2:3], v15 offset:35
+; CHECK-NEXT: flat_store_byte v[2:3], v16 offset:34
+; CHECK-NEXT: flat_store_byte v[2:3], v17 offset:33
+; CHECK-NEXT: flat_store_byte v[2:3], v18 offset:32
+; CHECK-NEXT: flat_store_byte v[2:3], v0 offset:31
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 47, i1 false)
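
On targets where flat accesses count against both counters (e.g. GFX9), s_waitcnt vmcnt(n) stalls until at most n vector-memory operations remain outstanding, which is why the waits here are the combined vmcnt(0) lgkmcnt(0). The new schedule issues up to 16 flat_load_ubytes into distinct VGPRs and pays one wait per batch rather than one per byte. With known operand alignment the copy would not degrade to bytes at all; a hedged variant (the align attributes are an assumption, not present in the test):

  ; align 4 on both operands lets the backend select dword-wide accesses
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 47, i1 false)
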
@@ -1501,20 +1368,20 @@ define amdgpu_kernel void @memcpy_p1_p1_optsize(ptr addrspace(1) %dest, ptr addr
; CHECK-LABEL: memcpy_p1_p1_optsize:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v12, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_dwordx2 v[0:1], v4, s[2:3] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1] offset:32
-; CHECK-NEXT: global_load_dwordx2 v[0:1], v4, s[2:3] offset:39
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1] offset:39
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:16
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:16
+; CHECK-NEXT: global_load_dwordx2 v[8:9], v12, s[2:3] offset:32
+; CHECK-NEXT: global_load_dwordx2 v[10:11], v12, s[2:3] offset:39
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v12, s[2:3]
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v12, s[2:3] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx2 v12, v[8:9], s[0:1] offset:32
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx2 v12, v[10:11], s[0:1] offset:39
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx4 v12, v[0:3], s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx4 v12, v[4:7], s[0:1] offset:16
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) %dest, ptr addrspace(1) %src, i64 47, i1 false)
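
The 47-byte length in this test decomposes into two 16-byte and two 8-byte chunks, with the final chunk overlapping the previous one so that no sub-dword tail is left over:

  dwordx4 @ offset 0    covers bytes  0..15
  dwordx4 @ offset 16   covers bytes 16..31
  dwordx2 @ offset 32   covers bytes 32..39
  dwordx2 @ offset 39   covers bytes 39..46  (the overlap re-copies byte 39)

The change keeps that decomposition and only reorders it: all four loads are issued first, and each store waits with s_waitcnt vmcnt(3), enough for its own source data to have returned once the preceding stores are counted.
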
@@ -1525,32 +1392,32 @@ define amdgpu_kernel void @memcpy_p1_p4_optsize(ptr addrspace(1) %global, ptr ad
; CHECK-LABEL: memcpy_p1_p4_optsize:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:16
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:16
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:32
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:48
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:48
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:64
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:64
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:80
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:80
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:96
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:96
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3] offset:112
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:112
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v32, s[2:3]
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v32, s[2:3] offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v32, s[2:3] offset:32
+; CHECK-NEXT: global_load_dwordx4 v[12:15], v32, s[2:3] offset:48
+; CHECK-NEXT: global_load_dwordx4 v[16:19], v32, s[2:3] offset:64
+; CHECK-NEXT: global_load_dwordx4 v[20:23], v32, s[2:3] offset:80
+; CHECK-NEXT: global_load_dwordx4 v[24:27], v32, s[2:3] offset:96
+; CHECK-NEXT: global_load_dwordx4 v[28:31], v32, s[2:3] offset:112
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) %global, ptr addrspace(4) %0, i64 128, i1 false)
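
Here 128 bytes map exactly onto eight dwordx4 (16-byte) transfers, so the updated schedule loads all of them into v[0:31] up front and stores each quad after an s_waitcnt vmcnt(7), by which point its source registers are guaranteed valid. A minimal reproducer sketch (kernel and parameter names are illustrative):

  declare void @llvm.memcpy.p1.p4.i64(ptr addrspace(1), ptr addrspace(4), i64, i1)

  define amdgpu_kernel void @copy_const_to_global(ptr addrspace(1) %dst, ptr addrspace(4) %src) {
  entry:
    ; 128-byte copy from constant memory (addrspace 4) to global (addrspace 1)
    tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) %dst, ptr addrspace(4) %src, i64 128, i1 false)
    ret void
  }
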
@@ -1567,394 +1434,372 @@ define amdgpu_kernel void @memcpy_p5_p4_optsize(ptr addrspace(5) %local, ptr add
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_add_u32 s8, s8, s7
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1]
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:1
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:2
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:3
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:4
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:5
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:6
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:7
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:8
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:9
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:10
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:11
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:12
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:13
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:14
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:15
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:15
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:14
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:13
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:12
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:11
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:10
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:9
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:8
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:7
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:6
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:5
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:4
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:3
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:2
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:1
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1]
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:31
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:30
; CHECK-NEXT: s_addc_u32 s9, s9, 0
; CHECK-NEXT: v_mov_b32_e32 v1, s2
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:16
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:17
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:18
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:11
; CHECK-NEXT: s_waitcnt vmcnt(18)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:19
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:10
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:23
; CHECK-NEXT: s_waitcnt vmcnt(19)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:1
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:20
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:9
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:22
; CHECK-NEXT: s_waitcnt vmcnt(20)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:2
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:21
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:8
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:21
; CHECK-NEXT: s_waitcnt vmcnt(21)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:3
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:22
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:7
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:20
; CHECK-NEXT: s_waitcnt vmcnt(22)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:4
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:23
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:6
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:5
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:2
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:47
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:4
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:17
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:3
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:27
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:26
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:25
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:24
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:45
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:44
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:43
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:23
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:36
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:22
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:35
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:21
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:34
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:20
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:33
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:19
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:32
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:28
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:29
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:42
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:18
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:63
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:17
; CHECK-NEXT: s_waitcnt vmcnt(23)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:5
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:24
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:16
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:61
; CHECK-NEXT: s_waitcnt vmcnt(24)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:6
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:25
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:27
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:40
; CHECK-NEXT: s_waitcnt vmcnt(25)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:7
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:26
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:26
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:39
; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:8
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:27
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:25
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:38
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:24
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:37
; CHECK-NEXT: s_waitcnt vmcnt(27)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:9
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:28
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:44
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:57
; CHECK-NEXT: s_waitcnt vmcnt(28)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:10
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:29
-; CHECK-NEXT: s_waitcnt vmcnt(29)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:11
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:30
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:43
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:56
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:45
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:58
; CHECK-NEXT: s_waitcnt vmcnt(30)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:12
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:31
-; CHECK-NEXT: s_waitcnt vmcnt(31)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:13
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(32)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:14
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:33
-; CHECK-NEXT: s_waitcnt vmcnt(33)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:15
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:34
-; CHECK-NEXT: s_waitcnt vmcnt(34)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:16
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:35
-; CHECK-NEXT: s_waitcnt vmcnt(35)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:17
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:36
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:18
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:37
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:19
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:38
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:20
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:39
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:21
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:40
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:22
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:41
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:23
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:42
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:24
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:43
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:25
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:44
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:26
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:45
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:27
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:46
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:28
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:47
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:29
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:48
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:30
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:49
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:31
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:50
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:32
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:51
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:33
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:52
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:34
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:53
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:35
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:54
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:36
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:55
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:37
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:56
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:38
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:57
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:39
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:58
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:40
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:59
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:41
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:60
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:42
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:61
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:43
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:62
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:44
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:63
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:45
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:64
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:46
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:65
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:47
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:66
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:48
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:67
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:49
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:68
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:50
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:69
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:51
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:70
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:52
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:71
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:53
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:72
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:54
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:73
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:55
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:74
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:56
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:75
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:57
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:76
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:58
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:77
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:59
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:78
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:60
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:79
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:61
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:80
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:62
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:81
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:63
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:82
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:64
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:83
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:65
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:84
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:66
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:85
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:67
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:86
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:68
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:87
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:69
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:88
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:70
-; CHECK-NEXT: s_waitcnt vmcnt(35)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:71
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:89
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:90
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:72
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:91
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:73
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:92
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:74
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:93
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:75
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:94
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:76
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:95
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:77
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:96
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:78
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:97
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:79
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:98
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:80
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:99
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:81
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:100
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:82
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:101
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:83
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:102
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:84
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:103
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:85
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:104
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:86
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:105
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:87
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:106
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:88
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:107
-; CHECK-NEXT: s_waitcnt vmcnt(35)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:89
-; CHECK-NEXT: s_waitcnt vmcnt(35)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:90
-; CHECK-NEXT: s_waitcnt vmcnt(34)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:91
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:36
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:49
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:35
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:48
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:46
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:47
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:60
; CHECK-NEXT: s_waitcnt vmcnt(33)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:92
-; CHECK-NEXT: s_waitcnt vmcnt(32)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:93
-; CHECK-NEXT: s_waitcnt vmcnt(31)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:94
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:34
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:79
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:28
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:41
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:42
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:55
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:33
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:32
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:77
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:61
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:74
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:40
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:53
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:39
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:52
; CHECK-NEXT: s_waitcnt vmcnt(30)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:95
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:38
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:51
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:37
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:50
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:57
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:70
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:56
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:69
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:58
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:71
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:49
; CHECK-NEXT: s_waitcnt vmcnt(29)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:96
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:48
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:93
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:46
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:59
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:60
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:73
; CHECK-NEXT: s_waitcnt vmcnt(28)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:97
-; CHECK-NEXT: s_waitcnt vmcnt(27)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:98
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:99
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:41
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:54
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:55
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:68
; CHECK-NEXT: s_waitcnt vmcnt(25)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:100
-; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:108
-; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:109
-; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:110
-; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:111
-; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:112
-; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:113
-; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:114
-; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:115
-; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:116
-; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:117
-; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:118
-; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:119
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:101
-; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:120
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:102
-; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:121
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:103
-; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:122
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:104
-; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:123
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:105
-; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:124
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:106
-; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:125
-; CHECK-NEXT: s_waitcnt vmcnt(36)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:107
-; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:126
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: global_load_ubyte v21, v0, s[0:1] offset:127
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:108
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:109
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:110
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:111
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:112
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:113
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:114
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:115
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:116
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:117
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:118
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:74
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:87
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:53
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:66
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:52
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:65
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:51
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:64
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:62
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:63
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:76
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:50
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:95
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:77
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:90
; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:119
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:71
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:83
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:70
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:69
; CHECK-NEXT: s_waitcnt vmcnt(25)
-; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:120
-; CHECK-NEXT: s_waitcnt vmcnt(24)
-; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:121
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:59
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:72
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:73
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:85
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:54
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:67
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:68
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:81
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:66
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:111
; CHECK-NEXT: s_waitcnt vmcnt(23)
-; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:122
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:65
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:110
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:64
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:109
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:62
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:75
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:76
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:89
; CHECK-NEXT: s_waitcnt vmcnt(22)
-; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:123
-; CHECK-NEXT: s_waitcnt vmcnt(21)
-; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:124
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:90
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:103
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:72
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:86
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:84
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:82
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:87
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:100
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:67
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:80
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:78
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:94
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:79
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:92
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:95
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:108
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:93
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:106
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:75
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:88
; CHECK-NEXT: s_waitcnt vmcnt(20)
-; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:125
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:89
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:102
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:78
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:91
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:94
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:107
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:92
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:105
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:88
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:101
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:91
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:104
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:86
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:85
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:84
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:83
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:82
+; CHECK-NEXT: global_load_ubyte v15, v0, s[0:1] offset:96
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v16, v0, s[0:1] offset:97
+; CHECK-NEXT: global_load_ubyte v17, v0, s[0:1] offset:98
+; CHECK-NEXT: global_load_ubyte v18, v0, s[0:1] offset:99
+; CHECK-NEXT: global_load_ubyte v19, v0, s[0:1] offset:120
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:81
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:80
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:111
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:110
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:109
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:108
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:100
+; CHECK-NEXT: global_load_ubyte v20, v0, s[0:1] offset:121
+; CHECK-NEXT: global_load_ubyte v2, v0, s[0:1] offset:122
+; CHECK-NEXT: global_load_ubyte v3, v0, s[0:1] offset:123
+; CHECK-NEXT: global_load_ubyte v4, v0, s[0:1] offset:124
+; CHECK-NEXT: global_load_ubyte v5, v0, s[0:1] offset:125
+; CHECK-NEXT: global_load_ubyte v6, v0, s[0:1] offset:126
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:107
+; CHECK-NEXT: global_load_ubyte v7, v0, s[0:1] offset:127
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:106
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:105
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:103
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:102
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:101
+; CHECK-NEXT: global_load_ubyte v8, v0, s[0:1] offset:116
+; CHECK-NEXT: global_load_ubyte v9, v0, s[0:1] offset:117
+; CHECK-NEXT: global_load_ubyte v11, v0, s[0:1] offset:119
+; CHECK-NEXT: global_load_ubyte v12, v0, s[0:1] offset:114
+; CHECK-NEXT: s_waitcnt vmcnt(34)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:104
+; CHECK-NEXT: global_load_ubyte v10, v0, s[0:1] offset:118
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_ubyte v13, v0, s[0:1] offset:115
+; CHECK-NEXT: global_load_ubyte v14, v0, s[0:1] offset:113
+; CHECK-NEXT: global_load_ubyte v21, v0, s[0:1] offset:112
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v18, v1, s[8:11], 0 offen offset:99
+; CHECK-NEXT: buffer_store_byte v17, v1, s[8:11], 0 offen offset:98
+; CHECK-NEXT: buffer_store_byte v16, v1, s[8:11], 0 offen offset:97
+; CHECK-NEXT: buffer_store_byte v15, v1, s[8:11], 0 offen offset:96
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v7, v1, s[8:11], 0 offen offset:127
+; CHECK-NEXT: buffer_store_byte v6, v1, s[8:11], 0 offen offset:126
+; CHECK-NEXT: buffer_store_byte v5, v1, s[8:11], 0 offen offset:125
+; CHECK-NEXT: buffer_store_byte v4, v1, s[8:11], 0 offen offset:124
+; CHECK-NEXT: buffer_store_byte v3, v1, s[8:11], 0 offen offset:123
+; CHECK-NEXT: buffer_store_byte v2, v1, s[8:11], 0 offen offset:122
+; CHECK-NEXT: buffer_store_byte v20, v1, s[8:11], 0 offen offset:121
+; CHECK-NEXT: buffer_store_byte v19, v1, s[8:11], 0 offen offset:120
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v11, v1, s[8:11], 0 offen offset:119
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v10, v1, s[8:11], 0 offen offset:118
+; CHECK-NEXT: buffer_store_byte v9, v1, s[8:11], 0 offen offset:117
+; CHECK-NEXT: buffer_store_byte v8, v1, s[8:11], 0 offen offset:116
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v13, v1, s[8:11], 0 offen offset:115
+; CHECK-NEXT: buffer_store_byte v12, v1, s[8:11], 0 offen offset:114
; CHECK-NEXT: s_waitcnt vmcnt(19)
-; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:126
+; CHECK-NEXT: buffer_store_byte v14, v1, s[8:11], 0 offen offset:113
; CHECK-NEXT: s_waitcnt vmcnt(19)
-; CHECK-NEXT: buffer_store_byte v21, v1, s[8:11], 0 offen offset:127
+; CHECK-NEXT: buffer_store_byte v21, v1, s[8:11], 0 offen offset:112
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %local, ptr addrspace(4) %0, i64 128, i1 false)
@@ -1971,367 +1816,362 @@ define amdgpu_kernel void @memcpy_p0_p5_optsize(ptr %generic, ptr addrspace(5) %
; CHECK-NEXT: s_addc_u32 s9, s9, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:1
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:2
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:3
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:4
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:5
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:6
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:7
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:8
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:9
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:10
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:11
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:12
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:2
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:14
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:15
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:16
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:30
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: s_waitcnt vmcnt(17)
-; CHECK-NEXT: flat_store_byte v[0:1], v3
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:15
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:1
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:19
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:2
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:20
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:3
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:21
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:4
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:23
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:22
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:21
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:20
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:19
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:18
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:17
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:47
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v18
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:31
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:45
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:23
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:37
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:5
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:23
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:22
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:36
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:6
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:21
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:35
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:7
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:20
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:34
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:19
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:33
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:9
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:27
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:18
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:32
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:10
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:29
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:11
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:30
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:44
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:12
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:17
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:63
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:13
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:31
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:28
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:42
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:14
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:32
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:26
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:40
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:15
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:33
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:25
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:39
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:34
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:24
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:38
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:17
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:35
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:27
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:41
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:18
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:36
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:45
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:59
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:19
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:37
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:37
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:51
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:20
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:38
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:36
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:50
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:21
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:39
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:35
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:49
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:22
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:40
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:34
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:48
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:23
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:41
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:46
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:24
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:42
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:47
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:61
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:25
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:43
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:29
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:43
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:26
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:44
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:44
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:58
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:27
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:45
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:33
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:79
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:28
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:46
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:29
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:47
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:30
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:48
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:31
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:49
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:32
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:42
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:56
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:32
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:50
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:40
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:54
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:33
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:51
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:39
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:53
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:34
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:52
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:38
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:52
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:35
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:53
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:41
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:55
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:36
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:54
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:59
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:73
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:37
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:55
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:38
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:56
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:39
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:57
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:40
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:58
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:51
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:65
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:41
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:59
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:50
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:64
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:42
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:60
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:62
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:43
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:61
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:63
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:77
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:44
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:62
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:46
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:60
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:45
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:63
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:61
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:75
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:46
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:64
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:43
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:57
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:47
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:65
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:58
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:72
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:48
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:66
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:49
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:95
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:49
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:67
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:48
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:56
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:70
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:50
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:68
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:54
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:68
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:51
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:69
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:53
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:67
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:52
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:70
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:52
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:66
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:53
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:71
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:55
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:69
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:54
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:72
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:73
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:87
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:55
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:73
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:56
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:74
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:57
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:75
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:65
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:111
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:58
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:76
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:64
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:110
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:59
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:77
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:62
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:76
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:60
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:78
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:77
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:91
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:61
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:79
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:60
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:74
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:62
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:80
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:75
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:89
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:63
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:81
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:57
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:71
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:64
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:82
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:72
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:86
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:65
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:83
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:70
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:84
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:66
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:84
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:68
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:83
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:67
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:85
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:67
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:81
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:68
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:86
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:66
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:80
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:69
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:87
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:78
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:70
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:88
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:79
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:93
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:71
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:89
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:69
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:82
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:72
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:90
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:87
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:101
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:73
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:74
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:91
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:92
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:75
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:93
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:76
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:94
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:76
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:90
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:77
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:95
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:91
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:105
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:78
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:96
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:74
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:88
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:79
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:97
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:89
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:103
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:80
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:98
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:71
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:85
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:81
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:99
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:86
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:100
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:82
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:100
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:78
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:92
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:83
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:101
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:93
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:107
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:90
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:104
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:84
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:102
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:88
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:102
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:85
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:103
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:85
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:99
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:86
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:104
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:94
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:87
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:105
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:95
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:109
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:88
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:106
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:92
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:106
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:94
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:108
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:84
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:83
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:82
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:81
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:96
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:97
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:98
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:120
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:80
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:111
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:110
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:109
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:99
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:121
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:122
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:123
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:124
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:107
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:105
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:104
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:103
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:106
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:102
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:101
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:100
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:126
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:116
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:117
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:118
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:119
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:127
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:114
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:115
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:108
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:125
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:89
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:107
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:113
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[8:11], 0 offen offset:112
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:90
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:108
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:91
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:92
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:93
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:94
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:95
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:96
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:97
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:98
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:99
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:100
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:101
-; CHECK-NEXT: buffer_load_ubyte v4, v2, s[8:11], 0 offen offset:109
-; CHECK-NEXT: buffer_load_ubyte v5, v2, s[8:11], 0 offen offset:110
-; CHECK-NEXT: buffer_load_ubyte v6, v2, s[8:11], 0 offen offset:111
-; CHECK-NEXT: buffer_load_ubyte v7, v2, s[8:11], 0 offen offset:112
-; CHECK-NEXT: buffer_load_ubyte v8, v2, s[8:11], 0 offen offset:113
-; CHECK-NEXT: buffer_load_ubyte v9, v2, s[8:11], 0 offen offset:114
-; CHECK-NEXT: buffer_load_ubyte v10, v2, s[8:11], 0 offen offset:115
-; CHECK-NEXT: buffer_load_ubyte v11, v2, s[8:11], 0 offen offset:116
-; CHECK-NEXT: buffer_load_ubyte v12, v2, s[8:11], 0 offen offset:117
-; CHECK-NEXT: buffer_load_ubyte v13, v2, s[8:11], 0 offen offset:118
-; CHECK-NEXT: buffer_load_ubyte v14, v2, s[8:11], 0 offen offset:119
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:102
-; CHECK-NEXT: buffer_load_ubyte v15, v2, s[8:11], 0 offen offset:120
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:103
-; CHECK-NEXT: buffer_load_ubyte v16, v2, s[8:11], 0 offen offset:121
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:104
-; CHECK-NEXT: buffer_load_ubyte v17, v2, s[8:11], 0 offen offset:122
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:105
-; CHECK-NEXT: buffer_load_ubyte v18, v2, s[8:11], 0 offen offset:123
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:106
-; CHECK-NEXT: buffer_load_ubyte v19, v2, s[8:11], 0 offen offset:124
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:107
-; CHECK-NEXT: buffer_load_ubyte v20, v2, s[8:11], 0 offen offset:125
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:108
-; CHECK-NEXT: buffer_load_ubyte v3, v2, s[8:11], 0 offen offset:126
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: buffer_load_ubyte v21, v2, s[8:11], 0 offen offset:127
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:98
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:97
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:96
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:127
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:126
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:109
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:110
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:111
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:112
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:113
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:114
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:115
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:116
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:117
-; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:118
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:119
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:120
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:121
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:122
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:123
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:124
-; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:125
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:126
-; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:127
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:125
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:124
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:123
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:122
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:121
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:120
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:119
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:118
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:117
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:116
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:115
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:114
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:113
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:112
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p0.p5.i64(ptr %generic, ptr addrspace(5) %src, i64 128, i1 false)
@@ -2344,30 +2184,30 @@ define amdgpu_kernel void @memcpy_p3_p4_optsize(ptr addrspace(4) %0) #1 {
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CHECK-NEXT: v_mov_b32_e32 v24, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v24, s[0:1] offset:112
-; CHECK-NEXT: global_load_dwordx4 v[4:7], v24, s[0:1] offset:96
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v24, s[0:1] offset:80
-; CHECK-NEXT: global_load_dwordx4 v[12:15], v24, s[0:1] offset:64
-; CHECK-NEXT: global_load_dwordx4 v[16:19], v24, s[0:1] offset:48
-; CHECK-NEXT: global_load_dwordx4 v[20:23], v24, s[0:1] offset:32
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v24, s[0:1]
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v24, s[0:1] offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v24, s[0:1] offset:32
+; CHECK-NEXT: global_load_dwordx4 v[12:15], v24, s[0:1] offset:48
+; CHECK-NEXT: global_load_dwordx4 v[16:19], v24, s[0:1] offset:64
+; CHECK-NEXT: global_load_dwordx4 v[20:23], v24, s[0:1] offset:80
; CHECK-NEXT: s_waitcnt vmcnt(5)
-; CHECK-NEXT: ds_write2_b64 v24, v[0:1], v[2:3] offset0:14 offset1:15
+; CHECK-NEXT: ds_write2_b64 v24, v[0:1], v[2:3] offset1:1
; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: ds_write2_b64 v24, v[4:5], v[6:7] offset0:12 offset1:13
-; CHECK-NEXT: global_load_dwordx4 v[0:3], v24, s[0:1] offset:16
-; CHECK-NEXT: global_load_dwordx4 v[4:7], v24, s[0:1]
+; CHECK-NEXT: ds_write2_b64 v24, v[4:5], v[6:7] offset0:2 offset1:3
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v24, s[0:1] offset:96
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v24, s[0:1] offset:112
; CHECK-NEXT: s_waitcnt vmcnt(5)
-; CHECK-NEXT: ds_write2_b64 v24, v[8:9], v[10:11] offset0:10 offset1:11
+; CHECK-NEXT: ds_write2_b64 v24, v[8:9], v[10:11] offset0:4 offset1:5
; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: ds_write2_b64 v24, v[12:13], v[14:15] offset0:8 offset1:9
+; CHECK-NEXT: ds_write2_b64 v24, v[12:13], v[14:15] offset0:6 offset1:7
; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: ds_write2_b64 v24, v[16:17], v[18:19] offset0:6 offset1:7
+; CHECK-NEXT: ds_write2_b64 v24, v[16:17], v[18:19] offset0:8 offset1:9
; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: ds_write2_b64 v24, v[20:21], v[22:23] offset0:4 offset1:5
+; CHECK-NEXT: ds_write2_b64 v24, v[20:21], v[22:23] offset0:10 offset1:11
; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: ds_write2_b64 v24, v[0:1], v[2:3] offset0:2 offset1:3
+; CHECK-NEXT: ds_write2_b64 v24, v[0:1], v[2:3] offset0:12 offset1:13
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: ds_write2_b64 v24, v[4:5], v[6:7] offset1:1
+; CHECK-NEXT: ds_write2_b64 v24, v[4:5], v[6:7] offset0:14 offset1:15
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) @shared, ptr addrspace(4) %0, i64 128, i1 false)
@@ -2379,298 +2219,278 @@ define amdgpu_kernel void @memcpy_p0_p3_optsize(ptr %generic) #1 {
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:127
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:126
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:125
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:124
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:112
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:113
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:114
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:115
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:116
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:117
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:118
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:119
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:127
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:126
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:123
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:125
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:124
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:122
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:121
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:123
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:112
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:113
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:114
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:115
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:116
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:117
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:118
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:119
; CHECK-NEXT: ds_read_u8 v3, v2 offset:120
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:119
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:122
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:121
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:118
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:121
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:122
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:123
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:124
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:125
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:126
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:127
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:120
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:119
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:117
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:116
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:118
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:115
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:114
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:117
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:116
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:113
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:115
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:114
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:112
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:111
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:113
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:110
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:109
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:112
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:111
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:108
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:110
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:109
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:107
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:106
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:108
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:105
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:104
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:107
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:106
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:103
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:105
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:104
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:102
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:101
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:103
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:100
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:121
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:122
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:123
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:124
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:125
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:126
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:127
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:96
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:97
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:98
; CHECK-NEXT: ds_read_u8 v6, v2 offset:99
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:102
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:101
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:98
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:100
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:100
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:101
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:102
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:103
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:96
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:97
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:98
; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:99
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:97
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:96
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:98
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:95
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:94
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:97
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:96
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:93
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:95
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:94
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:92
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:91
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:93
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:90
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:89
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:92
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:91
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:88
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:90
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:89
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:87
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:86
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:88
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:85
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:84
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:87
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:86
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:83
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:85
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:84
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:82
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:81
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:100
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:101
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:102
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:103
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:104
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:105
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:106
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:107
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:108
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:109
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:110
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:111
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:83
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:104
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:105
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:106
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:107
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:108
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:109
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:110
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:111
; CHECK-NEXT: ds_read_u8 v3, v2 offset:80
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:79
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:82
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:81
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:78
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:81
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:82
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:83
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:84
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:85
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:86
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:87
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:80
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:79
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:77
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:76
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:78
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:75
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:74
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:77
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:76
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:73
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:75
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:74
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:72
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:71
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:73
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:70
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:69
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:72
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:71
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:68
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:70
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:69
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:67
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:66
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:68
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:81
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:82
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:83
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:84
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:85
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:86
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:87
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:88
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:89
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:90
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:91
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:92
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:93
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:94
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:95
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:88
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:89
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:90
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:91
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:92
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:93
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:94
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:95
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:64
; CHECK-NEXT: ds_read_u8 v4, v2 offset:65
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:64
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:66
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:67
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:68
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:69
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:70
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:71
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:67
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:66
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:63
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:64
; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:65
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:64
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:62
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:61
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:66
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:67
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:68
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:69
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:70
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:71
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:72
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:73
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:74
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:75
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:76
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:77
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:78
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:79
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:63
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:60
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:72
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:73
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:74
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:75
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:76
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:77
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:78
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:79
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:48
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:49
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:50
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:51
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:52
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:53
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:54
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:55
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:48
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:49
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:50
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:51
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:52
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:53
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:54
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:55
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:56
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:57
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:58
; CHECK-NEXT: ds_read_u8 v6, v2 offset:59
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:62
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:61
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:58
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:60
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:60
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:61
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:62
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:63
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:56
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:57
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:58
; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:59
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:57
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:56
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:58
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:55
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:54
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:57
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:56
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:53
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:55
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:54
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:52
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:51
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:53
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:50
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:49
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:52
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:51
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:48
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:50
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:49
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:47
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:46
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:48
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:45
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:44
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:47
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:46
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:43
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:45
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:44
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:42
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:41
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:43
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:60
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:61
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:62
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:63
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:32
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:33
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:34
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:35
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:36
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:37
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:38
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:39
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:32
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:33
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:34
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:35
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:36
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:37
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:38
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:39
; CHECK-NEXT: ds_read_u8 v3, v2 offset:40
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:39
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:42
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:41
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:38
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:41
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:42
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:43
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:44
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:45
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:46
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:47
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:40
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:39
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:37
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:36
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:38
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:35
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:34
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:37
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:36
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:33
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:35
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:34
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:32
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:31
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:41
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:42
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:43
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:44
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:45
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:46
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:47
+; CHECK-NEXT: ds_read_u8 v3, v2
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:1
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:2
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:3
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:4
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:5
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:6
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:7
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:8
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:9
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:10
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:11
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:12
+; CHECK-NEXT: ds_read_u8 v16, v2 offset:13
+; CHECK-NEXT: ds_read_u8 v17, v2 offset:14
+; CHECK-NEXT: ds_read_u8 v18, v2 offset:15
+; CHECK-NEXT: ds_read_u8 v19, v2 offset:16
+; CHECK-NEXT: ds_read_u8 v20, v2 offset:17
+; CHECK-NEXT: ds_read_u8 v21, v2 offset:18
+; CHECK-NEXT: ds_read_u8 v22, v2 offset:19
+; CHECK-NEXT: ds_read_u8 v23, v2 offset:20
+; CHECK-NEXT: ds_read_u8 v24, v2 offset:21
+; CHECK-NEXT: ds_read_u8 v25, v2 offset:22
+; CHECK-NEXT: ds_read_u8 v26, v2 offset:23
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:33
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:30
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:29
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:32
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:31
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:28
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:29
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:27
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:26
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:28
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:25
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:24
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:27
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:26
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:23
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:25
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:24
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:22
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:21
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:23
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:20
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:19
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:22
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:21
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:18
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:20
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:19
-; CHECK-NEXT: ds_read_u8 v3, v2 offset:16
-; CHECK-NEXT: ds_read_u8 v5, v2 offset:17
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:18
-; CHECK-NEXT: ds_read_u8 v4, v2 offset:8
-; CHECK-NEXT: ds_read_u8 v6, v2 offset:9
-; CHECK-NEXT: ds_read_u8 v7, v2 offset:10
-; CHECK-NEXT: ds_read_u8 v8, v2 offset:11
-; CHECK-NEXT: ds_read_u8 v9, v2 offset:12
-; CHECK-NEXT: ds_read_u8 v10, v2 offset:13
-; CHECK-NEXT: ds_read_u8 v11, v2 offset:14
-; CHECK-NEXT: ds_read_u8 v12, v2 offset:15
-; CHECK-NEXT: ds_read_u8 v13, v2
-; CHECK-NEXT: ds_read_u8 v14, v2 offset:1
-; CHECK-NEXT: ds_read_u8 v15, v2 offset:2
-; CHECK-NEXT: ds_read_u8 v16, v2 offset:3
-; CHECK-NEXT: ds_read_u8 v17, v2 offset:4
-; CHECK-NEXT: ds_read_u8 v18, v2 offset:5
-; CHECK-NEXT: ds_read_u8 v19, v2 offset:6
-; CHECK-NEXT: ds_read_u8 v2, v2 offset:7
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:17
-; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:16
-; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:15
-; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:14
-; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:13
-; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:12
-; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
-; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:10
-; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:9
-; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:8
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:7
-; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:6
-; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:5
-; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:4
-; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
-; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:2
-; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:1
-; CHECK-NEXT: flat_store_byte v[0:1], v13
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:19
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:23
+; CHECK-NEXT: ds_read_u8 v19, v2 offset:24
+; CHECK-NEXT: ds_read_u8 v20, v2 offset:25
+; CHECK-NEXT: ds_read_u8 v21, v2 offset:26
+; CHECK-NEXT: ds_read_u8 v22, v2 offset:27
+; CHECK-NEXT: ds_read_u8 v23, v2 offset:28
+; CHECK-NEXT: ds_read_u8 v24, v2 offset:29
+; CHECK-NEXT: ds_read_u8 v25, v2 offset:30
+; CHECK-NEXT: ds_read_u8 v2, v2 offset:31
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:27
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:28
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:31
+; CHECK-NEXT: flat_store_byte v[0:1], v3
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:1
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:2
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:3
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:4
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:5
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:6
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:7
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:9
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:10
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:11
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:12
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:13
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:14
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:15
; CHECK-NEXT: s_endpgm
entry:
tail call void @llvm.memcpy.p0.p3.i64(ptr %generic, ptr addrspace(3) @shared, i64 128, i1 false)
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-param-combinations.ll b/llvm/test/CodeGen/AMDGPU/memcpy-param-combinations.ll
new file mode 100644
index 0000000000000..7575782c1b2ac
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-param-combinations.ll
@@ -0,0 +1,9296 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 %s -o - | FileCheck %s
+
+; Testing memcpy codegen with vector operands for all combinations of the following parameters:
+; destination address space: 0, 1, 3, 5
+; source address space: 0, 1, 3, 4, 5
+; alignment: 1, 2, 8, 16
+; size in bytes: 16, 31, 32
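+; Each function name encodes one combination: for example,
+; memcpy_p0_p1_sz31_align_2_2 below copies 31 bytes from an addrspace(1)
+; source to an addrspace(0) destination, with both pointers aligned to 2.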
+
+
+define void @memcpy_p0_p0_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
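+
+; Note: in the addrspace(0) cases, flat_load/flat_store may target LDS, so
+; each access counts against both vmcnt and lgkmcnt and the generated checks
+; wait on both counters before each dependent store; the checks for global
+; (addrspace 1) sources later in the file wait only on vmcnt.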
+
+define void @memcpy_p0_p0_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xe
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v18
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3] offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v19
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:31
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v12, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v13, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v14, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v15, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v16, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v17, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v18, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v12, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v13, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v14, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v15, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v16, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v17, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v18, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x10
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(16)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
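+
+; With 8- and 16-byte alignment the lowering switches to 128-bit
+; flat_load_dwordx4/flat_store_dwordx4 accesses; only the misaligned tail
+; of the 31-byte copies still falls back to byte loads and stores.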
+
+define void @memcpy_p0_p0_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x10
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(16)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p0_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p0_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xe
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v18
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:29
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:27
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:25
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:23
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:21
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:19
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:17
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v19, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v19
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:31
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:29
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:27
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:25
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:23
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:21
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:19
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:17
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ushort v11, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v12, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v13, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v14, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v15, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v16, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v17, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v18, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ushort v11, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v12, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v13, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v14, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v15, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v16, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v17, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v18, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v8 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:15
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v9 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:19
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v10 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:23
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v11 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 8, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 24, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 8, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 24, v11
+; CHECK-NEXT: v_lshrrev_b32_e32 v9, 8, v11
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v8 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:15
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v9 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:19
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v10 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:23
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v11 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 8, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 24, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 8, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 24, v11
+; CHECK-NEXT: v_lshrrev_b32_e32 v9, 8, v11
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p1_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p1_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:15
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:14
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:13
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:12
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:11
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:10
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:9
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:8
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:7
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:6
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:5
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:4
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:3
+; CHECK-NEXT: ds_read_u8 v16, v2 offset:2
+; CHECK-NEXT: ds_read_u8 v17, v2 offset:1
+; CHECK-NEXT: ds_read_u8 v2, v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:14
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:13
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:12
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:11
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:10
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:9
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:8
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:7
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:6
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:5
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:4
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:3
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:2
+; CHECK-NEXT: ds_read_u8 v16, v2 offset:1
+; CHECK-NEXT: ds_read_u8 v17, v2
+; CHECK-NEXT: ds_read_u8 v18, v2 offset:15
+; CHECK-NEXT: ds_read_u8 v19, v2 offset:16
+; CHECK-NEXT: ds_read_u8 v20, v2 offset:17
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:13
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:11
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:9
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v17
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:30
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:29
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:28
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:27
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:26
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:25
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:24
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:23
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:22
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:21
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:20
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:19
+; CHECK-NEXT: ds_read_u8 v2, v2 offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:29
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:27
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:25
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:21
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:19
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:15
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:14
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:13
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:12
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:11
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:10
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:9
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:8
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:7
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:6
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:5
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:4
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:3
+; CHECK-NEXT: ds_read_u8 v16, v2 offset:2
+; CHECK-NEXT: ds_read_u8 v17, v2 offset:1
+; CHECK-NEXT: ds_read_u8 v18, v2
+; CHECK-NEXT: ds_read_u8 v19, v2 offset:16
+; CHECK-NEXT: ds_read_u8 v20, v2 offset:17
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v18
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:31
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:30
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:29
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:28
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:27
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:26
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:25
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:24
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:23
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:22
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:21
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:20
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:19
+; CHECK-NEXT: ds_read_u8 v2, v2 offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:31
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u16 v3, v2 offset:14
+; CHECK-NEXT: ds_read_u16 v4, v2 offset:12
+; CHECK-NEXT: ds_read_u16 v5, v2 offset:10
+; CHECK-NEXT: ds_read_u16 v6, v2 offset:8
+; CHECK-NEXT: ds_read_u16 v7, v2 offset:6
+; CHECK-NEXT: ds_read_u16 v8, v2 offset:4
+; CHECK-NEXT: ds_read_u16 v9, v2 offset:2
+; CHECK-NEXT: ds_read_u16 v2, v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v3 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:30
+; CHECK-NEXT: ds_read_u16 v4, v2 offset:28
+; CHECK-NEXT: ds_read_u16 v5, v2 offset:26
+; CHECK-NEXT: ds_read_u16 v6, v2 offset:24
+; CHECK-NEXT: ds_read_u16 v7, v2 offset:22
+; CHECK-NEXT: ds_read_u16 v8, v2 offset:20
+; CHECK-NEXT: ds_read_u16 v9, v2 offset:18
+; CHECK-NEXT: ds_read_u16 v10, v2 offset:16
+; CHECK-NEXT: ds_read_u16 v11, v2 offset:14
+; CHECK-NEXT: ds_read_u16 v12, v2 offset:12
+; CHECK-NEXT: ds_read_u16 v13, v2 offset:10
+; CHECK-NEXT: ds_read_u16 v14, v2 offset:8
+; CHECK-NEXT: ds_read_u16 v15, v2 offset:6
+; CHECK-NEXT: ds_read_u16 v16, v2 offset:4
+; CHECK-NEXT: ds_read_u16 v17, v2 offset:2
+; CHECK-NEXT: ds_read_u16 v2, v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u16 v3, v2 offset:30
+; CHECK-NEXT: ds_read_u16 v4, v2 offset:28
+; CHECK-NEXT: ds_read_u16 v5, v2 offset:26
+; CHECK-NEXT: ds_read_u16 v6, v2 offset:24
+; CHECK-NEXT: ds_read_u16 v7, v2 offset:22
+; CHECK-NEXT: ds_read_u16 v8, v2 offset:20
+; CHECK-NEXT: ds_read_u16 v9, v2 offset:18
+; CHECK-NEXT: ds_read_u16 v10, v2 offset:16
+; CHECK-NEXT: ds_read_u16 v11, v2 offset:14
+; CHECK-NEXT: ds_read_u16 v12, v2 offset:12
+; CHECK-NEXT: ds_read_u16 v13, v2 offset:10
+; CHECK-NEXT: ds_read_u16 v14, v2 offset:8
+; CHECK-NEXT: ds_read_u16 v15, v2 offset:6
+; CHECK-NEXT: ds_read_u16 v16, v2 offset:4
+; CHECK-NEXT: ds_read_u16 v17, v2 offset:2
+; CHECK-NEXT: ds_read_u16 v2, v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v2 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[3:6], v2 offset1:1
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v7 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:15
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v8 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:19
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v9 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:23
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v10 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 24, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 8, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 8, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 24, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v9, 8, v10
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[3:6], v2 offset0:2 offset1:3
+; CHECK-NEXT: ds_read2_b64 v[7:10], v2 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[7:10]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v7 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:15
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v8 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:19
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v9 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:23
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v10 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 24, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 8, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 8, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 24, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v9, 8, v10
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p3_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p3_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2 offset:16
+; CHECK-NEXT: ds_read_b128 v[7:10], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[7:10]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:1
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:2
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:3
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:4
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:5
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:6
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:7
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:8
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:9
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:10
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:11
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:12
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:13
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:29
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:27
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:25
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:23
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:21
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:19
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:17
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v19, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v19
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:31
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:29
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:27
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:25
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:23
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:21
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:19
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:17
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v4
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:2
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:4
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:6
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:8
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:10
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:12
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ushort v11, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v12, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v13, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v14, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v15, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v16, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v17, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v18, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ushort v11, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v12, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v13, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v14, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v15, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v16, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v17, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v18, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v2 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:15
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v3 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:19
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v4 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:23
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v5 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 24, v2
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 8, v2
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 24, v3
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 8, v3
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 24, v4
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; CHECK-NEXT: v_lshrrev_b32_e32 v9, 24, v5
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 8, v5
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v2 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:15
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v3 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:19
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v4 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:23
+; CHECK-NEXT: flat_store_byte_d16_hi v[0:1], v5 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 24, v2
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 8, v2
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 24, v3
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 8, v3
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 24, v4
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; CHECK-NEXT: v_lshrrev_b32_e32 v9, 24, v5
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 8, v5
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:16
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p4_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p4_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x11
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v17
+; CHECK-NEXT: s_clause 0xc
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x11
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v18
+; CHECK-NEXT: s_clause 0xd
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v3 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v11, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v12, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v13, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v14, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v15, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v16, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v17, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v11, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v12, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v13, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v14, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v15, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v16, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v17, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:15
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:19
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:23
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[7:10] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:15
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:19
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:23
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p0_p5_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p0_p5_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[7:10] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3]
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3] offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 8, v10
+; CHECK-NEXT: v_lshl_or_b32 v4, v6, 8, v5
+; CHECK-NEXT: v_lshl_or_b32 v5, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v6, v11, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v8, v12, 8, v13
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 8, v15
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v9, v16, 8, v17
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v10, v18, 8, v2
+; CHECK-NEXT: v_lshl_or_b32 v2, v6, 16, v5
+; CHECK-NEXT: v_lshl_or_b32 v5, v8, 16, v7
+; CHECK-NEXT: v_lshl_or_b32 v4, v10, 16, v9
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v22, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v23, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v24, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v25, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v26, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v27, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v28, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v29, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v30, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v31, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v32, v[2:3]
+; CHECK-NEXT: flat_load_ubyte v33, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3] offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 8, v9
+; CHECK-NEXT: v_lshl_or_b32 v4, v6, 8, v5
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v6, v10, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v11, v11, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v13, v13, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(19) lgkmcnt(19)
+; CHECK-NEXT: v_lshl_or_b32 v5, v7, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v9, v15, 8, v16
+; CHECK-NEXT: v_lshl_or_b32 v7, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v8, v17, 8, v18
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v15, v19, 8, v20
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v10, v21, 8, v22
+; CHECK-NEXT: v_lshl_or_b32 v9, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v14, v23, 8, v24
+; CHECK-NEXT: v_lshl_or_b32 v5, v11, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v12, v25, 8, v26
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v17, v27, 8, v28
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v16, v29, 8, v30
+; CHECK-NEXT: v_lshl_or_b32 v4, v14, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v18, v31, 8, v32
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v2, v33, 8, v2
+; CHECK-NEXT: v_lshl_or_b32 v3, v17, 16, v16
+; CHECK-NEXT: v_lshl_or_b32 v2, v2, 16, v18
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[6:7], off offset:23
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:31
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v22, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v23, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v24, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v25, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v26, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v27, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v28, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v29, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v30, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v31, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v32, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v33, v[2:3]
+; CHECK-NEXT: flat_load_ubyte v34, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3] offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 8, v10
+; CHECK-NEXT: v_lshl_or_b32 v4, v6, 8, v5
+; CHECK-NEXT: v_lshl_or_b32 v6, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v7, v11, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v12, v12, 8, v13
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v14, v14, 8, v15
+; CHECK-NEXT: v_lshl_or_b32 v5, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v9, v16, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v8, v18, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v11, v20, 8, v21
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v10, v22, 8, v23
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v15, v24, 8, v25
+; CHECK-NEXT: v_lshl_or_b32 v9, v12, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v13, v26, 8, v27
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v17, v28, 8, v29
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v16, v30, 8, v31
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v18, v32, 8, v33
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v19, v34, 8, v2
+; CHECK-NEXT: v_lshl_or_b32 v2, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v16
+; CHECK-NEXT: v_lshl_or_b32 v6, v19, 16, v18
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v5, v[2:3]
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v2, v9, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v7
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ushort v12, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ushort v13, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v14, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ushort v19, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v20, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v21, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v22, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v23, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v8, v8, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v10, v10, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: v_lshl_or_b32 v7, v4, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v6, v12, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 16, v14
+; CHECK-NEXT: v_lshl_or_b32 v8, v8, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v13, v15, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v12, v17, 8, v18
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v4, v19, 16, v20
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v3, v21, 16, v22
+; CHECK-NEXT: v_lshl_or_b32 v9, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v2, v23, 16, v2
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[6:7], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:23
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ushort v12, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ushort v13, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ushort v14, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ushort v15, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v16, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v17, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v18, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v19, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v5, v4, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v4, v6, 16, v9
+; CHECK-NEXT: v_lshl_or_b32 v9, v7, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v3, v12, 16, v13
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v2, v14, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v7, v16, 16, v17
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v6, v18, 16, v19
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x10
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:17
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v7, v7, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v10, v9, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v11, v11, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v13, v13, 8, v14
+; CHECK-NEXT: v_lshl_or_b32 v8, v7, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v14, v15, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v12, v17, 8, v18
+; CHECK-NEXT: v_lshl_or_b32 v9, v11, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v15, v19, 8, v20
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 8, v21
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 16, v12
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x10
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:17
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v7, v7, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v10, v9, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v11, v11, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v13, v13, 8, v14
+; CHECK-NEXT: v_lshl_or_b32 v8, v7, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v14, v15, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v12, v17, 8, v18
+; CHECK-NEXT: v_lshl_or_b32 v9, v11, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v15, v19, 8, v20
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 8, v21
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 16, v12
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p0_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p0_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
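+; Global-to-global copies: with an addrspace(1) source the loads below are
+; global_load_* and only vmcnt is waited on, whereas the flat_load_* cases
+; above also tick lgkmcnt.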
+define void @memcpy_p1_p1_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x2
+; CHECK-NEXT: global_load_dwordx2 v[8:9], v[2:3], off offset:23
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_dwordx2 v[2:3], v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[2:3], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x2
+; CHECK-NEXT: global_load_dwordx2 v[8:9], v[2:3], off offset:23
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_dwordx2 v[2:3], v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[2:3], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:15
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:15
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p1_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p1_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
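+; LDS (addrspace(3)) sources: the copies below lower to ds_read_b128 /
+; ds_read2_b64 and the loads are ordered with lgkmcnt-only waits.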
+define void @memcpy_p1_p3_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b64 v[7:8], v2
+; CHECK-NEXT: ds_read_b128 v[3:6], v2 offset:8
+; CHECK-NEXT: ds_read_b64 v[9:10], v2 offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[7:8], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[9:10], off offset:23
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b64 v[7:8], v2
+; CHECK-NEXT: ds_read_b128 v[3:6], v2 offset:8
+; CHECK-NEXT: ds_read_b64 v[9:10], v2 offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[7:8], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[9:10], off offset:23
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v2 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[3:6], v2 offset1:1
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[3:6], v2 offset1:1
+; CHECK-NEXT: ds_read2_b64 v[7:10], v2 offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p3_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p3_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
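+; Constant (addrspace(4)) sources are read with the same global_load_*
+; sequences as the global-to-global cases above.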
+define void @memcpy_p1_p4_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx2 v[8:9], v[2:3], off
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:8
+; CHECK-NEXT: global_load_dwordx2 v[2:3], v[2:3], off offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[2:3], off offset:23
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx2 v[8:9], v[2:3], off
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:8
+; CHECK-NEXT: global_load_dwordx2 v[2:3], v[2:3], off offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[2:3], off offset:23
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
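+; Private (addrspace(5)) sources go through buffer_load_* "offen" accesses;
+; for align 1/2 the bytes/shorts are reassembled into dwords with
+; v_lshl_or_b32 before the wide global stores.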
+define void @memcpy_p1_p5_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v4, v4, 8, v3
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v5, v6, 8, v5
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v7, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v6, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v9, v14, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v3, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v10, v16, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v11, v2, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v5, v9, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v10
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: v_lshl_or_b32 v4, v4, 8, v3
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: v_lshl_or_b32 v5, v6, 8, v5
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v7, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: v_lshl_or_b32 v10, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: v_lshl_or_b32 v11, v14, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v3, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: v_lshl_or_b32 v6, v16, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: v_lshl_or_b32 v9, v18, 8, v17
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v13, v21, 8, v20
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v15, v23, 8, v22
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v12, v19, 8, v25
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: v_lshl_or_b32 v14, v26, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: v_lshl_or_b32 v16, v28, 8, v27
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v17, v30, 8, v29
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v18, v31, 8, v21
+; CHECK-NEXT: v_lshl_or_b32 v7, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v19, v2, 8, v32
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v5, v9, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v6, v14, 16, v15
+; CHECK-NEXT: v_lshl_or_b32 v9, v17, 16, v16
+; CHECK-NEXT: v_lshl_or_b32 v8, v19, 16, v18
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[6:7], off offset:16
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:23
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: v_lshl_or_b32 v4, v4, 8, v3
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: v_lshl_or_b32 v5, v6, 8, v5
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: v_lshl_or_b32 v7, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v10, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v11, v14, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v3, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v6, v16, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v9, v18, 8, v17
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: v_lshl_or_b32 v13, v21, 8, v20
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: v_lshl_or_b32 v15, v23, 8, v22
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v12, v19, 8, v25
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v14, v26, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v16, v28, 8, v27
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v17, v30, 8, v29
+; CHECK-NEXT: v_lshl_or_b32 v7, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v18, v32, 8, v31
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v19, v2, 8, v33
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v5, v9, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v6, v14, 16, v15
+; CHECK-NEXT: v_lshl_or_b32 v9, v17, 16, v16
+; CHECK-NEXT: v_lshl_or_b32 v8, v19, 16, v18
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v3, v6, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v5, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v4, v10, 16, v9
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v19, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v20, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v21, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v22, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v3, v6, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v4, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v10, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v9, v14, 8, v13
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v11, v16, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v12, v18, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v9, v9, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v7, v20, 16, v19
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v6, v22, 16, v21
+; CHECK-NEXT: v_lshl_or_b32 v8, v12, 16, v11
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[6:7], off offset:16
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:23
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v11, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v12, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v13, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v14, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v15, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v16, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v17, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v18, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v3, v6, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v4, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v6, v12, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v9, v16, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v8, v18, 16, v17
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v7, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v2, v10, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v11, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v12, v14, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v10, v2, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v8, v16, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v9, v18, 8, v17
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v13, v20, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v14, v22, 8, v21
+; CHECK-NEXT: v_lshl_or_b32 v9, v9, 16, v8
+; CHECK-NEXT: v_lshl_or_b32 v8, v12, 16, v11
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v7, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v2, v10, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v11, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v12, v14, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v10, v2, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v8, v16, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v9, v18, 8, v17
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v13, v20, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v14, v22, 8, v21
+; CHECK-NEXT: v_lshl_or_b32 v9, v9, 16, v8
+; CHECK-NEXT: v_lshl_or_b32 v8, v12, 16, v11
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p5_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p1_p5_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
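+; LDS (addrspace(3)) destinations are written with ds_write2_b64, and a final
+; s_waitcnt lgkmcnt(0) is emitted before the return.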
+define void @memcpy_p3_p0_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2]
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2] offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 8, v9
+; CHECK-NEXT: v_lshl_or_b32 v3, v5, 8, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v7, v11, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 8, v14
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v9, v17, 8, v1
+; CHECK-NEXT: v_lshl_or_b32 v1, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v21, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v22, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v23, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v24, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v25, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v26, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v27, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v28, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v29, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v30, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v31, v[1:2]
+; CHECK-NEXT: flat_load_ubyte v32, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2] offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 8, v8
+; CHECK-NEXT: v_lshl_or_b32 v3, v5, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v5, v9, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v10, v10, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v12, v12, 8, v13
+; CHECK-NEXT: s_waitcnt vmcnt(19) lgkmcnt(19)
+; CHECK-NEXT: v_lshl_or_b32 v4, v6, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 8, v15
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v6, v16, 8, v17
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v9, v18, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v8, v20, 8, v21
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v13, v22, 8, v23
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v11, v24, 8, v25
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v15, v26, 8, v27
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v14, v28, 8, v29
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v16, v30, 8, v31
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v17, v32, 8, v1
+; CHECK-NEXT: v_lshl_or_b32 v1, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v6, v10, 16, v12
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 16, v11
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v14
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v16
+; CHECK-NEXT: ds_write_b64 v0, v[1:2] offset:23
+; CHECK-NEXT: ds_write_b64 v0, v[3:4] offset:16
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:31
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v21, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v22, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v23, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v24, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v25, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v26, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v27, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v28, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v29, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v30, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v31, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v32, v[1:2]
+; CHECK-NEXT: flat_load_ubyte v33, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2] offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 8, v9
+; CHECK-NEXT: v_lshl_or_b32 v3, v5, 8, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v11, v11, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v10, v13, 8, v14
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v7, v15, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v6, v17, 8, v18
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v9, v19, 8, v20
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v8, v21, 8, v22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v13, v23, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v12, v25, 8, v26
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v15, v27, 8, v28
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v14, v29, 8, v30
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v16, v31, 8, v32
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v17, v33, 8, v1
+; CHECK-NEXT: v_lshl_or_b32 v1, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v6, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 16, v12
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v14
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v16
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[1:2] offset0:2 offset1:3
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v4, v[1:2]
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v1, v8, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v4, v9, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v3, v10, 16, v6
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ushort v11, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v12, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ushort v17, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v18, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v19, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v20, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v21, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v22, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v7, v7, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v1, v10, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v12
+; CHECK-NEXT: v_lshl_or_b32 v11, v5, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v10, v13, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v9, v15, 8, v16
+; CHECK-NEXT: v_lshl_or_b32 v7, v11, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v3, v17, 16, v18
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v6, v19, 16, v20
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v5, v21, 16, v22
+; CHECK-NEXT: ds_write_b64 v0, v[1:2] offset:16
+; CHECK-NEXT: ds_write2_b64 v0, v[5:6], v[3:4] offset1:1
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ushort v11, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ushort v12, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ushort v13, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ushort v14, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v15, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v16, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v17, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v18, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v1, v5, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v5, v8, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v3, v13, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v18
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[1:2] offset0:2 offset1:3
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x10
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:17
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v9, v8, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v10, v10, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v12, v12, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v7, v6, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v13, v14, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v11, v16, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v14, v18, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v5, v5, 8, v20
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 16, v11
+; CHECK-NEXT: v_lshl_or_b32 v5, v5, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write_b128 v0, v[5:8] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x10
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:17
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v9, v8, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v10, v10, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v12, v12, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v7, v6, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v13, v14, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v11, v16, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v14, v18, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v5, v5, 8, v20
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 16, v11
+; CHECK-NEXT: v_lshl_or_b32 v5, v5, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: ds_write_b128 v0, v[5:8] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p0_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p0_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x2
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx2 v[7:8], v[1:2], off offset:16
+; CHECK-NEXT: global_load_dwordx2 v[1:2], v[1:2], off offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b64 v0, v[1:2] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x2
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx2 v[7:8], v[1:2], off offset:16
+; CHECK-NEXT: global_load_dwordx2 v[1:2], v[1:2], off offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b64 v0, v[1:2] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p1_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p1_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b64 v[5:6], v1 offset:23
+; CHECK-NEXT: ds_read_b64 v[7:8], v1 offset:16
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: ds_write_b64 v0, v[5:6] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset0:2 offset1:3
+; CHECK-NEXT: ds_read2_b64 v[6:9], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[6:7], v[8:9] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b64 v[5:6], v1 offset:23
+; CHECK-NEXT: ds_read_b64 v[7:8], v1 offset:16
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: ds_write_b64 v0, v[5:6] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset0:2 offset1:3
+; CHECK-NEXT: ds_read2_b64 v[6:9], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[6:7], v[8:9] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset1:1
+; CHECK-NEXT: ds_read_b128 v[6:9], v1 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[6:9] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset0:2 offset1:3
+; CHECK-NEXT: ds_read2_b64 v[6:9], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[6:7], v[8:9] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[1:4], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v1 offset:15
+; CHECK-NEXT: ds_read_b128 v[6:9], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[6:9]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p3_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p3_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v1 offset:16
+; CHECK-NEXT: ds_read_b128 v[6:9], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[6:9]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x2
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx2 v[7:8], v[1:2], off offset:16
+; CHECK-NEXT: global_load_dwordx2 v[1:2], v[1:2], off offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b64 v0, v[1:2] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x2
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx2 v[7:8], v[1:2], off offset:16
+; CHECK-NEXT: global_load_dwordx2 v[1:2], v[1:2], off offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b64 v0, v[1:2] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p4_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p4_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v3, v3, 8, v2
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v4, v5, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v6, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v7, v9, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v5, v11, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v8, v13, 8, v12
+; CHECK-NEXT: v_lshl_or_b32 v2, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v9, v15, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v10, v1, 8, v16
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: v_lshl_or_b32 v4, v8, 16, v5
+; CHECK-NEXT: v_lshl_or_b32 v3, v10, 16, v9
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v21, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v22, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v23, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v24, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v25, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v26, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v27, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v28, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v29, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v30, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v31, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: v_lshl_or_b32 v3, v3, 8, v2
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: v_lshl_or_b32 v4, v5, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v6, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: v_lshl_or_b32 v7, v9, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: v_lshl_or_b32 v9, v11, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: v_lshl_or_b32 v10, v13, 8, v12
+; CHECK-NEXT: v_lshl_or_b32 v2, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: v_lshl_or_b32 v5, v15, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: v_lshl_or_b32 v8, v17, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v12, v20, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v14, v22, 8, v21
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v11, v18, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: v_lshl_or_b32 v13, v25, 8, v23
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: v_lshl_or_b32 v15, v27, 8, v26
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v16, v29, 8, v28
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v17, v30, 8, v20
+; CHECK-NEXT: v_lshl_or_b32 v6, v12, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v18, v1, 8, v31
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: v_lshl_or_b32 v4, v8, 16, v5
+; CHECK-NEXT: v_lshl_or_b32 v3, v10, 16, v9
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 16, v14
+; CHECK-NEXT: v_lshl_or_b32 v8, v16, 16, v15
+; CHECK-NEXT: v_lshl_or_b32 v7, v18, 16, v17
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write_b64 v0, v[5:6] offset:16
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v21, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v22, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v23, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v24, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v25, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v26, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v27, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v28, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v29, v1, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v30, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v31, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v32, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: v_lshl_or_b32 v3, v3, 8, v2
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: v_lshl_or_b32 v4, v5, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: v_lshl_or_b32 v6, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v7, v9, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v9, v11, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v10, v13, 8, v12
+; CHECK-NEXT: v_lshl_or_b32 v2, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v5, v15, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v8, v17, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: v_lshl_or_b32 v12, v20, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: v_lshl_or_b32 v14, v22, 8, v21
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v11, v18, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v13, v25, 8, v23
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v15, v27, 8, v26
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v16, v29, 8, v28
+; CHECK-NEXT: v_lshl_or_b32 v6, v12, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v17, v31, 8, v30
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v18, v1, 8, v32
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: v_lshl_or_b32 v4, v8, 16, v5
+; CHECK-NEXT: v_lshl_or_b32 v3, v10, 16, v9
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 16, v14
+; CHECK-NEXT: v_lshl_or_b32 v8, v16, 16, v15
+; CHECK-NEXT: v_lshl_or_b32 v7, v18, 16, v17
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write2_b64 v0, v[5:6], v[7:8] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v18, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v19, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v20, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v21, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v3, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v4, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v9, v11, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v8, v13, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v10, v15, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v11, v17, 8, v16
+; CHECK-NEXT: v_lshl_or_b32 v8, v8, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v6, v19, 16, v18
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v5, v21, 16, v20
+; CHECK-NEXT: v_lshl_or_b32 v7, v11, 16, v10
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write_b64 v0, v[5:6] offset:16
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v10, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v11, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v12, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v13, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v14, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v15, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v16, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v17, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v3, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v4, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v5, v11, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v16
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write2_b64 v0, v[5:6], v[7:8] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v21, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 8, v2
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v1, v9, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v10, v11, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v11, v13, 8, v12
+; CHECK-NEXT: v_lshl_or_b32 v4, v1, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v3, v15, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v9, v17, 8, v16
+; CHECK-NEXT: v_lshl_or_b32 v2, v11, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v12, v19, 8, v18
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v13, v21, 8, v20
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v3
+; CHECK-NEXT: v_lshl_or_b32 v1, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[5:6], v[7:8] offset1:1
+; CHECK-NEXT: ds_write_b128 v0, v[1:4] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v9, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[6:7], v[8:9] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v21, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v6, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v1, v9, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v10, v11, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v11, v13, 8, v12
+; CHECK-NEXT: v_lshl_or_b32 v9, v1, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v7, v15, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v8, v17, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v12, v19, 8, v18
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v13, v21, 8, v20
+; CHECK-NEXT: v_lshl_or_b32 v8, v8, 16, v7
+; CHECK-NEXT: v_lshl_or_b32 v7, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5]
+; CHECK-NEXT: ds_write_b128 v0, v[6:9] offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p3_p5_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p3_p5_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v9, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[6:9] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xe
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2] offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:31
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v11, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v12, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v13, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v14, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v15, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v16, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v17, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v11, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v12, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v13, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v14, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v15, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v16, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v17, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x10
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x10
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p0_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p0_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v18, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ubyte v19, v[1:2], off offset:29
+; CHECK-NEXT: global_load_ubyte v20, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v21, v[1:2], off offset:27
+; CHECK-NEXT: global_load_ubyte v22, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ubyte v23, v[1:2], off offset:25
+; CHECK-NEXT: global_load_ubyte v24, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ubyte v25, v[1:2], off offset:23
+; CHECK-NEXT: global_load_ubyte v26, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ubyte v27, v[1:2], off offset:21
+; CHECK-NEXT: global_load_ubyte v28, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ubyte v29, v[1:2], off offset:19
+; CHECK-NEXT: global_load_ubyte v30, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ubyte v31, v[1:2], off offset:17
+; CHECK-NEXT: global_load_ubyte v32, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v18, v[1:2], off
+; CHECK-NEXT: global_load_ubyte v19, v[1:2], off offset:31
+; CHECK-NEXT: global_load_ubyte v20, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ubyte v21, v[1:2], off offset:29
+; CHECK-NEXT: global_load_ubyte v22, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v23, v[1:2], off offset:27
+; CHECK-NEXT: global_load_ubyte v24, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ubyte v25, v[1:2], off offset:25
+; CHECK-NEXT: global_load_ubyte v26, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ubyte v27, v[1:2], off offset:23
+; CHECK-NEXT: global_load_ubyte v28, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ubyte v29, v[1:2], off offset:21
+; CHECK-NEXT: global_load_ubyte v30, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ubyte v31, v[1:2], off offset:19
+; CHECK-NEXT: global_load_ubyte v32, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ubyte v33, v[1:2], off offset:17
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v33, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: global_load_ushort v3, v[1:2], off
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v11, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ushort v12, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v14, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v15, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v16, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v17, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ushort v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v11, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ushort v12, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v14, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v15, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v16, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v17, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte_d16_hi v7, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_store_byte_d16_hi v8, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte_d16_hi v9, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte_d16_hi v10, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 24, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 8, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 8, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 24, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 8, v10
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte_d16_hi v7, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_store_byte_d16_hi v8, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte_d16_hi v9, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte_d16_hi v10, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 24, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 8, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 8, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 24, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 8, v10
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p1_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p1_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:15
+; CHECK-NEXT: ds_read_u8 v3, v1 offset:14
+; CHECK-NEXT: ds_read_u8 v4, v1 offset:13
+; CHECK-NEXT: ds_read_u8 v5, v1 offset:12
+; CHECK-NEXT: ds_read_u8 v6, v1 offset:11
+; CHECK-NEXT: ds_read_u8 v7, v1 offset:10
+; CHECK-NEXT: ds_read_u8 v8, v1 offset:9
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:8
+; CHECK-NEXT: ds_read_u8 v10, v1 offset:7
+; CHECK-NEXT: ds_read_u8 v11, v1 offset:6
+; CHECK-NEXT: ds_read_u8 v12, v1 offset:5
+; CHECK-NEXT: ds_read_u8 v13, v1 offset:4
+; CHECK-NEXT: ds_read_u8 v14, v1 offset:3
+; CHECK-NEXT: ds_read_u8 v15, v1 offset:2
+; CHECK-NEXT: ds_read_u8 v16, v1 offset:1
+; CHECK-NEXT: ds_read_u8 v1, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v2, v1
+; CHECK-NEXT: ds_read_u8 v3, v1 offset:1
+; CHECK-NEXT: ds_read_u8 v4, v1 offset:2
+; CHECK-NEXT: ds_read_u8 v5, v1 offset:3
+; CHECK-NEXT: ds_read_u8 v6, v1 offset:4
+; CHECK-NEXT: ds_read_u8 v7, v1 offset:5
+; CHECK-NEXT: ds_read_u8 v8, v1 offset:6
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:7
+; CHECK-NEXT: ds_read_u8 v10, v1 offset:8
+; CHECK-NEXT: ds_read_u8 v11, v1 offset:9
+; CHECK-NEXT: ds_read_u8 v12, v1 offset:10
+; CHECK-NEXT: ds_read_u8 v13, v1 offset:11
+; CHECK-NEXT: ds_read_u8 v14, v1 offset:12
+; CHECK-NEXT: ds_read_u8 v15, v1 offset:13
+; CHECK-NEXT: ds_read_u8 v16, v1 offset:14
+; CHECK-NEXT: ds_read_u8 v17, v1 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:24
+; CHECK-NEXT: ds_read_u8 v3, v1 offset:25
+; CHECK-NEXT: ds_read_u8 v4, v1 offset:26
+; CHECK-NEXT: ds_read_u8 v18, v1 offset:27
+; CHECK-NEXT: ds_read_u8 v19, v1 offset:28
+; CHECK-NEXT: ds_read_u8 v20, v1 offset:29
+; CHECK-NEXT: ds_read_u8 v21, v1 offset:30
+; CHECK-NEXT: ds_read_u8 v22, v1 offset:16
+; CHECK-NEXT: ds_read_u8 v23, v1 offset:17
+; CHECK-NEXT: ds_read_u8 v24, v1 offset:18
+; CHECK-NEXT: ds_read_u8 v25, v1 offset:19
+; CHECK-NEXT: ds_read_u8 v26, v1 offset:20
+; CHECK-NEXT: ds_read_u8 v27, v1 offset:21
+; CHECK-NEXT: ds_read_u8 v28, v1 offset:22
+; CHECK-NEXT: ds_read_u8 v1, v1 offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(27)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(26)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(25)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(23)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(22)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(21)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt lgkmcnt(20)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(19)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt lgkmcnt(18)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(17)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt lgkmcnt(16)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:15
+; CHECK-NEXT: ds_read_u8 v3, v1 offset:14
+; CHECK-NEXT: ds_read_u8 v4, v1 offset:13
+; CHECK-NEXT: ds_read_u8 v5, v1 offset:12
+; CHECK-NEXT: ds_read_u8 v6, v1 offset:11
+; CHECK-NEXT: ds_read_u8 v7, v1 offset:8
+; CHECK-NEXT: ds_read_u8 v8, v1 offset:9
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:10
+; CHECK-NEXT: ds_read_u8 v10, v1
+; CHECK-NEXT: ds_read_u8 v11, v1 offset:1
+; CHECK-NEXT: ds_read_u8 v12, v1 offset:2
+; CHECK-NEXT: ds_read_u8 v13, v1 offset:3
+; CHECK-NEXT: ds_read_u8 v14, v1 offset:4
+; CHECK-NEXT: ds_read_u8 v15, v1 offset:5
+; CHECK-NEXT: ds_read_u8 v16, v1 offset:6
+; CHECK-NEXT: ds_read_u8 v17, v1 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:24
+; CHECK-NEXT: ds_read_u8 v3, v1 offset:25
+; CHECK-NEXT: ds_read_u8 v4, v1 offset:26
+; CHECK-NEXT: ds_read_u8 v5, v1 offset:27
+; CHECK-NEXT: ds_read_u8 v6, v1 offset:28
+; CHECK-NEXT: ds_read_u8 v18, v1 offset:29
+; CHECK-NEXT: ds_read_u8 v19, v1 offset:30
+; CHECK-NEXT: ds_read_u8 v20, v1 offset:31
+; CHECK-NEXT: ds_read_u8 v21, v1 offset:16
+; CHECK-NEXT: ds_read_u8 v22, v1 offset:17
+; CHECK-NEXT: ds_read_u8 v23, v1 offset:18
+; CHECK-NEXT: ds_read_u8 v24, v1 offset:19
+; CHECK-NEXT: ds_read_u8 v25, v1 offset:20
+; CHECK-NEXT: ds_read_u8 v26, v1 offset:21
+; CHECK-NEXT: ds_read_u8 v27, v1 offset:22
+; CHECK-NEXT: ds_read_u8 v1, v1 offset:23
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(16)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u16 v2, v1
+; CHECK-NEXT: ds_read_u16 v3, v1 offset:2
+; CHECK-NEXT: ds_read_u16 v4, v1 offset:4
+; CHECK-NEXT: ds_read_u16 v5, v1 offset:6
+; CHECK-NEXT: ds_read_u16 v6, v1 offset:8
+; CHECK-NEXT: ds_read_u16 v7, v1 offset:10
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:12
+; CHECK-NEXT: ds_read_u16 v1, v1 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:30
+; CHECK-NEXT: ds_read_u16 v3, v1 offset:28
+; CHECK-NEXT: ds_read_u16 v4, v1 offset:26
+; CHECK-NEXT: ds_read_u16 v5, v1 offset:24
+; CHECK-NEXT: ds_read_u16 v6, v1 offset:22
+; CHECK-NEXT: ds_read_u16 v7, v1 offset:20
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:18
+; CHECK-NEXT: ds_read_u16 v9, v1 offset:16
+; CHECK-NEXT: ds_read_u16 v10, v1 offset:14
+; CHECK-NEXT: ds_read_u16 v11, v1 offset:12
+; CHECK-NEXT: ds_read_u16 v12, v1 offset:10
+; CHECK-NEXT: ds_read_u16 v13, v1 offset:8
+; CHECK-NEXT: ds_read_u16 v14, v1 offset:6
+; CHECK-NEXT: ds_read_u16 v15, v1 offset:4
+; CHECK-NEXT: ds_read_u16 v16, v1 offset:2
+; CHECK-NEXT: ds_read_u16 v1, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(14)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(11)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(10)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(9)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u16 v2, v1 offset:30
+; CHECK-NEXT: ds_read_u16 v3, v1 offset:28
+; CHECK-NEXT: ds_read_u16 v4, v1 offset:26
+; CHECK-NEXT: ds_read_u16 v5, v1 offset:24
+; CHECK-NEXT: ds_read_u16 v6, v1 offset:22
+; CHECK-NEXT: ds_read_u16 v7, v1 offset:20
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:18
+; CHECK-NEXT: ds_read_u16 v9, v1 offset:16
+; CHECK-NEXT: ds_read_u16 v10, v1 offset:14
+; CHECK-NEXT: ds_read_u16 v11, v1 offset:12
+; CHECK-NEXT: ds_read_u16 v12, v1 offset:10
+; CHECK-NEXT: ds_read_u16 v13, v1 offset:8
+; CHECK-NEXT: ds_read_u16 v14, v1 offset:6
+; CHECK-NEXT: ds_read_u16 v15, v1 offset:4
+; CHECK-NEXT: ds_read_u16 v16, v1 offset:2
+; CHECK-NEXT: ds_read_u16 v1, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: buffer_store_short v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(14)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(11)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(10)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(9)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset1:1
+; CHECK-NEXT: ds_read_b128 v[6:9], v1 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte_d16_hi v6, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_store_byte_d16_hi v7, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte_d16_hi v8, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte_d16_hi v9, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 24, v6
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 8, v6
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 24, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 8, v9
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset1:1
+; CHECK-NEXT: ds_read2_b64 v[6:9], v1 offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[1:4], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v1
+; CHECK-NEXT: ds_read_b128 v[6:9], v1 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte_d16_hi v6, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_store_byte_d16_hi v7, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte_d16_hi v8, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte_d16_hi v9, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 24, v6
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 8, v6
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 24, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 8, v9
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p3_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p3_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v1
+; CHECK-NEXT: ds_read_b128 v[6:9], v1 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v18, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ubyte v19, v[1:2], off offset:29
+; CHECK-NEXT: global_load_ubyte v20, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v21, v[1:2], off offset:27
+; CHECK-NEXT: global_load_ubyte v22, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ubyte v23, v[1:2], off offset:25
+; CHECK-NEXT: global_load_ubyte v24, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ubyte v25, v[1:2], off offset:23
+; CHECK-NEXT: global_load_ubyte v26, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ubyte v27, v[1:2], off offset:21
+; CHECK-NEXT: global_load_ubyte v28, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ubyte v29, v[1:2], off offset:19
+; CHECK-NEXT: global_load_ubyte v30, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ubyte v31, v[1:2], off offset:17
+; CHECK-NEXT: global_load_ubyte v32, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v18, v[1:2], off
+; CHECK-NEXT: global_load_ubyte v19, v[1:2], off offset:31
+; CHECK-NEXT: global_load_ubyte v20, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ubyte v21, v[1:2], off offset:29
+; CHECK-NEXT: global_load_ubyte v22, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v23, v[1:2], off offset:27
+; CHECK-NEXT: global_load_ubyte v24, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ubyte v25, v[1:2], off offset:25
+; CHECK-NEXT: global_load_ubyte v26, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ubyte v27, v[1:2], off offset:23
+; CHECK-NEXT: global_load_ubyte v28, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ubyte v29, v[1:2], off offset:21
+; CHECK-NEXT: global_load_ubyte v30, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ubyte v31, v[1:2], off offset:19
+; CHECK-NEXT: global_load_ubyte v32, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ubyte v33, v[1:2], off offset:17
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v33, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
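+; With 2-byte alignment the copy is expanded into ushort loads and short
+; stores, with a single byte operation for the odd tail in the sz31 case.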
+define void @memcpy_p5_p4_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: global_load_ushort v3, v[1:2], off
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v11, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ushort v12, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v14, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v15, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v16, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v17, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ushort v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v11, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ushort v12, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v14, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v15, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v16, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v17, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
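+; From alignment 8 upwards the constant-space source is read with dwordx4
+; loads, while the private-memory stores are still issued one dword at a time.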
+define void @memcpy_p5_p4_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte_d16_hi v7, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_store_byte_d16_hi v8, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte_d16_hi v9, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte_d16_hi v10, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 24, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 8, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 8, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 24, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 8, v10
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
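+; Alignment 16 yields the same lowering as alignment 8 for these sizes.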
+define void @memcpy_p5_p4_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte_d16_hi v7, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_store_byte_d16_hi v8, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte_d16_hi v9, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte_d16_hi v10, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 24, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 8, v7
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 24, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v8
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 24, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 8, v9
+; CHECK-NEXT: v_lshrrev_b32_e32 v7, 24, v10
+; CHECK-NEXT: v_lshrrev_b32_e32 v8, 8, v10
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p4_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p4_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
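+; The tests below copy within the private address space (5); both the loads
+; and the stores go through buffer_* instructions on the s[0:3] descriptor.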
+define void @memcpy_p5_p5_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x11
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_clause 0xc
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x11
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_clause 0xd
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
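+; 2-byte aligned private-to-private copies use ushort loads and short stores.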
+define void @memcpy_p5_p5_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v2, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v10, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v11, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v12, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v13, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v14, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v15, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v16, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v10, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v11, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v12, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v13, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v14, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v15, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v16, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_short v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
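+; Even with alignment 8, private-to-private copies are expanded into dword
+; buffer operations, plus byte operations for the misaligned sz31 tail.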
+define void @memcpy_p5_p5_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_dword v16, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v17, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v18, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v19, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_dword v16, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_dword v17, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v18, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v19, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x13
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_dword v16, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v17, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v18, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v19, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_dword v16, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_dword v17, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v18, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v19, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p5_p5_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memcpy_p5_p5_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr addrspace(0) noalias nocapture writeonly, ptr addrspace(0) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p0.p1.i64(ptr addrspace(0) noalias nocapture writeonly, ptr addrspace(1) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p0.p3.i64(ptr addrspace(0) noalias nocapture writeonly, ptr addrspace(3) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p0.p4.i64(ptr addrspace(0) noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p0.p5.i64(ptr addrspace(0) noalias nocapture writeonly, ptr addrspace(5) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(0) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(1) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p1.p3.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(3) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p1.p5.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(5) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) noalias nocapture writeonly, ptr addrspace(0) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) noalias nocapture writeonly, ptr addrspace(1) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noalias nocapture writeonly, ptr addrspace(3) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p3.p4.i64(ptr addrspace(3) noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p3.p5.i64(ptr addrspace(3) noalias nocapture writeonly, ptr addrspace(5) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p5.p0.i64(ptr addrspace(5) noalias nocapture writeonly, ptr addrspace(0) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) noalias nocapture writeonly, ptr addrspace(1) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) noalias nocapture writeonly, ptr addrspace(3) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) noalias nocapture writeonly, ptr addrspace(5) noalias nocapture readonly, i64, i1 immarg) #2
+
+attributes #2 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-scalar-load.ll b/llvm/test/CodeGen/AMDGPU/memcpy-scalar-load.ll
new file mode 100644
index 0000000000000..f60728c16a3ae
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-scalar-load.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 %s -o - | FileCheck %s
+
+; Testing codegen for memcpy with scalar reads.
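+; The source pointer is an inreg addrspace(4) (constant) pointer, so it is
+; uniform and held in SGPRs. Small memcpys from it are expected to lower
+; mostly to scalar loads (s_load_dwordxN), with the loaded values copied into
+; VGPRs for vector stores; misaligned tails may still use vector loads, as in
+; the sz31 case below.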
+
+
+define void @memcpy_p1_p4_sz16_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz16_align_4_4:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-NEXT: v_mov_b32_e32 v3, s5
+; CHECK-NEXT: v_mov_b32_e32 v4, s6
+; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz31_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz31_align_4_4:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v2, s8
+; CHECK-NEXT: v_mov_b32_e32 v3, s9
+; CHECK-NEXT: v_mov_b32_e32 v4, s10
+; CHECK-NEXT: v_mov_b32_e32 v5, s11
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v6, s[4:5] offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:15
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memcpy_p1_p4_sz32_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
+; CHECK-LABEL: memcpy_p1_p4_sz32_align_4_4:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-NEXT: v_mov_b32_e32 v3, s5
+; CHECK-NEXT: v_mov_b32_e32 v4, s6
+; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: v_mov_b32_e32 v6, s8
+; CHECK-NEXT: v_mov_b32_e32 v7, s9
+; CHECK-NEXT: v_mov_b32_e32 v8, s10
+; CHECK-NEXT: v_mov_b32_e32 v9, s11
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 32, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg) #2
+
+attributes #2 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-param-combinations.ll b/llvm/test/CodeGen/AMDGPU/memmove-param-combinations.ll
new file mode 100644
index 0000000000000..cc5256620bfe0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/memmove-param-combinations.ll
@@ -0,0 +1,8698 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 %s -o - | FileCheck %s
+
+; Testing codegen for memmove with vector (VGPR) pointer operands for all combinations of the following parameters:
+; destination address space: 0, 1, 3, 5
+; source address space: 0, 1, 3, 4, 5
+; alignment: 1, 2, 8, 16
+; sizes: 16, 31, 32
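+;
+; Function names encode the parameters as
+; memmove_p<dst-as>_p<src-as>_sz<size>_align_<dst-align>_<src-align>; e.g.
+; @memmove_p0_p1_sz31_align_1_1 moves 31 bytes from an addrspace(1) source to
+; an addrspace(0) destination, each with 1-byte alignment.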
+
+
+define void @memmove_p0_p0_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v22, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v23, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v24, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v25, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v26, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v27, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v28, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v29, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v30, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v31, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v32, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v33, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(30) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v33 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:31
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v22, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v23, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v24, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v25, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v26, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v27, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v28, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v29, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v30, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v31, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v32, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v33, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v34, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(31) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(30) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v33 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v34 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v12, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v13, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v14, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v15, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v16, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v17, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v18, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v12, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v13, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v14, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v15, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v16, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v17, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v18, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x4
+; CHECK-NEXT: flat_load_dword v8, v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3]
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:28
+; CHECK-NEXT: flat_load_dword v9, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: flat_store_dword v[0:1], v8 offset:16
+; CHECK-NEXT: flat_load_dword v8, v[2:3] offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:30
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(2)
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[8:9] offset:20
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:30
+; CHECK-NEXT: flat_load_dword v9, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: flat_store_dword v[0:1], v9 offset:16
+; CHECK-NEXT: flat_load_dword v9, v[2:3] offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_store_dword v[0:1], v9 offset:20
+; CHECK-NEXT: flat_load_dword v2, v[2:3] offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:30
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(2)
+; CHECK-NEXT: flat_store_dword v[0:1], v2 offset:24
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p0_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p0_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p0.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:29
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:27
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:25
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:23
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:21
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:19
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:17
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ubyte v19, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v20, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v21, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v22, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v23, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v24, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v25, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v26, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v27, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v28, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v29, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v30, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v31, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v32, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v33, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v33 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:31
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:29
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:27
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:25
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:23
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:21
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:19
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:17
+; CHECK-NEXT: global_load_ubyte v19, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ubyte v20, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v21, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v22, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v23, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v24, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v25, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v26, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v27, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v28, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v29, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v30, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v31, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v32, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v33, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v34, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v33 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v34 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ushort v11, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v12, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v13, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v14, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v15, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v16, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v17, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v18, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ushort v11, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v12, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v13, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v14, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v15, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v16, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v17, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v18, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x4
+; CHECK-NEXT: global_load_dword v8, v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_dword v[0:1], v8 offset:16
+; CHECK-NEXT: global_load_dword v8, v[2:3], off offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:30
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[8:9] offset:20
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:30
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_dword v[0:1], v9 offset:16
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dword v[0:1], v9 offset:20
+; CHECK-NEXT: global_load_dword v2, v[2:3], off offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:30
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dword v[0:1], v2 offset:24
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p1_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p1_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p1.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:15
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:14
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:13
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:12
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:11
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:10
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:9
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:8
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:7
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:6
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:5
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:4
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:3
+; CHECK-NEXT: ds_read_u8 v16, v2 offset:2
+; CHECK-NEXT: ds_read_u8 v17, v2 offset:1
+; CHECK-NEXT: ds_read_u8 v2, v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:24
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:25
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:26
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:27
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:28
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:29
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:30
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:16
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:17
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:18
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:19
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:20
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:21
+; CHECK-NEXT: ds_read_u8 v16, v2 offset:22
+; CHECK-NEXT: ds_read_u8 v17, v2 offset:23
+; CHECK-NEXT: ds_read_u8 v18, v2 offset:8
+; CHECK-NEXT: ds_read_u8 v19, v2 offset:9
+; CHECK-NEXT: ds_read_u8 v20, v2 offset:10
+; CHECK-NEXT: ds_read_u8 v21, v2 offset:11
+; CHECK-NEXT: ds_read_u8 v22, v2 offset:12
+; CHECK-NEXT: ds_read_u8 v23, v2 offset:13
+; CHECK-NEXT: ds_read_u8 v24, v2 offset:14
+; CHECK-NEXT: ds_read_u8 v25, v2 offset:15
+; CHECK-NEXT: ds_read_u8 v26, v2
+; CHECK-NEXT: ds_read_u8 v27, v2 offset:1
+; CHECK-NEXT: ds_read_u8 v28, v2 offset:2
+; CHECK-NEXT: ds_read_u8 v29, v2 offset:3
+; CHECK-NEXT: ds_read_u8 v30, v2 offset:4
+; CHECK-NEXT: ds_read_u8 v31, v2 offset:5
+; CHECK-NEXT: ds_read_u8 v32, v2 offset:6
+; CHECK-NEXT: ds_read_u8 v2, v2 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:28
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:27
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:23
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:19
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:15
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:14
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:13
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:12
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:11
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:10
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:9
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:7
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:6
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:5
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:4
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:3
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:2
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:1
+; CHECK-NEXT: flat_store_byte v[0:1], v26
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:24
+; CHECK-NEXT: ds_read_u8 v4, v2 offset:25
+; CHECK-NEXT: ds_read_u8 v5, v2 offset:26
+; CHECK-NEXT: ds_read_u8 v6, v2 offset:27
+; CHECK-NEXT: ds_read_u8 v7, v2 offset:28
+; CHECK-NEXT: ds_read_u8 v8, v2 offset:29
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:30
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:31
+; CHECK-NEXT: ds_read_u8 v11, v2 offset:16
+; CHECK-NEXT: ds_read_u8 v12, v2 offset:17
+; CHECK-NEXT: ds_read_u8 v13, v2 offset:18
+; CHECK-NEXT: ds_read_u8 v14, v2 offset:19
+; CHECK-NEXT: ds_read_u8 v15, v2 offset:20
+; CHECK-NEXT: ds_read_u8 v16, v2 offset:21
+; CHECK-NEXT: ds_read_u8 v17, v2 offset:22
+; CHECK-NEXT: ds_read_u8 v18, v2 offset:23
+; CHECK-NEXT: ds_read_u8 v19, v2 offset:8
+; CHECK-NEXT: ds_read_u8 v20, v2 offset:9
+; CHECK-NEXT: ds_read_u8 v21, v2 offset:10
+; CHECK-NEXT: ds_read_u8 v22, v2 offset:11
+; CHECK-NEXT: ds_read_u8 v23, v2 offset:12
+; CHECK-NEXT: ds_read_u8 v24, v2 offset:13
+; CHECK-NEXT: ds_read_u8 v25, v2 offset:14
+; CHECK-NEXT: ds_read_u8 v26, v2 offset:15
+; CHECK-NEXT: ds_read_u8 v27, v2
+; CHECK-NEXT: ds_read_u8 v28, v2 offset:1
+; CHECK-NEXT: ds_read_u8 v29, v2 offset:2
+; CHECK-NEXT: ds_read_u8 v30, v2 offset:3
+; CHECK-NEXT: ds_read_u8 v31, v2 offset:4
+; CHECK-NEXT: ds_read_u8 v32, v2 offset:5
+; CHECK-NEXT: ds_read_u8 v33, v2 offset:6
+; CHECK-NEXT: ds_read_u8 v2, v2 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:31
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:30
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:29
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:28
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:27
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:26
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:25
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:23
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:22
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:21
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:20
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:19
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:18
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:17
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:15
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:14
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:13
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:12
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:11
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:10
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:9
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:7
+; CHECK-NEXT: flat_store_byte v[0:1], v33 offset:6
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:5
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:4
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:3
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:2
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:1
+; CHECK-NEXT: flat_store_byte v[0:1], v27
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u16 v3, v2 offset:14
+; CHECK-NEXT: ds_read_u16 v4, v2 offset:12
+; CHECK-NEXT: ds_read_u16 v5, v2 offset:10
+; CHECK-NEXT: ds_read_u16 v6, v2 offset:8
+; CHECK-NEXT: ds_read_u16 v7, v2 offset:6
+; CHECK-NEXT: ds_read_u16 v8, v2 offset:4
+; CHECK-NEXT: ds_read_u16 v9, v2 offset:2
+; CHECK-NEXT: ds_read_u16 v2, v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v3 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v3, v2 offset:30
+; CHECK-NEXT: ds_read_u16 v4, v2 offset:28
+; CHECK-NEXT: ds_read_u16 v5, v2 offset:26
+; CHECK-NEXT: ds_read_u16 v6, v2 offset:24
+; CHECK-NEXT: ds_read_u16 v7, v2 offset:22
+; CHECK-NEXT: ds_read_u16 v8, v2 offset:20
+; CHECK-NEXT: ds_read_u16 v9, v2 offset:18
+; CHECK-NEXT: ds_read_u16 v10, v2 offset:16
+; CHECK-NEXT: ds_read_u16 v11, v2 offset:14
+; CHECK-NEXT: ds_read_u16 v12, v2 offset:12
+; CHECK-NEXT: ds_read_u16 v13, v2 offset:10
+; CHECK-NEXT: ds_read_u16 v14, v2 offset:8
+; CHECK-NEXT: ds_read_u16 v15, v2 offset:6
+; CHECK-NEXT: ds_read_u16 v16, v2 offset:4
+; CHECK-NEXT: ds_read_u16 v17, v2 offset:2
+; CHECK-NEXT: ds_read_u16 v2, v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u16 v3, v2 offset:30
+; CHECK-NEXT: ds_read_u16 v4, v2 offset:28
+; CHECK-NEXT: ds_read_u16 v5, v2 offset:26
+; CHECK-NEXT: ds_read_u16 v6, v2 offset:24
+; CHECK-NEXT: ds_read_u16 v7, v2 offset:22
+; CHECK-NEXT: ds_read_u16 v8, v2 offset:20
+; CHECK-NEXT: ds_read_u16 v9, v2 offset:18
+; CHECK-NEXT: ds_read_u16 v10, v2 offset:16
+; CHECK-NEXT: ds_read_u16 v11, v2 offset:14
+; CHECK-NEXT: ds_read_u16 v12, v2 offset:12
+; CHECK-NEXT: ds_read_u16 v13, v2 offset:10
+; CHECK-NEXT: ds_read_u16 v14, v2 offset:8
+; CHECK-NEXT: ds_read_u16 v15, v2 offset:6
+; CHECK-NEXT: ds_read_u16 v16, v2 offset:4
+; CHECK-NEXT: ds_read_u16 v17, v2 offset:2
+; CHECK-NEXT: ds_read_u16 v2, v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v2 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b32 v[6:7], v2 offset0:4 offset1:5
+; CHECK-NEXT: ds_read_b32 v8, v2 offset:24
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:30
+; CHECK-NEXT: ds_read_u16 v10, v2 offset:28
+; CHECK-NEXT: ds_read2_b64 v[2:5], v2 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: flat_store_dwordx3 v[0:1], v[6:8] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[3:6], v2 offset0:2 offset1:3
+; CHECK-NEXT: ds_read2_b64 v[7:10], v2 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[7:10]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b32 v[6:7], v2 offset0:4 offset1:5
+; CHECK-NEXT: ds_read_b32 v8, v2 offset:24
+; CHECK-NEXT: ds_read_u8 v9, v2 offset:30
+; CHECK-NEXT: ds_read_u16 v10, v2 offset:28
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: flat_store_dwordx3 v[0:1], v[6:8] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p3_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p3_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2 offset:16
+; CHECK-NEXT: ds_read_b128 v[7:10], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[7:10]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p3.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:29
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:27
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:25
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:23
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:21
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:19
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:17
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ubyte v19, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v20, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v21, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v22, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v23, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v24, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v25, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v26, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v27, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v28, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v29, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v30, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v31, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v32, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v33, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v33 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:31
+; CHECK-NEXT: global_load_ubyte v5, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ubyte v6, v[2:3], off offset:29
+; CHECK-NEXT: global_load_ubyte v7, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:27
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ubyte v10, v[2:3], off offset:25
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v12, v[2:3], off offset:23
+; CHECK-NEXT: global_load_ubyte v13, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ubyte v14, v[2:3], off offset:21
+; CHECK-NEXT: global_load_ubyte v15, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ubyte v16, v[2:3], off offset:19
+; CHECK-NEXT: global_load_ubyte v17, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ubyte v18, v[2:3], off offset:17
+; CHECK-NEXT: global_load_ubyte v19, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ubyte v20, v[2:3], off offset:15
+; CHECK-NEXT: global_load_ubyte v21, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ubyte v22, v[2:3], off offset:13
+; CHECK-NEXT: global_load_ubyte v23, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ubyte v24, v[2:3], off offset:11
+; CHECK-NEXT: global_load_ubyte v25, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ubyte v26, v[2:3], off offset:9
+; CHECK-NEXT: global_load_ubyte v27, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ubyte v28, v[2:3], off offset:7
+; CHECK-NEXT: global_load_ubyte v29, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ubyte v30, v[2:3], off offset:5
+; CHECK-NEXT: global_load_ubyte v31, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ubyte v32, v[2:3], off offset:3
+; CHECK-NEXT: global_load_ubyte v33, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ubyte v34, v[2:3], off offset:1
+; CHECK-NEXT: global_load_ubyte v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v33 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v34 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ushort v11, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v12, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v13, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v14, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v15, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v16, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v17, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v18, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ushort v4, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v5, v[2:3], off offset:28
+; CHECK-NEXT: global_load_ushort v6, v[2:3], off offset:26
+; CHECK-NEXT: global_load_ushort v7, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ushort v8, v[2:3], off offset:22
+; CHECK-NEXT: global_load_ushort v9, v[2:3], off offset:20
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:18
+; CHECK-NEXT: global_load_ushort v11, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v12, v[2:3], off offset:14
+; CHECK-NEXT: global_load_ushort v13, v[2:3], off offset:12
+; CHECK-NEXT: global_load_ushort v14, v[2:3], off offset:10
+; CHECK-NEXT: global_load_ushort v15, v[2:3], off offset:8
+; CHECK-NEXT: global_load_ushort v16, v[2:3], off offset:6
+; CHECK-NEXT: global_load_ushort v17, v[2:3], off offset:4
+; CHECK-NEXT: global_load_ushort v18, v[2:3], off offset:2
+; CHECK-NEXT: global_load_ushort v2, v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v18 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x4
+; CHECK-NEXT: global_load_dword v8, v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_dword v[0:1], v8 offset:16
+; CHECK-NEXT: global_load_dword v8, v[2:3], off offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:30
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[8:9] offset:20
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:30
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_dword v[0:1], v9 offset:16
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dword v[0:1], v9 offset:20
+; CHECK-NEXT: global_load_dword v2, v[2:3], off offset:24
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:30
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dword v[0:1], v2 offset:24
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p4_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p4_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[4:7] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p4.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz16_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz31_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz32_align_1_1(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: flat_store_byte v[0:1], v4 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: flat_store_byte v[0:1], v5 offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: flat_store_byte v[0:1], v6 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: flat_store_byte v[0:1], v7 offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: flat_store_byte v[0:1], v8 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: flat_store_byte v[0:1], v9 offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: flat_store_byte v[0:1], v10 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: flat_store_byte v[0:1], v12 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: flat_store_byte v[0:1], v13 offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: flat_store_byte v[0:1], v14 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: flat_store_byte v[0:1], v15 offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: flat_store_byte v[0:1], v16 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: flat_store_byte v[0:1], v17 offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: flat_store_byte v[0:1], v18 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v19 offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_byte v[0:1], v20 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_byte v[0:1], v21 offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_byte v[0:1], v22 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_byte v[0:1], v23 offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_byte v[0:1], v24 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_byte v[0:1], v25 offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_byte v[0:1], v26 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_byte v[0:1], v27 offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_byte v[0:1], v28 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_byte v[0:1], v29 offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v30 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_byte v[0:1], v31 offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_byte v[0:1], v32 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_byte v[0:1], v33 offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz16_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v3 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz31_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v11, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v12, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v13, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v14, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v15, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v16, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v17, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_byte v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz32_align_2_2(ptr addrspace(0) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v11, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v12, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v13, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v14, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v15, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v16, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v17, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v2, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: flat_store_short v[0:1], v3 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: flat_store_short v[0:1], v4 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: flat_store_short v[0:1], v5 offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: flat_store_short v[0:1], v6 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: flat_store_short v[0:1], v7 offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: flat_store_short v[0:1], v8 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: flat_store_short v[0:1], v9 offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: flat_store_short v[0:1], v11 offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_short v[0:1], v12 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: flat_store_short v[0:1], v13 offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_short v[0:1], v14 offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: flat_store_short v[0:1], v15 offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: flat_store_short v[0:1], v16 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_store_short v[0:1], v17 offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_short v[0:1], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz16_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz31_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x8
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_dwordx3 v[0:1], v[7:9] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:30
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz32_align_8_8(ptr addrspace(0) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[7:10] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz16_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz31_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x8
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: flat_store_dwordx3 v[0:1], v[7:9] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_byte v[0:1], v11 offset:30
+; CHECK-NEXT: flat_store_short v[0:1], v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p0_p5_sz32_align_16_16(ptr addrspace(0) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p0_p5_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[7:10] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p0.p5.i64(ptr addrspace(0) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3]
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3] offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 8, v10
+; CHECK-NEXT: v_lshl_or_b32 v4, v6, 8, v5
+; CHECK-NEXT: v_lshl_or_b32 v5, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v6, v11, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v8, v12, 8, v13
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 8, v15
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v9, v16, 8, v17
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v10, v18, 8, v2
+; CHECK-NEXT: v_lshl_or_b32 v2, v6, 16, v5
+; CHECK-NEXT: v_lshl_or_b32 v5, v8, 16, v7
+; CHECK-NEXT: v_lshl_or_b32 v4, v10, 16, v9
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v22, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v23, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v24, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v25, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v26, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v27, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v28, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v29, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v30, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v31, v[2:3]
+; CHECK-NEXT: flat_load_ubyte v32, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v33, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3] offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(29) lgkmcnt(29)
+; CHECK-NEXT: v_lshl_or_b32 v4, v4, 8, v5
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v9, v8, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(23) lgkmcnt(23)
+; CHECK-NEXT: v_lshl_or_b32 v10, v10, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: v_lshlrev_b16 v12, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(19) lgkmcnt(19)
+; CHECK-NEXT: v_lshl_or_b32 v14, v14, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(17) lgkmcnt(17)
+; CHECK-NEXT: v_lshl_or_b32 v3, v16, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v16, v6, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: v_lshl_or_b32 v15, v18, 8, v19
+; CHECK-NEXT: v_lshl_or_b32 v7, v9, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: v_lshl_or_b32 v11, v20, 8, v21
+; CHECK-NEXT: v_lshl_or_b32 v8, v14, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: v_lshl_or_b32 v18, v22, 8, v23
+; CHECK-NEXT: v_lshl_or_b32 v5, v4, 16, v16
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v17, v24, 8, v25
+; CHECK-NEXT: v_lshl_or_b32 v6, v15, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: v_lshl_or_b32 v20, v26, 8, v27
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: v_lshl_or_b32 v19, v28, 8, v29
+; CHECK-NEXT: v_lshl_or_b32 v4, v18, 16, v17
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v21, v30, 8, v31
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v22, v32, 8, v33
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v12, v12, v2
+; CHECK-NEXT: v_lshl_or_b32 v3, v20, 16, v19
+; CHECK-NEXT: v_lshl_or_b32 v2, v22, 16, v21
+; CHECK-NEXT: global_store_byte v[0:1], v13, off offset:30
+; CHECK-NEXT: global_store_short v[0:1], v12, off offset:28
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: flat_load_ubyte v4, v[2:3] offset:29
+; CHECK-NEXT: flat_load_ubyte v5, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ubyte v6, v[2:3] offset:31
+; CHECK-NEXT: flat_load_ubyte v7, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:25
+; CHECK-NEXT: flat_load_ubyte v9, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ubyte v10, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:27
+; CHECK-NEXT: flat_load_ubyte v12, v[2:3] offset:15
+; CHECK-NEXT: flat_load_ubyte v13, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ubyte v14, v[2:3] offset:13
+; CHECK-NEXT: flat_load_ubyte v15, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ubyte v16, v[2:3] offset:23
+; CHECK-NEXT: flat_load_ubyte v17, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ubyte v18, v[2:3] offset:21
+; CHECK-NEXT: flat_load_ubyte v19, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:19
+; CHECK-NEXT: flat_load_ubyte v21, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ubyte v22, v[2:3] offset:17
+; CHECK-NEXT: flat_load_ubyte v23, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ubyte v24, v[2:3] offset:11
+; CHECK-NEXT: flat_load_ubyte v25, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ubyte v26, v[2:3] offset:9
+; CHECK-NEXT: flat_load_ubyte v27, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ubyte v28, v[2:3] offset:7
+; CHECK-NEXT: flat_load_ubyte v29, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ubyte v30, v[2:3] offset:5
+; CHECK-NEXT: flat_load_ubyte v31, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ubyte v32, v[2:3] offset:1
+; CHECK-NEXT: flat_load_ubyte v33, v[2:3]
+; CHECK-NEXT: flat_load_ubyte v34, v[2:3] offset:3
+; CHECK-NEXT: flat_load_ubyte v2, v[2:3] offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 8, v10
+; CHECK-NEXT: v_lshl_or_b32 v4, v6, 8, v5
+; CHECK-NEXT: v_lshl_or_b32 v6, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v7, v11, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v12, v12, 8, v13
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v14, v14, 8, v15
+; CHECK-NEXT: v_lshl_or_b32 v5, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v9, v16, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v8, v18, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v11, v20, 8, v21
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v10, v22, 8, v23
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v15, v24, 8, v25
+; CHECK-NEXT: v_lshl_or_b32 v9, v12, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v13, v26, 8, v27
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v17, v28, 8, v29
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v16, v30, 8, v31
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v18, v32, 8, v33
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v19, v34, 8, v2
+; CHECK-NEXT: v_lshl_or_b32 v2, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v16
+; CHECK-NEXT: v_lshl_or_b32 v6, v19, 16, v18
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v5, v[2:3]
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v3, v4, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v2, v9, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v7
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ushort v12, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ushort v13, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ushort v14, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ushort v15, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v16, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v17, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v18, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v19, v[2:3]
+; CHECK-NEXT: flat_load_ubyte v20, v[2:3] offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v5, v4, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v4, v9, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v8, v6, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: v_lshl_or_b32 v7, v12, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: v_lshl_or_b32 v6, v14, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v3, v16, 16, v17
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v2, v18, 16, v19
+; CHECK-NEXT: global_store_short v[0:1], v11, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_byte v[0:1], v20, off offset:30
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v4, v[2:3] offset:30
+; CHECK-NEXT: flat_load_ushort v5, v[2:3] offset:28
+; CHECK-NEXT: flat_load_ushort v6, v[2:3] offset:26
+; CHECK-NEXT: flat_load_ushort v7, v[2:3] offset:14
+; CHECK-NEXT: flat_load_ushort v8, v[2:3] offset:12
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:10
+; CHECK-NEXT: flat_load_ushort v11, v[2:3] offset:8
+; CHECK-NEXT: flat_load_ushort v9, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ushort v12, v[2:3] offset:22
+; CHECK-NEXT: flat_load_ushort v13, v[2:3] offset:20
+; CHECK-NEXT: flat_load_ushort v14, v[2:3] offset:18
+; CHECK-NEXT: flat_load_ushort v15, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v16, v[2:3] offset:6
+; CHECK-NEXT: flat_load_ushort v17, v[2:3] offset:4
+; CHECK-NEXT: flat_load_ushort v18, v[2:3] offset:2
+; CHECK-NEXT: flat_load_ushort v19, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v5, v4, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v4, v6, 16, v9
+; CHECK-NEXT: v_lshl_or_b32 v9, v7, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v3, v12, 16, v13
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v2, v14, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v7, v16, 16, v17
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v6, v18, 16, v19
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x4
+; CHECK-NEXT: flat_load_dword v8, v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3]
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:28
+; CHECK-NEXT: flat_load_dword v9, v[2:3] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[2:3] offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: global_store_dword v[0:1], v8, off offset:16
+; CHECK-NEXT: flat_load_dword v8, v[2:3] offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: global_store_byte v[0:1], v11, off offset:30
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:20
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[2:5], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: flat_load_ubyte v8, v[2:3] offset:30
+; CHECK-NEXT: flat_load_dword v9, v[2:3] offset:16
+; CHECK-NEXT: flat_load_ushort v10, v[2:3] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: global_store_dword v[0:1], v9, off offset:16
+; CHECK-NEXT: flat_load_dword v9, v[2:3] offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dword v[0:1], v9, off offset:20
+; CHECK-NEXT: flat_load_dword v2, v[2:3] offset:24
+; CHECK-NEXT: global_store_byte v[0:1], v8, off offset:30
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dword v[0:1], v2, off offset:24
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p0_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p0_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p0.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dwordx3 v[6:8], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_byte v[0:1], v9, off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dwordx3 v[6:8], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_byte v[0:1], v9, off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x4
+; CHECK-NEXT: global_load_dword v8, v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: global_store_dword v[0:1], v8, off offset:16
+; CHECK-NEXT: global_load_dword v8, v[2:3], off offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_byte v[0:1], v11, off offset:30
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:20
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:30
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: global_store_dword v[0:1], v9, off offset:16
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dword v[0:1], v9, off offset:20
+; CHECK-NEXT: global_load_dword v2, v[2:3], off offset:24
+; CHECK-NEXT: global_store_byte v[0:1], v8, off offset:30
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dword v[0:1], v2, off offset:24
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p1_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p1_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b64 v[7:8], v2
+; CHECK-NEXT: ds_read_b128 v[3:6], v2 offset:8
+; CHECK-NEXT: ds_read_b32 v9, v2 offset:24
+; CHECK-NEXT: ds_read_u16 v10, v2 offset:28
+; CHECK-NEXT: ds_read_u8 v2, v2 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[7:8], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: global_store_dword v[0:1], v9, off offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_byte v[0:1], v2, off offset:30
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b64 v[7:8], v2
+; CHECK-NEXT: ds_read_b128 v[3:6], v2 offset:8
+; CHECK-NEXT: ds_read_b32 v9, v2 offset:24
+; CHECK-NEXT: ds_read_u16 v10, v2 offset:28
+; CHECK-NEXT: ds_read_u8 v2, v2 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[7:8], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: global_store_dword v[0:1], v9, off offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_byte v[0:1], v2, off offset:30
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v2 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[3:6], v2 offset1:1
+; CHECK-NEXT: ds_read_b32 v7, v2 offset:16
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:30
+; CHECK-NEXT: ds_read2_b32 v[8:9], v2 offset0:5 offset1:6
+; CHECK-NEXT: ds_read_u16 v2, v2 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[7:9], off offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_short v[0:1], v2, off offset:28
+; CHECK-NEXT: global_store_byte v[0:1], v10, off offset:30
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[3:6], v2 offset1:1
+; CHECK-NEXT: ds_read2_b64 v[7:10], v2 offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b32 v7, v2 offset:16
+; CHECK-NEXT: ds_read_u8 v10, v2 offset:30
+; CHECK-NEXT: ds_read2_b32 v[8:9], v2 offset0:5 offset1:6
+; CHECK-NEXT: ds_read_u16 v2, v2 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[7:9], off offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_short v[0:1], v2, off offset:28
+; CHECK-NEXT: global_store_byte v[0:1], v10, off offset:30
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p3_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p3_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[3:6], v2
+; CHECK-NEXT: ds_read_b128 v[7:10], v2 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p3.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dwordx3 v[6:8], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_byte v[0:1], v9, off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_ubyte v9, v[2:3], off offset:30
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dwordx3 v[6:8], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_byte v[0:1], v9, off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x4
+; CHECK-NEXT: global_load_dword v8, v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[2:3], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: global_store_dword v[0:1], v8, off offset:16
+; CHECK-NEXT: global_load_dword v8, v[2:3], off offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_byte v[0:1], v11, off offset:30
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx2 v[0:1], v[8:9], off offset:20
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_ubyte v8, v[2:3], off offset:30
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[2:3], off offset:28
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: global_store_dword v[0:1], v9, off offset:16
+; CHECK-NEXT: global_load_dword v9, v[2:3], off offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dword v[0:1], v9, off offset:20
+; CHECK-NEXT: global_load_dword v2, v[2:3], off offset:24
+; CHECK-NEXT: global_store_byte v[0:1], v8, off offset:30
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dword v[0:1], v2, off offset:24
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p4_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[4:7], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz16_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v4, v4, 8, v3
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v5, v6, 8, v5
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v7, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v6, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v9, v14, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v3, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v10, v16, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v11, v2, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v5, v9, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v10
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz31_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: v_lshl_or_b32 v2, v4, 8, v3
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: v_lshl_or_b32 v3, v6, 8, v5
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: v_lshlrev_b16 v4, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v7, v9, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v9, v11, 8, v10
+; CHECK-NEXT: v_lshl_or_b32 v8, v3, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v6, v15, 8, v14
+; CHECK-NEXT: v_lshl_or_b32 v2, v9, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v12, v17, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v13, v19, 8, v18
+; CHECK-NEXT: v_lshl_or_b32 v3, v6, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v10, v21, 8, v20
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v11, v23, 8, v22
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: v_lshl_or_b32 v15, v26, 8, v25
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: v_lshl_or_b32 v14, v24, 8, v28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v16, v29, 8, v27
+; CHECK-NEXT: v_lshl_or_b32 v5, v11, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v17, v31, 8, v30
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_or_b32_e32 v18, v4, v32
+; CHECK-NEXT: v_lshl_or_b32 v4, v13, 16, v12
+; CHECK-NEXT: v_lshl_or_b32 v7, v15, 16, v14
+; CHECK-NEXT: v_lshl_or_b32 v6, v16, 16, v17
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_byte v[0:1], v33, off offset:30
+; CHECK-NEXT: global_store_short v[0:1], v18, off offset:28
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz32_align_1_1(ptr addrspace(1) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v2, v2, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: v_lshl_or_b32 v4, v4, 8, v3
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: v_lshl_or_b32 v5, v6, 8, v5
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: v_lshl_or_b32 v7, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v8, v10, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v10, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v11, v14, 8, v13
+; CHECK-NEXT: v_lshl_or_b32 v3, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v6, v16, 8, v15
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v9, v18, 8, v17
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: v_lshl_or_b32 v13, v21, 8, v20
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: v_lshl_or_b32 v15, v23, 8, v22
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v12, v19, 8, v25
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v14, v26, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v16, v28, 8, v27
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v17, v30, 8, v29
+; CHECK-NEXT: v_lshl_or_b32 v7, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v18, v32, 8, v31
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v19, v2, 8, v33
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v5, v9, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v6, v14, 16, v15
+; CHECK-NEXT: v_lshl_or_b32 v9, v17, 16, v16
+; CHECK-NEXT: v_lshl_or_b32 v8, v19, 16, v18
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz16_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v3, v6, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v5, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v4, v10, 16, v9
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz31_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v11, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v12, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v13, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v14, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v15, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v16, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v17, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v18, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v8, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v2, v6, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v7, v15, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v6, v17, 16, v16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: global_store_short v[0:1], v18, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_byte v[0:1], v19, off offset:30
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz32_align_2_2(ptr addrspace(1) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v4, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v5, v2, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v3, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v6, v2, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v7, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v8, v2, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v9, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v11, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v12, v2, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v13, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v14, v2, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v15, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v16, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v17, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v18, v2, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v3, v6, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v4, v8, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v6, v12, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v7, v14, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v9, v16, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v8, v18, 16, v17
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz16_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz31_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x8
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_byte v[0:1], v11, off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[7:9], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz32_align_8_8(ptr addrspace(1) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz16_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz31_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x8
+; CHECK-NEXT: buffer_load_ushort v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: global_store_byte v[0:1], v11, off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[7:9], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p5_sz32_align_16_16(ptr addrspace(1) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p1_p5_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v4, v2, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v5, v2, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v6, v2, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v7, v2, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[7:10], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p5.i64(ptr addrspace(1) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2]
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2] offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 8, v9
+; CHECK-NEXT: v_lshl_or_b32 v3, v5, 8, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v7, v11, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 8, v14
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v9, v17, 8, v1
+; CHECK-NEXT: v_lshl_or_b32 v1, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v21, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v22, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v23, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v24, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v25, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v26, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v27, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v28, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v29, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v30, v[1:2]
+; CHECK-NEXT: flat_load_ubyte v31, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v32, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2] offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(29) lgkmcnt(29)
+; CHECK-NEXT: v_lshl_or_b32 v3, v3, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(27) lgkmcnt(27)
+; CHECK-NEXT: v_lshl_or_b32 v5, v5, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v7, v7, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(23) lgkmcnt(23)
+; CHECK-NEXT: v_lshl_or_b32 v9, v9, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: v_lshlrev_b16 v11, 8, v11
+; CHECK-NEXT: v_lshl_or_b32 v4, v3, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(19) lgkmcnt(19)
+; CHECK-NEXT: v_lshl_or_b32 v13, v13, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(17) lgkmcnt(17)
+; CHECK-NEXT: v_lshl_or_b32 v2, v15, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: v_lshl_or_b32 v10, v17, 8, v18
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: v_lshl_or_b32 v8, v19, 8, v20
+; CHECK-NEXT: v_lshl_or_b32 v13, v13, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: v_lshl_or_b32 v14, v21, 8, v22
+; CHECK-NEXT: v_lshl_or_b32 v2, v7, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v6, v23, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: v_lshl_or_b32 v16, v25, 8, v26
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: v_lshl_or_b32 v15, v27, 8, v28
+; CHECK-NEXT: v_lshl_or_b32 v3, v14, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v17, v29, 8, v30
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v18, v31, 8, v32
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v11, v11, v1
+; CHECK-NEXT: v_lshl_or_b32 v1, v10, 16, v8
+; CHECK-NEXT: v_lshl_or_b32 v6, v16, 16, v15
+; CHECK-NEXT: v_lshl_or_b32 v5, v18, 16, v17
+; CHECK-NEXT: ds_write_b8 v0, v12 offset:30
+; CHECK-NEXT: ds_write_b32 v0, v13 offset:24
+; CHECK-NEXT: ds_write_b16 v0, v11 offset:28
+; CHECK-NEXT: ds_write_b64 v0, v[1:2] offset:16
+; CHECK-NEXT: ds_write2_b64 v0, v[5:6], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:31
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v21, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v22, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v23, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v24, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v25, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v26, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v27, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v28, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v29, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v30, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v31, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v32, v[1:2]
+; CHECK-NEXT: flat_load_ubyte v33, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2] offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 8, v9
+; CHECK-NEXT: v_lshl_or_b32 v3, v5, 8, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v5, v10, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v11, v11, 8, v12
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v10, v13, 8, v14
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v7, v15, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v6, v17, 8, v18
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v9, v19, 8, v20
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v8, v21, 8, v22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v13, v23, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v12, v25, 8, v26
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v15, v27, 8, v28
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v14, v29, 8, v30
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v16, v31, 8, v32
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v17, v33, 8, v1
+; CHECK-NEXT: v_lshl_or_b32 v1, v5, 16, v4
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: v_lshl_or_b32 v6, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 16, v12
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v14
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v16
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[1:2] offset0:2 offset1:3
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v4, v[1:2]
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v1, v8, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v4, v9, 16, v5
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v3, v10, 16, v6
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ushort v11, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ushort v12, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ushort v13, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v14, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v15, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v16, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v17, v[1:2]
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v4, v3, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v3, v5, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v8, v8, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: v_lshl_or_b32 v2, v10, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: v_lshl_or_b32 v1, v12, 16, v13
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: v_lshl_or_b32 v6, v14, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: v_lshl_or_b32 v5, v16, 16, v17
+; CHECK-NEXT: ds_write_b16 v0, v7 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: ds_write_b8 v0, v18 offset:30
+; CHECK-NEXT: ds_write_b32 v0, v8 offset:24
+; CHECK-NEXT: ds_write_b64 v0, v[1:2] offset:16
+; CHECK-NEXT: ds_write2_b64 v0, v[5:6], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ushort v11, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ushort v12, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ushort v13, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ushort v14, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v15, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v16, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v17, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v18, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v2, v3, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: v_lshl_or_b32 v6, v6, 16, v7
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v1, v5, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v5, v8, 16, v9
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v3, v13, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v18
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[1:2] offset0:2 offset1:3
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2]
+; CHECK-NEXT: flat_load_dwordx3 v[7:9], v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ushort v1, v[1:2] offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(3)
+; CHECK-NEXT: ds_write2_b32 v0, v7, v8 offset0:4 offset1:5
+; CHECK-NEXT: ds_write_b32 v0, v9 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(4)
+; CHECK-NEXT: ds_write_b8 v0, v10 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(4)
+; CHECK-NEXT: ds_write_b16 v0, v1 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:30
+; CHECK-NEXT: flat_load_dword v8, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: ds_write_b32 v0, v8 offset:16
+; CHECK-NEXT: flat_load_dword v8, v[1:2] offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_write_b32 v0, v8 offset:20
+; CHECK-NEXT: flat_load_dword v1, v[1:2] offset:24
+; CHECK-NEXT: ds_write_b8 v0, v7 offset:30
+; CHECK-NEXT: ds_write_b16 v0, v9 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(2)
+; CHECK-NEXT: ds_write_b32 v0, v1 offset:24
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p0_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p0_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p0.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx3 v[7:9], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: ds_write_b32 v0, v9 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx3 v[7:9], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: ds_write_b32 v0, v9 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx3 v[7:9], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write2_b32 v0, v8, v9 offset0:5 offset1:6
+; CHECK-NEXT: ds_write_b32 v0, v7 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx3 v[7:9], v[1:2], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b32 v0, v8, v9 offset0:5 offset1:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: ds_write_b32 v0, v7 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p1_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p1_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p1.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v7, v1 offset:30
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:28
+; CHECK-NEXT: ds_read_b32 v9, v1 offset:24
+; CHECK-NEXT: ds_read_b64 v[5:6], v1 offset:16
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b8 v0, v7 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b16 v0, v8 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b32 v0, v9 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b64 v0, v[5:6] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset0:2 offset1:3
+; CHECK-NEXT: ds_read2_b64 v[6:9], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[6:7], v[8:9] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v7, v1 offset:30
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:28
+; CHECK-NEXT: ds_read_b32 v9, v1 offset:24
+; CHECK-NEXT: ds_read_b64 v[5:6], v1 offset:16
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b8 v0, v7 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b16 v0, v8 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b32 v0, v9 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b64 v0, v[5:6] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset0:2 offset1:3
+; CHECK-NEXT: ds_read2_b64 v[6:9], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[6:7], v[8:9] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset1:1
+; CHECK-NEXT: ds_read2_b32 v[6:7], v1 offset0:4 offset1:5
+; CHECK-NEXT: ds_read_b32 v8, v1 offset:24
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:30
+; CHECK-NEXT: ds_read_u16 v1, v1 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write2_b32 v0, v6, v7 offset0:4 offset1:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b32 v0, v8 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b8 v0, v9 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b16 v0, v1 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset0:2 offset1:3
+; CHECK-NEXT: ds_read2_b64 v[6:9], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[6:7], v[8:9] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[1:4], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b32 v[5:6], v1 offset0:4 offset1:5
+; CHECK-NEXT: ds_read_b32 v7, v1 offset:24
+; CHECK-NEXT: ds_read_u8 v8, v1 offset:30
+; CHECK-NEXT: ds_read_u16 v9, v1 offset:28
+; CHECK-NEXT: ds_read_b128 v[1:4], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write2_b32 v0, v5, v6 offset0:4 offset1:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b32 v0, v7 offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b8 v0, v8 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b16 v0, v9 offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p3_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p3_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v1 offset:16
+; CHECK-NEXT: ds_read_b128 v[6:9], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[6:9]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p3.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx3 v[7:9], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: ds_write_b32 v0, v9 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx3 v[7:9], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write_b64 v0, v[7:8] offset:16
+; CHECK-NEXT: ds_write_b32 v0, v9 offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx3 v[7:9], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write2_b32 v0, v8, v9 offset0:5 offset1:6
+; CHECK-NEXT: ds_write_b32 v0, v7 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write2_b64 v0, v[3:4], v[5:6] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[7:8], v[9:10] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[1:4]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx3 v[7:9], v[1:2], off offset:16
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b32 v0, v8, v9 offset0:5 offset1:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: ds_write_b32 v0, v7 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v10 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p4_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p4_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b128 v0, v[3:6]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[7:10] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p4.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz16_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v3, v3, 8, v2
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v4, v5, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v6, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v7, v9, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v5, v11, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v8, v13, 8, v12
+; CHECK-NEXT: v_lshl_or_b32 v2, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v9, v15, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v10, v1, 8, v16
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: v_lshl_or_b32 v4, v8, 16, v5
+; CHECK-NEXT: v_lshl_or_b32 v3, v10, 16, v9
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz31_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v21, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v22, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v23, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v24, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v25, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v26, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v27, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v28, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v29, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v30, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v31, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v32, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: v_lshl_or_b32 v1, v3, 8, v2
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: v_lshlrev_b16 v3, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v6, v8, 8, v7
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v7, v10, 8, v9
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v4, v12, 8, v11
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v5, v14, 8, v13
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v10, v16, 8, v15
+; CHECK-NEXT: v_lshl_or_b32 v16, v2, 16, v1
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v11, v18, 8, v17
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v8, v20, 8, v19
+; CHECK-NEXT: v_lshl_or_b32 v1, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v9, v22, 8, v21
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: v_lshl_or_b32 v13, v25, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: v_lshl_or_b32 v12, v23, 8, v27
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v14, v28, 8, v26
+; CHECK-NEXT: v_lshl_or_b32 v4, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v15, v30, 8, v29
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_or_b32_e32 v17, v3, v31
+; CHECK-NEXT: v_lshl_or_b32 v3, v11, 16, v10
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 16, v12
+; CHECK-NEXT: v_lshl_or_b32 v5, v14, 16, v15
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v32 offset:30
+; CHECK-NEXT: ds_write_b32 v0, v16 offset:24
+; CHECK-NEXT: ds_write_b16 v0, v17 offset:28
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write_b64 v0, v[5:6] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz32_align_1_1(ptr addrspace(3) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v21, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v22, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v23, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v24, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v25, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v26, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v27, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v28, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v29, v1, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v30, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v31, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v32, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: v_lshl_or_b32 v3, v3, 8, v2
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: v_lshl_or_b32 v4, v5, 8, v4
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: v_lshl_or_b32 v6, v7, 8, v6
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: v_lshl_or_b32 v7, v9, 8, v8
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: v_lshl_or_b32 v9, v11, 8, v10
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: v_lshl_or_b32 v10, v13, 8, v12
+; CHECK-NEXT: v_lshl_or_b32 v2, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_lshl_or_b32 v5, v15, 8, v14
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: v_lshl_or_b32 v8, v17, 8, v16
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: v_lshl_or_b32 v12, v20, 8, v19
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: v_lshl_or_b32 v14, v22, 8, v21
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: v_lshl_or_b32 v11, v18, 8, v24
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v13, v25, 8, v23
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v15, v27, 8, v26
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v16, v29, 8, v28
+; CHECK-NEXT: v_lshl_or_b32 v6, v12, 16, v11
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v17, v31, 8, v30
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v18, v1, 8, v32
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: v_lshl_or_b32 v4, v8, 16, v5
+; CHECK-NEXT: v_lshl_or_b32 v3, v10, 16, v9
+; CHECK-NEXT: v_lshl_or_b32 v5, v13, 16, v14
+; CHECK-NEXT: v_lshl_or_b32 v8, v16, 16, v15
+; CHECK-NEXT: v_lshl_or_b32 v7, v18, 16, v17
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write2_b64 v0, v[5:6], v[7:8] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz16_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v4, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz31_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v10, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v11, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v12, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v13, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v14, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v15, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v16, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v18, v3, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v1, v5, 16, v4
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v2, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v3, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v4, v11, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v5, v15, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v16 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v17 offset:30
+; CHECK-NEXT: ds_write_b32 v0, v18 offset:24
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write_b64 v0, v[5:6] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz32_align_2_2(ptr addrspace(3) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v10, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v11, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v12, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v13, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v14, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v15, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v16, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v17, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_lshl_or_b32 v1, v4, 16, v3
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_lshl_or_b32 v2, v5, 16, v2
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_lshl_or_b32 v3, v7, 16, v6
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_lshl_or_b32 v4, v9, 16, v8
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_lshl_or_b32 v5, v11, 16, v10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshl_or_b32 v6, v13, 16, v12
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshl_or_b32 v8, v15, 16, v14
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshl_or_b32 v7, v17, 16, v16
+; CHECK-NEXT: ds_write2_b64 v0, v[1:2], v[3:4] offset1:1
+; CHECK-NEXT: ds_write2_b64 v0, v[5:6], v[7:8] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz16_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz31_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x8
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: ds_write2_b32 v0, v6, v7 offset0:5 offset1:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: ds_write_b32 v0, v8 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: ds_write_b16 v0, v9 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b8 v0, v1 offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz32_align_8_8(ptr addrspace(3) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v9, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset1:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b64 v0, v[6:7], v[8:9] offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz16_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz31_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x8
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: ds_write2_b32 v0, v7, v8 offset0:5 offset1:6
+; CHECK-NEXT: ds_write_b32 v0, v6 offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: ds_write_b16 v0, v9 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: ds_write_b8 v0, v10 offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p3_p5_sz32_align_16_16(ptr addrspace(3) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p3_p5_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v9, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: ds_write_b128 v0, v[2:5]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write_b128 v0, v[6:9] offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p3.p5.i64(ptr addrspace(3) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v21, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v22, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v23, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v24, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v25, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v26, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v27, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v28, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v29, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v30, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v31, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v32, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(30) lgkmcnt(30)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29) lgkmcnt(29)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28) lgkmcnt(28)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27) lgkmcnt(27)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26) lgkmcnt(26)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(24)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23) lgkmcnt(23)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21) lgkmcnt(21)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(20)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19) lgkmcnt(19)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(18)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17) lgkmcnt(17)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:31
+; CHECK-NEXT: flat_load_ubyte v4, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ubyte v5, v[1:2] offset:29
+; CHECK-NEXT: flat_load_ubyte v6, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:27
+; CHECK-NEXT: flat_load_ubyte v8, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:25
+; CHECK-NEXT: flat_load_ubyte v10, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v11, v[1:2] offset:23
+; CHECK-NEXT: flat_load_ubyte v12, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ubyte v13, v[1:2] offset:21
+; CHECK-NEXT: flat_load_ubyte v14, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ubyte v15, v[1:2] offset:19
+; CHECK-NEXT: flat_load_ubyte v16, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ubyte v17, v[1:2] offset:17
+; CHECK-NEXT: flat_load_ubyte v18, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ubyte v19, v[1:2] offset:15
+; CHECK-NEXT: flat_load_ubyte v20, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ubyte v21, v[1:2] offset:13
+; CHECK-NEXT: flat_load_ubyte v22, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ubyte v23, v[1:2] offset:11
+; CHECK-NEXT: flat_load_ubyte v24, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ubyte v25, v[1:2] offset:9
+; CHECK-NEXT: flat_load_ubyte v26, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ubyte v27, v[1:2] offset:7
+; CHECK-NEXT: flat_load_ubyte v28, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ubyte v29, v[1:2] offset:5
+; CHECK-NEXT: flat_load_ubyte v30, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ubyte v31, v[1:2] offset:3
+; CHECK-NEXT: flat_load_ubyte v32, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ubyte v33, v[1:2] offset:1
+; CHECK-NEXT: flat_load_ubyte v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(31) lgkmcnt(31)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(30) lgkmcnt(30)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29) lgkmcnt(29)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28) lgkmcnt(28)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27) lgkmcnt(27)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26) lgkmcnt(26)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25) lgkmcnt(25)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24) lgkmcnt(24)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23) lgkmcnt(23)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22) lgkmcnt(22)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21) lgkmcnt(21)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20) lgkmcnt(20)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19) lgkmcnt(19)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18) lgkmcnt(18)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17) lgkmcnt(17)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16) lgkmcnt(16)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v33, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(0) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ubyte v3, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v11, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v12, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v13, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v14, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v15, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v16, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v17, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(0) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: flat_load_ushort v3, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ushort v4, v[1:2] offset:28
+; CHECK-NEXT: flat_load_ushort v5, v[1:2] offset:26
+; CHECK-NEXT: flat_load_ushort v6, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ushort v7, v[1:2] offset:22
+; CHECK-NEXT: flat_load_ushort v8, v[1:2] offset:20
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:18
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v11, v[1:2] offset:14
+; CHECK-NEXT: flat_load_ushort v12, v[1:2] offset:12
+; CHECK-NEXT: flat_load_ushort v13, v[1:2] offset:10
+; CHECK-NEXT: flat_load_ushort v14, v[1:2] offset:8
+; CHECK-NEXT: flat_load_ushort v15, v[1:2] offset:6
+; CHECK-NEXT: flat_load_ushort v16, v[1:2] offset:4
+; CHECK-NEXT: flat_load_ushort v17, v[1:2] offset:2
+; CHECK-NEXT: flat_load_ushort v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(15) lgkmcnt(15)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14) lgkmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13) lgkmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12) lgkmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11) lgkmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10) lgkmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9) lgkmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8) lgkmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7) lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6) lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5) lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(0) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x4
+; CHECK-NEXT: flat_load_dword v7, v[1:2] offset:16
+; CHECK-NEXT: flat_load_dword v8, v[1:2] offset:24
+; CHECK-NEXT: flat_load_ubyte v9, v[1:2] offset:30
+; CHECK-NEXT: flat_load_ushort v10, v[1:2] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: flat_load_dword v1, v[1:2] offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(4) lgkmcnt(4)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(0) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(0) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[1:4], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: flat_load_ubyte v7, v[1:2] offset:30
+; CHECK-NEXT: flat_load_dword v8, v[1:2] offset:16
+; CHECK-NEXT: flat_load_ushort v9, v[1:2] offset:28
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: flat_load_dword v8, v[1:2] offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: flat_load_dword v1, v[1:2] offset:24
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p0_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(0) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p0_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: flat_load_dwordx4 v[3:6], v[1:2] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p0.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(0) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:29
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:27
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:25
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:23
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:21
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:19
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:17
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ubyte v18, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v19, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v20, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v21, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v22, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v23, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v24, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v25, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v26, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v27, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v28, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v29, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v30, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v31, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v32, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(1) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:31
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:29
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:27
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:25
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:23
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:21
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:19
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:17
+; CHECK-NEXT: global_load_ubyte v18, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ubyte v19, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v20, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v21, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v22, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v23, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v24, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v25, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v26, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v27, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v28, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v29, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v30, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v31, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v32, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v33, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v33, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(1) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: global_load_ushort v3, v[1:2], off
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v11, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ushort v12, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v14, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v15, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v16, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v17, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(1) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ushort v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v11, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ushort v12, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v14, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v15, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v16, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v17, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(1) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx3 v[5:7], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:30
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(1) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(1) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx3 v[5:7], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:30
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p1_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(1) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p1_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p1.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(1) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:15
+; CHECK-NEXT: ds_read_u8 v3, v1 offset:14
+; CHECK-NEXT: ds_read_u8 v4, v1 offset:13
+; CHECK-NEXT: ds_read_u8 v5, v1 offset:12
+; CHECK-NEXT: ds_read_u8 v6, v1 offset:11
+; CHECK-NEXT: ds_read_u8 v7, v1 offset:10
+; CHECK-NEXT: ds_read_u8 v8, v1 offset:9
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:8
+; CHECK-NEXT: ds_read_u8 v10, v1 offset:7
+; CHECK-NEXT: ds_read_u8 v11, v1 offset:6
+; CHECK-NEXT: ds_read_u8 v12, v1 offset:5
+; CHECK-NEXT: ds_read_u8 v13, v1 offset:4
+; CHECK-NEXT: ds_read_u8 v14, v1 offset:3
+; CHECK-NEXT: ds_read_u8 v15, v1 offset:2
+; CHECK-NEXT: ds_read_u8 v16, v1 offset:1
+; CHECK-NEXT: ds_read_u8 v1, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt lgkmcnt(14)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(11)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt lgkmcnt(10)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(9)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(6)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:24
+; CHECK-NEXT: ds_read_u8 v3, v1 offset:25
+; CHECK-NEXT: ds_read_u8 v4, v1 offset:26
+; CHECK-NEXT: ds_read_u8 v5, v1 offset:27
+; CHECK-NEXT: ds_read_u8 v6, v1 offset:28
+; CHECK-NEXT: ds_read_u8 v7, v1 offset:29
+; CHECK-NEXT: ds_read_u8 v8, v1 offset:30
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:16
+; CHECK-NEXT: ds_read_u8 v10, v1 offset:17
+; CHECK-NEXT: ds_read_u8 v11, v1 offset:18
+; CHECK-NEXT: ds_read_u8 v12, v1 offset:19
+; CHECK-NEXT: ds_read_u8 v13, v1 offset:20
+; CHECK-NEXT: ds_read_u8 v14, v1 offset:21
+; CHECK-NEXT: ds_read_u8 v15, v1 offset:22
+; CHECK-NEXT: ds_read_u8 v16, v1 offset:23
+; CHECK-NEXT: ds_read_u8 v17, v1 offset:8
+; CHECK-NEXT: ds_read_u8 v18, v1 offset:9
+; CHECK-NEXT: ds_read_u8 v19, v1 offset:10
+; CHECK-NEXT: ds_read_u8 v20, v1 offset:11
+; CHECK-NEXT: ds_read_u8 v21, v1 offset:12
+; CHECK-NEXT: ds_read_u8 v22, v1 offset:13
+; CHECK-NEXT: ds_read_u8 v23, v1 offset:14
+; CHECK-NEXT: ds_read_u8 v24, v1 offset:15
+; CHECK-NEXT: ds_read_u8 v25, v1
+; CHECK-NEXT: ds_read_u8 v26, v1 offset:1
+; CHECK-NEXT: ds_read_u8 v27, v1 offset:2
+; CHECK-NEXT: ds_read_u8 v28, v1 offset:3
+; CHECK-NEXT: ds_read_u8 v29, v1 offset:4
+; CHECK-NEXT: ds_read_u8 v30, v1 offset:5
+; CHECK-NEXT: ds_read_u8 v31, v1 offset:6
+; CHECK-NEXT: ds_read_u8 v1, v1 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(16)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(3) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:24
+; CHECK-NEXT: ds_read_u8 v3, v1 offset:25
+; CHECK-NEXT: ds_read_u8 v4, v1 offset:26
+; CHECK-NEXT: ds_read_u8 v5, v1 offset:27
+; CHECK-NEXT: ds_read_u8 v6, v1 offset:28
+; CHECK-NEXT: ds_read_u8 v7, v1 offset:29
+; CHECK-NEXT: ds_read_u8 v8, v1 offset:30
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:31
+; CHECK-NEXT: ds_read_u8 v10, v1 offset:16
+; CHECK-NEXT: ds_read_u8 v11, v1 offset:17
+; CHECK-NEXT: ds_read_u8 v12, v1 offset:18
+; CHECK-NEXT: ds_read_u8 v13, v1 offset:19
+; CHECK-NEXT: ds_read_u8 v14, v1 offset:20
+; CHECK-NEXT: ds_read_u8 v15, v1 offset:21
+; CHECK-NEXT: ds_read_u8 v16, v1 offset:22
+; CHECK-NEXT: ds_read_u8 v17, v1 offset:23
+; CHECK-NEXT: ds_read_u8 v18, v1 offset:8
+; CHECK-NEXT: ds_read_u8 v19, v1 offset:9
+; CHECK-NEXT: ds_read_u8 v20, v1 offset:10
+; CHECK-NEXT: ds_read_u8 v21, v1 offset:11
+; CHECK-NEXT: ds_read_u8 v22, v1 offset:12
+; CHECK-NEXT: ds_read_u8 v23, v1 offset:13
+; CHECK-NEXT: ds_read_u8 v24, v1 offset:14
+; CHECK-NEXT: ds_read_u8 v25, v1 offset:15
+; CHECK-NEXT: ds_read_u8 v26, v1
+; CHECK-NEXT: ds_read_u8 v27, v1 offset:1
+; CHECK-NEXT: ds_read_u8 v28, v1 offset:2
+; CHECK-NEXT: ds_read_u8 v29, v1 offset:3
+; CHECK-NEXT: ds_read_u8 v30, v1 offset:4
+; CHECK-NEXT: ds_read_u8 v31, v1 offset:5
+; CHECK-NEXT: ds_read_u8 v32, v1 offset:6
+; CHECK-NEXT: ds_read_u8 v1, v1 offset:7
+; CHECK-NEXT: s_waitcnt lgkmcnt(24)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(16)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(3) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u16 v2, v1
+; CHECK-NEXT: ds_read_u16 v3, v1 offset:2
+; CHECK-NEXT: ds_read_u16 v4, v1 offset:4
+; CHECK-NEXT: ds_read_u16 v5, v1 offset:6
+; CHECK-NEXT: ds_read_u16 v6, v1 offset:8
+; CHECK-NEXT: ds_read_u16 v7, v1 offset:10
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:12
+; CHECK-NEXT: ds_read_u16 v1, v1 offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u8 v2, v1 offset:30
+; CHECK-NEXT: ds_read_u16 v3, v1 offset:28
+; CHECK-NEXT: ds_read_u16 v4, v1 offset:26
+; CHECK-NEXT: ds_read_u16 v5, v1 offset:24
+; CHECK-NEXT: ds_read_u16 v6, v1 offset:22
+; CHECK-NEXT: ds_read_u16 v7, v1 offset:20
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:18
+; CHECK-NEXT: ds_read_u16 v9, v1 offset:16
+; CHECK-NEXT: ds_read_u16 v10, v1 offset:14
+; CHECK-NEXT: ds_read_u16 v11, v1 offset:12
+; CHECK-NEXT: ds_read_u16 v12, v1 offset:10
+; CHECK-NEXT: ds_read_u16 v13, v1 offset:8
+; CHECK-NEXT: ds_read_u16 v14, v1 offset:6
+; CHECK-NEXT: ds_read_u16 v15, v1 offset:4
+; CHECK-NEXT: ds_read_u16 v16, v1 offset:2
+; CHECK-NEXT: ds_read_u16 v1, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(14)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(11)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(10)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(9)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(3) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_u16 v2, v1 offset:30
+; CHECK-NEXT: ds_read_u16 v3, v1 offset:28
+; CHECK-NEXT: ds_read_u16 v4, v1 offset:26
+; CHECK-NEXT: ds_read_u16 v5, v1 offset:24
+; CHECK-NEXT: ds_read_u16 v6, v1 offset:22
+; CHECK-NEXT: ds_read_u16 v7, v1 offset:20
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:18
+; CHECK-NEXT: ds_read_u16 v9, v1 offset:16
+; CHECK-NEXT: ds_read_u16 v10, v1 offset:14
+; CHECK-NEXT: ds_read_u16 v11, v1 offset:12
+; CHECK-NEXT: ds_read_u16 v12, v1 offset:10
+; CHECK-NEXT: ds_read_u16 v13, v1 offset:8
+; CHECK-NEXT: ds_read_u16 v14, v1 offset:6
+; CHECK-NEXT: ds_read_u16 v15, v1 offset:4
+; CHECK-NEXT: ds_read_u16 v16, v1 offset:2
+; CHECK-NEXT: ds_read_u16 v1, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(15)
+; CHECK-NEXT: buffer_store_short v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(14)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(13)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt lgkmcnt(12)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(11)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt lgkmcnt(10)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(9)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt lgkmcnt(8)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(7)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt lgkmcnt(6)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(3) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b32 v[5:6], v1 offset0:4 offset1:5
+; CHECK-NEXT: ds_read_b32 v7, v1 offset:24
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:28
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:30
+; CHECK-NEXT: ds_read2_b64 v[1:4], v1 offset1:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(3) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b64 v[2:5], v1 offset1:1
+; CHECK-NEXT: ds_read2_b64 v[6:9], v1 offset0:2 offset1:3
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(3) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[1:4], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read2_b32 v[5:6], v1 offset0:4 offset1:5
+; CHECK-NEXT: ds_read_b32 v7, v1 offset:24
+; CHECK-NEXT: ds_read_u16 v8, v1 offset:28
+; CHECK-NEXT: ds_read_u8 v9, v1 offset:30
+; CHECK-NEXT: ds_read_b128 v[1:4], v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(4)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p3_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(3) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p3_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ds_read_b128 v[2:5], v1
+; CHECK-NEXT: ds_read_b128 v[6:9], v1 offset:16
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(3) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:29
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:27
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:25
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:23
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:21
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:19
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:17
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ubyte v18, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v19, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v20, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v21, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v22, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v23, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v24, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v25, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v26, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v27, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v28, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v29, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v30, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v31, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v32, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(4) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:31
+; CHECK-NEXT: global_load_ubyte v4, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ubyte v5, v[1:2], off offset:29
+; CHECK-NEXT: global_load_ubyte v6, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v7, v[1:2], off offset:27
+; CHECK-NEXT: global_load_ubyte v8, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:25
+; CHECK-NEXT: global_load_ubyte v10, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ubyte v11, v[1:2], off offset:23
+; CHECK-NEXT: global_load_ubyte v12, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ubyte v13, v[1:2], off offset:21
+; CHECK-NEXT: global_load_ubyte v14, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ubyte v15, v[1:2], off offset:19
+; CHECK-NEXT: global_load_ubyte v16, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ubyte v17, v[1:2], off offset:17
+; CHECK-NEXT: global_load_ubyte v18, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ubyte v19, v[1:2], off offset:15
+; CHECK-NEXT: global_load_ubyte v20, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ubyte v21, v[1:2], off offset:13
+; CHECK-NEXT: global_load_ubyte v22, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ubyte v23, v[1:2], off offset:11
+; CHECK-NEXT: global_load_ubyte v24, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ubyte v25, v[1:2], off offset:9
+; CHECK-NEXT: global_load_ubyte v26, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ubyte v27, v[1:2], off offset:7
+; CHECK-NEXT: global_load_ubyte v28, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ubyte v29, v[1:2], off offset:5
+; CHECK-NEXT: global_load_ubyte v30, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ubyte v31, v[1:2], off offset:3
+; CHECK-NEXT: global_load_ubyte v32, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ubyte v33, v[1:2], off offset:1
+; CHECK-NEXT: global_load_ubyte v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v33, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(4) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: global_load_ushort v3, v[1:2], off
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ubyte v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v11, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ushort v12, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v14, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v15, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v16, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v17, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(4) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: global_load_ushort v3, v[1:2], off offset:30
+; CHECK-NEXT: global_load_ushort v4, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ushort v5, v[1:2], off offset:26
+; CHECK-NEXT: global_load_ushort v6, v[1:2], off offset:24
+; CHECK-NEXT: global_load_ushort v7, v[1:2], off offset:22
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:20
+; CHECK-NEXT: global_load_ushort v9, v[1:2], off offset:18
+; CHECK-NEXT: global_load_ushort v10, v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v11, v[1:2], off offset:14
+; CHECK-NEXT: global_load_ushort v12, v[1:2], off offset:12
+; CHECK-NEXT: global_load_ushort v13, v[1:2], off offset:10
+; CHECK-NEXT: global_load_ushort v14, v[1:2], off offset:8
+; CHECK-NEXT: global_load_ushort v15, v[1:2], off offset:6
+; CHECK-NEXT: global_load_ushort v16, v[1:2], off offset:4
+; CHECK-NEXT: global_load_ushort v17, v[1:2], off offset:2
+; CHECK-NEXT: global_load_ushort v1, v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v17, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(4) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx3 v[5:7], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:30
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(4) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(4) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: global_load_dwordx3 v[5:7], v[1:2], off offset:16
+; CHECK-NEXT: global_load_ushort v8, v[1:2], off offset:28
+; CHECK-NEXT: global_load_ubyte v9, v[1:2], off offset:30
+; CHECK-NEXT: global_load_dwordx4 v[1:4], v[1:2], off
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p4_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(4) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p4_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_dwordx4 v[3:6], v[1:2], off
+; CHECK-NEXT: global_load_dwordx4 v[7:10], v[1:2], off offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p4.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(4) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz16_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz16_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz31_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz31_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1e
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v21, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v22, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v23, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v24, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v25, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v26, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v27, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v28, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v29, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v30, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v31, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz32_align_1_1(ptr addrspace(5) align 1 %dst, ptr addrspace(5) align 1 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz32_align_1_1:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x1f
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:31
+; CHECK-NEXT: buffer_load_ubyte v3, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ubyte v4, v1, s[0:3], 0 offen offset:29
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ubyte v6, v1, s[0:3], 0 offen offset:27
+; CHECK-NEXT: buffer_load_ubyte v7, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ubyte v8, v1, s[0:3], 0 offen offset:25
+; CHECK-NEXT: buffer_load_ubyte v9, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v10, v1, s[0:3], 0 offen offset:23
+; CHECK-NEXT: buffer_load_ubyte v11, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ubyte v12, v1, s[0:3], 0 offen offset:21
+; CHECK-NEXT: buffer_load_ubyte v13, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ubyte v14, v1, s[0:3], 0 offen offset:19
+; CHECK-NEXT: buffer_load_ubyte v15, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ubyte v16, v1, s[0:3], 0 offen offset:17
+; CHECK-NEXT: buffer_load_ubyte v17, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v18, v1, s[0:3], 0 offen offset:15
+; CHECK-NEXT: buffer_load_ubyte v19, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ubyte v20, v1, s[0:3], 0 offen offset:13
+; CHECK-NEXT: buffer_load_ubyte v21, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ubyte v22, v1, s[0:3], 0 offen offset:11
+; CHECK-NEXT: buffer_load_ubyte v23, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ubyte v24, v1, s[0:3], 0 offen offset:9
+; CHECK-NEXT: buffer_load_ubyte v25, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ubyte v26, v1, s[0:3], 0 offen offset:7
+; CHECK-NEXT: buffer_load_ubyte v27, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ubyte v28, v1, s[0:3], 0 offen offset:5
+; CHECK-NEXT: buffer_load_ubyte v29, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ubyte v30, v1, s[0:3], 0 offen offset:3
+; CHECK-NEXT: buffer_load_ubyte v31, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ubyte v32, v1, s[0:3], 0 offen offset:1
+; CHECK-NEXT: buffer_load_ubyte v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(31)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:31
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: buffer_store_byte v3, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(29)
+; CHECK-NEXT: buffer_store_byte v4, v0, s[0:3], 0 offen offset:29
+; CHECK-NEXT: s_waitcnt vmcnt(28)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(27)
+; CHECK-NEXT: buffer_store_byte v6, v0, s[0:3], 0 offen offset:27
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: buffer_store_byte v7, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(25)
+; CHECK-NEXT: buffer_store_byte v8, v0, s[0:3], 0 offen offset:25
+; CHECK-NEXT: s_waitcnt vmcnt(24)
+; CHECK-NEXT: buffer_store_byte v9, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(23)
+; CHECK-NEXT: buffer_store_byte v10, v0, s[0:3], 0 offen offset:23
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: buffer_store_byte v11, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(21)
+; CHECK-NEXT: buffer_store_byte v12, v0, s[0:3], 0 offen offset:21
+; CHECK-NEXT: s_waitcnt vmcnt(20)
+; CHECK-NEXT: buffer_store_byte v13, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(19)
+; CHECK-NEXT: buffer_store_byte v14, v0, s[0:3], 0 offen offset:19
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: buffer_store_byte v15, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(17)
+; CHECK-NEXT: buffer_store_byte v16, v0, s[0:3], 0 offen offset:17
+; CHECK-NEXT: s_waitcnt vmcnt(16)
+; CHECK-NEXT: buffer_store_byte v17, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v18, v0, s[0:3], 0 offen offset:15
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_byte v19, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_byte v20, v0, s[0:3], 0 offen offset:13
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_byte v21, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_byte v22, v0, s[0:3], 0 offen offset:11
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_byte v23, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_byte v24, v0, s[0:3], 0 offen offset:9
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_byte v25, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_byte v26, v0, s[0:3], 0 offen offset:7
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_byte v27, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v28, v0, s[0:3], 0 offen offset:5
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_byte v29, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_byte v30, v0, s[0:3], 0 offen offset:3
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_byte v31, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_byte v32, v0, s[0:3], 0 offen offset:1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 1 %dst, ptr addrspace(5) noundef nonnull align 1 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz16_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz16_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v2, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz31_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz31_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ubyte v2, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v10, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v11, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v12, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v13, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v14, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v15, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v16, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_byte v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz32_align_2_2(ptr addrspace(5) align 2 %dst, ptr addrspace(5) align 2 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz32_align_2_2:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0xf
+; CHECK-NEXT: buffer_load_ushort v2, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v3, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_ushort v4, v1, s[0:3], 0 offen offset:26
+; CHECK-NEXT: buffer_load_ushort v5, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:22
+; CHECK-NEXT: buffer_load_ushort v7, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_ushort v8, v1, s[0:3], 0 offen offset:18
+; CHECK-NEXT: buffer_load_ushort v9, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ushort v10, v1, s[0:3], 0 offen offset:14
+; CHECK-NEXT: buffer_load_ushort v11, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: buffer_load_ushort v12, v1, s[0:3], 0 offen offset:10
+; CHECK-NEXT: buffer_load_ushort v13, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_ushort v14, v1, s[0:3], 0 offen offset:6
+; CHECK-NEXT: buffer_load_ushort v15, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_ushort v16, v1, s[0:3], 0 offen offset:2
+; CHECK-NEXT: buffer_load_ushort v1, v1, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(15)
+; CHECK-NEXT: buffer_store_short v2, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: buffer_store_short v3, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(13)
+; CHECK-NEXT: buffer_store_short v4, v0, s[0:3], 0 offen offset:26
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: buffer_store_short v5, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(11)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:22
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: buffer_store_short v7, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(9)
+; CHECK-NEXT: buffer_store_short v8, v0, s[0:3], 0 offen offset:18
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_short v9, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_short v10, v0, s[0:3], 0 offen offset:14
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_short v11, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_short v12, v0, s[0:3], 0 offen offset:10
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v13, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_short v14, v0, s[0:3], 0 offen offset:6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_short v15, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_short v16, v0, s[0:3], 0 offen offset:2
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 2 %dst, ptr addrspace(5) noundef nonnull align 2 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz16_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz16_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz31_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz31_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x8
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v9, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz32_align_8_8(ptr addrspace(5) align 8 %dst, ptr addrspace(5) align 8 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz32_align_8_8:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 8 %dst, ptr addrspace(5) noundef nonnull align 8 %src, i64 32, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz16_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz16_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x3
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz31_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz31_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x8
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_ubyte v5, v1, s[0:3], 0 offen offset:30
+; CHECK-NEXT: buffer_load_ushort v6, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v9, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_byte v5, v0, s[0:3], 0 offen offset:30
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_short v6, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p5_p5_sz32_align_16_16(ptr addrspace(5) align 16 %dst, ptr addrspace(5) align 16 readonly %src) {
+; CHECK-LABEL: memmove_p5_p5_sz32_align_16_16:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_clause 0x7
+; CHECK-NEXT: buffer_load_dword v2, v1, s[0:3], 0 offen offset:16
+; CHECK-NEXT: buffer_load_dword v3, v1, s[0:3], 0 offen offset:20
+; CHECK-NEXT: buffer_load_dword v4, v1, s[0:3], 0 offen offset:24
+; CHECK-NEXT: buffer_load_dword v5, v1, s[0:3], 0 offen offset:28
+; CHECK-NEXT: buffer_load_dword v6, v1, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v7, v1, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_load_dword v8, v1, s[0:3], 0 offen offset:8
+; CHECK-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen offset:24
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:8
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) noundef nonnull align 16 %dst, ptr addrspace(5) noundef nonnull align 16 %src, i64 32, i1 false)
+ ret void
+}
+
+declare void @llvm.memmove.p0.p0.i64(ptr addrspace(0) nocapture writeonly, ptr addrspace(0) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p0.p1.i64(ptr addrspace(0) nocapture writeonly, ptr addrspace(1) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p0.p3.i64(ptr addrspace(0) nocapture writeonly, ptr addrspace(3) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p0.p4.i64(ptr addrspace(0) nocapture writeonly, ptr addrspace(4) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p0.p5.i64(ptr addrspace(0) nocapture writeonly, ptr addrspace(5) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p1.p0.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(0) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p1.p1.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(1) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p1.p3.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(3) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p1.p4.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(4) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p1.p5.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(5) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p3.p0.i64(ptr addrspace(3) nocapture writeonly, ptr addrspace(0) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p3.p1.i64(ptr addrspace(3) nocapture writeonly, ptr addrspace(1) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p3.p3.i64(ptr addrspace(3) nocapture writeonly, ptr addrspace(3) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p3.p4.i64(ptr addrspace(3) nocapture writeonly, ptr addrspace(4) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p3.p5.i64(ptr addrspace(3) nocapture writeonly, ptr addrspace(5) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p5.p0.i64(ptr addrspace(5) nocapture writeonly, ptr addrspace(0) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p5.p1.i64(ptr addrspace(5) nocapture writeonly, ptr addrspace(1) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p5.p3.i64(ptr addrspace(5) nocapture writeonly, ptr addrspace(3) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p5.p4.i64(ptr addrspace(5) nocapture writeonly, ptr addrspace(4) nocapture readonly, i64, i1 immarg) #2
+declare void @llvm.memmove.p5.p5.i64(ptr addrspace(5) nocapture writeonly, ptr addrspace(5) nocapture readonly, i64, i1 immarg) #2
+
+attributes #2 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+
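The store patterns checked above follow directly from copy size and alignment. As a minimal sketch, assuming a greedy decomposition over the access widths AMDGPU exposes (dwordx4, dwordx3, dwordx2, dword, short, byte), the chunking seen in the sz31 tests can be reproduced. The chunks() helper and its alignment rule are illustrative assumptions, not the in-tree lowering:

  #include <cstdio>
  #include <vector>

  // Illustrative only: greedily pick the widest access the remaining size
  // and the pointer alignment permit.
  static std::vector<unsigned> chunks(unsigned Size, unsigned Align) {
    static const unsigned Widths[] = {16, 12, 8, 4, 2, 1};
    std::vector<unsigned> Out;
    for (unsigned W : Widths) {
      // Assumption: dword and wider accesses need 4-byte alignment here;
      // narrower accesses need their natural alignment.
      unsigned Need = W >= 4 ? 4 : W;
      while (Size >= W && Align >= Need) {
        Out.push_back(W);
        Size -= W;
      }
    }
    return Out;
  }

  int main() {
    for (unsigned W : chunks(31, 8)) printf("%u ", W); // 16 12 2 1
    printf("\n");                                      // dwordx4+dwordx3+short+byte
    for (unsigned W : chunks(31, 2)) printf("%u ", W); // fifteen 2s, then 1
    printf("\n");                                      // shorts plus one byte
    return 0;
  }

Under these assumptions the output matches the sz31 align_8_8 test (dwordx4 + dwordx3 + short + byte) and the sz31 align_2_2 test (fifteen shorts plus one byte).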
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-scalar-load.ll b/llvm/test/CodeGen/AMDGPU/memmove-scalar-load.ll
new file mode 100644
index 0000000000000..1b8483a54bb3b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/memmove-scalar-load.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 %s -o - | FileCheck %s
+
+; Testing codegen for memmove where the addrspace(4) source is uniform constant memory, so the reads can be scalar (s_load); a conceptual sketch follows this file.
+
+
+define void @memmove_p1_p4_sz16_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
+; CHECK-LABEL: memmove_p1_p4_sz16_align_4_4:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-NEXT: v_mov_b32_e32 v3, s5
+; CHECK-NEXT: v_mov_b32_e32 v4, s6
+; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 16, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz31_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
+; CHECK-LABEL: memmove_p1_p4_sz31_align_4_4:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: global_load_ubyte v9, v2, s[4:5] offset:30
+; CHECK-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-NEXT: v_mov_b32_e32 v3, s5
+; CHECK-NEXT: v_mov_b32_e32 v4, s6
+; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: v_mov_b32_e32 v10, s11
+; CHECK-NEXT: v_mov_b32_e32 v6, s8
+; CHECK-NEXT: v_mov_b32_e32 v7, s9
+; CHECK-NEXT: v_mov_b32_e32 v8, s10
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
+; CHECK-NEXT: global_store_short v[0:1], v10, off offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_store_byte v[0:1], v9, off offset:30
+; CHECK-NEXT: global_store_dwordx3 v[0:1], v[6:8], off offset:16
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 31, i1 false)
+ ret void
+}
+
+define void @memmove_p1_p4_sz32_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
+; CHECK-LABEL: memmove_p1_p4_sz32_align_4_4:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v2, s8
+; CHECK-NEXT: v_mov_b32_e32 v3, s9
+; CHECK-NEXT: v_mov_b32_e32 v4, s10
+; CHECK-NEXT: v_mov_b32_e32 v5, s11
+; CHECK-NEXT: v_mov_b32_e32 v9, s7
+; CHECK-NEXT: v_mov_b32_e32 v8, s6
+; CHECK-NEXT: v_mov_b32_e32 v7, s5
+; CHECK-NEXT: v_mov_b32_e32 v6, s4
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:16
+; CHECK-NEXT: global_store_dwordx4 v[0:1], v[6:9], off
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 32, i1 false)
+ ret void
+}
+
+declare void @llvm.memmove.p1.p4.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(4) nocapture readonly, i64, i1 immarg) #2
+
+attributes #2 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+
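A conceptual model of why these tests get scalar loads, assuming only that the inreg addrspace(4) source pointer is wave-uniform: the loaded dwords are identical for every lane, so one s_load per chunk plus per-lane v_mov_b32 broadcasts suffices, which is the shape of the CHECK lines above. The constants and WaveSize below are illustrative, not GPU code:

  #include <array>
  #include <cstdint>
  #include <cstdio>

  // Conceptual model: a wave-uniform read needs one scalar fetch, then a
  // broadcast into per-lane registers, rather than one load per lane.
  constexpr int WaveSize = 32; // illustrative; gfx1030 compute defaults to wave32

  int main() {
    const uint32_t ConstantMem[4] = {1, 2, 3, 4}; // stands in for addrspace(4) data
    std::array<uint32_t, WaveSize> Lane{};
    for (int L = 0; L < WaveSize; ++L)
      Lane[L] = ConstantMem[0];                   // broadcast, not 32 separate loads
    printf("lane0=%u lane31=%u\n", (unsigned)Lane[0],
           (unsigned)Lane[WaveSize - 1]);
    return 0;
  }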
>From 690ecee40cd2f3d6655af09272c89a2ea15ada60 Mon Sep 17 00:00:00 2001
From: Chen Zheng <czhengsz at cn.ibm.com>
Date: Wed, 3 Jul 2024 02:56:50 -0400
Subject: [PATCH 075/246] remove the symlink added by mistake, NFC.
---
llvm/tools/clang | 1 -
1 file changed, 1 deletion(-)
delete mode 120000 llvm/tools/clang
diff --git a/llvm/tools/clang b/llvm/tools/clang
deleted file mode 120000
index 7700edcd10231..0000000000000
--- a/llvm/tools/clang
+++ /dev/null
@@ -1 +0,0 @@
-../../clang
\ No newline at end of file
>From c5b67dde981d81f9bbd9a6328d5c1eb4297ed3b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Thorsten=20Sch=C3=BCtt?= <schuett at gmail.com>
Date: Wed, 3 Jul 2024 09:19:40 +0200
Subject: [PATCH 076/246] [GlobalIsel][NFC] Modernize UBFX combine (#97513)
Credits: https://reviews.llvm.org/D99283
---
.../include/llvm/Target/GlobalISel/Combine.td | 5 +-
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 12 +-
.../form-bitfield-extract-from-and.mir | 182 ++++++++++--------
3 files changed, 110 insertions(+), 89 deletions(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index b0789fca630e8..3ef0636ebf1c7 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1026,7 +1026,10 @@ def and_or_disjoint_mask : GICombineRule<
def bitfield_extract_from_and : GICombineRule<
(defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_AND):$root,
+ (match (G_CONSTANT $mask, $imm2),
+ (G_CONSTANT $lsb, $imm1),
+ (G_LSHR $shift, $x, $lsb),
+ (G_AND $root, $shift, $mask):$root,
[{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 945bd8bab1648..ef2ef1e0ffb04 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4521,19 +4521,21 @@ bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
}
/// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
-bool CombinerHelper::matchBitfieldExtractFromAnd(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
- assert(MI.getOpcode() == TargetOpcode::G_AND);
- Register Dst = MI.getOperand(0).getReg();
+bool CombinerHelper::matchBitfieldExtractFromAnd(MachineInstr &MI,
+ BuildFnTy &MatchInfo) {
+ GAnd *And = cast<GAnd>(&MI);
+ Register Dst = And->getReg(0);
LLT Ty = MRI.getType(Dst);
LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
+ // Note that isLegalOrBeforeLegalizer is stricter and does not take custom
+  // lowering into account.
if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
return false;
int64_t AndImm, LSBImm;
Register ShiftSrc;
const unsigned Size = Ty.getScalarSizeInBits();
- if (!mi_match(MI.getOperand(0).getReg(), MRI,
+ if (!mi_match(And->getReg(0), MRI,
m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
m_ICst(AndImm))))
return false;
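A standalone sketch of the arithmetic behind matchBitfieldExtractFromAnd, using the constants from the ubfx_s32 test below; the trailing-ones loop stands in for APInt::countTrailingOnes and is an assumption about presentation only, not a copy of the in-tree code:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    uint64_t Mask = 255; // %mask:_(s32) = G_CONSTANT i32 255
    uint64_t Lsb = 5;    // %lsb:_(s32)  = G_CONSTANT i32 5
    // The mask must have the form 2^k - 1 for the combine to fire.
    assert((Mask & (Mask + 1)) == 0);
    unsigned Width = 0;
    for (uint64_t M = Mask; M & 1; M >>= 1)
      ++Width; // trailing_ones(255) == 8
    // G_UBFX takes (lsb, width); the last bit the field touches is
    // lsb + width - 1 = 12, the value the test comments compute.
    printf("lsb=%llu width=%u msb=%llu\n", (unsigned long long)Lsb, Width,
           (unsigned long long)(Lsb + Width - 1));
    return 0;
  }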
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
index 141e6c4d47038..16e84a6c1af80 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
@@ -6,6 +6,10 @@
#
# and (lshr x, cst), mask -> ubfx x, cst, width
+# LSB = 5
+# Width = trailing_ones(255) = 8
+# Last bit of the field: LSB + Width - 1 = 5 + 8 - 1 = 12
+
...
---
name: ubfx_s32
@@ -15,18 +19,16 @@ body: |
bb.0:
liveins: $w0
- ; LSB = 5
- ; Width = LSB + trailing_ones(255) - 1 =
- ; 5 + 8 - 1 = 12
; CHECK-LABEL: name: ubfx_s32
; CHECK: liveins: $w0
- ; CHECK: %x:_(s32) = COPY $w0
- ; CHECK: %lsb:_(s32) = G_CONSTANT i32 5
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK: %and:_(s32) = G_UBFX %x, %lsb(s32), [[C]]
- ; CHECK: $w0 = COPY %and(s32)
- ; CHECK: RET_ReallyLR implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 5
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; CHECK-NEXT: %and:_(s32) = G_UBFX %x, %lsb(s32), [[C]]
+ ; CHECK-NEXT: $w0 = COPY %and(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
%x:_(s32) = COPY $w0
%lsb:_(s32) = G_CONSTANT i32 5
%mask:_(s32) = G_CONSTANT i32 255
@@ -35,6 +37,10 @@ body: |
$w0 = COPY %and
RET_ReallyLR implicit $w0
+# LSB = 5
+# Width = LSB + trailing_ones(1) - 1 =
+# 5 + 1 - 1 = 5
+
...
---
name: ubfx_s64
@@ -44,18 +50,15 @@ body: |
bb.0:
liveins: $x0
- ; LSB = 5
- ; Width = LSB + trailing_ones(1) - 1 =
- ; 5 + 1 - 1 = 5
-
; CHECK-LABEL: name: ubfx_s64
; CHECK: liveins: $x0
- ; CHECK: %x:_(s64) = COPY $x0
- ; CHECK: %lsb:_(s64) = G_CONSTANT i64 5
- ; CHECK: %mask:_(s64) = G_CONSTANT i64 1
- ; CHECK: %and:_(s64) = G_UBFX %x, %lsb(s64), %mask
- ; CHECK: $x0 = COPY %and(s64)
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 5
+ ; CHECK-NEXT: %mask:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %and:_(s64) = G_UBFX %x, %lsb(s64), %mask
+ ; CHECK-NEXT: $x0 = COPY %and(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%x:_(s64) = COPY $x0
%lsb:_(s64) = G_CONSTANT i64 5
%mask:_(s64) = G_CONSTANT i64 1
@@ -64,6 +67,8 @@ body: |
$x0 = COPY %and
RET_ReallyLR implicit $x0
+# UBFX needs to be selected to UBFMWri/UBFMXri, so we need constants.
+
...
---
name: dont_combine_no_and_cst
@@ -73,17 +78,17 @@ body: |
bb.0:
liveins: $w0, $w1
- ; UBFX needs to be selected to UBFMWri/UBFMXri, so we need constants.
; CHECK-LABEL: name: dont_combine_no_and_cst
; CHECK: liveins: $w0, $w1
- ; CHECK: %x:_(s32) = COPY $w0
- ; CHECK: %y:_(s32) = COPY $w1
- ; CHECK: %lsb:_(s32) = G_CONSTANT i32 5
- ; CHECK: %shift:_(s32) = G_LSHR %x, %lsb(s32)
- ; CHECK: %and:_(s32) = G_AND %shift, %y
- ; CHECK: $w0 = COPY %and(s32)
- ; CHECK: RET_ReallyLR implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 5
+ ; CHECK-NEXT: %shift:_(s32) = G_LSHR %x, %lsb(s32)
+ ; CHECK-NEXT: %and:_(s32) = G_AND %shift, %y
+ ; CHECK-NEXT: $w0 = COPY %and(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
%x:_(s32) = COPY $w0
%y:_(s32) = COPY $w1
%lsb:_(s32) = G_CONSTANT i32 5
@@ -102,13 +107,14 @@ body: |
liveins: $w0
; CHECK-LABEL: name: dont_combine_and_cst_not_mask
; CHECK: liveins: $w0
- ; CHECK: %x:_(s32) = COPY $w0
- ; CHECK: %lsb:_(s32) = G_CONSTANT i32 5
- ; CHECK: %not_a_mask:_(s32) = G_CONSTANT i32 2
- ; CHECK: %shift:_(s32) = G_LSHR %x, %lsb(s32)
- ; CHECK: %and:_(s32) = G_AND %shift, %not_a_mask
- ; CHECK: $w0 = COPY %and(s32)
- ; CHECK: RET_ReallyLR implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 5
+ ; CHECK-NEXT: %not_a_mask:_(s32) = G_CONSTANT i32 2
+ ; CHECK-NEXT: %shift:_(s32) = G_LSHR %x, %lsb(s32)
+ ; CHECK-NEXT: %and:_(s32) = G_AND %shift, %not_a_mask
+ ; CHECK-NEXT: $w0 = COPY %and(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
%x:_(s32) = COPY $w0
%lsb:_(s32) = G_CONSTANT i32 5
%not_a_mask:_(s32) = G_CONSTANT i32 2
@@ -127,14 +133,15 @@ body: |
liveins: $x0
; CHECK-LABEL: name: dont_combine_shift_more_than_one_use
; CHECK: liveins: $x0
- ; CHECK: %x:_(s64) = COPY $x0
- ; CHECK: %lsb:_(s64) = G_CONSTANT i64 5
- ; CHECK: %mask:_(s64) = G_CONSTANT i64 1
- ; CHECK: %shift:_(s64) = G_LSHR %x, %lsb(s64)
- ; CHECK: %and:_(s64) = G_AND %shift, %mask
- ; CHECK: %sub:_(s64) = G_SUB %and, %shift
- ; CHECK: $x0 = COPY %sub(s64)
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 5
+ ; CHECK-NEXT: %mask:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %shift:_(s64) = G_LSHR %x, %lsb(s64)
+ ; CHECK-NEXT: %and:_(s64) = G_AND %shift, %mask
+ ; CHECK-NEXT: %sub:_(s64) = G_SUB %and, %shift
+ ; CHECK-NEXT: $x0 = COPY %sub(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%x:_(s64) = COPY $x0
%lsb:_(s64) = G_CONSTANT i64 5
%mask:_(s64) = G_CONSTANT i64 1
@@ -144,6 +151,8 @@ body: |
$x0 = COPY %sub
RET_ReallyLR implicit $x0
+# LSB must be in [0, reg_size)
+
...
---
name: dont_combine_negative_lsb
@@ -153,17 +162,17 @@ body: |
bb.0:
liveins: $w0
- ; LSB must be in [0, reg_size)
; CHECK-LABEL: name: dont_combine_negative_lsb
; CHECK: liveins: $w0
- ; CHECK: %x:_(s32) = COPY $w0
- ; CHECK: %negative:_(s32) = G_CONSTANT i32 -1
- ; CHECK: %mask:_(s32) = G_CONSTANT i32 255
- ; CHECK: %shift:_(s32) = G_LSHR %x, %negative(s32)
- ; CHECK: %and:_(s32) = G_AND %shift, %mask
- ; CHECK: $w0 = COPY %and(s32)
- ; CHECK: RET_ReallyLR implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %negative:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: %mask:_(s32) = G_CONSTANT i32 255
+ ; CHECK-NEXT: %shift:_(s32) = G_LSHR %x, %negative(s32)
+ ; CHECK-NEXT: %and:_(s32) = G_AND %shift, %mask
+ ; CHECK-NEXT: $w0 = COPY %and(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
%x:_(s32) = COPY $w0
%negative:_(s32) = G_CONSTANT i32 -1
%mask:_(s32) = G_CONSTANT i32 255
@@ -172,6 +181,8 @@ body: |
$w0 = COPY %and
RET_ReallyLR implicit $w0
+# LSB must be in [0, reg_size)
+
...
---
name: dont_combine_lsb_too_large
@@ -181,17 +192,17 @@ body: |
bb.0:
liveins: $w0
- ; LSB must be in [0, reg_size)
; CHECK-LABEL: name: dont_combine_lsb_too_large
; CHECK: liveins: $w0
- ; CHECK: %x:_(s32) = COPY $w0
- ; CHECK: %too_large:_(s32) = G_CONSTANT i32 32
- ; CHECK: %mask:_(s32) = G_CONSTANT i32 255
- ; CHECK: %shift:_(s32) = G_LSHR %x, %too_large(s32)
- ; CHECK: %and:_(s32) = G_AND %shift, %mask
- ; CHECK: $w0 = COPY %and(s32)
- ; CHECK: RET_ReallyLR implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %too_large:_(s32) = G_CONSTANT i32 32
+ ; CHECK-NEXT: %mask:_(s32) = G_CONSTANT i32 255
+ ; CHECK-NEXT: %shift:_(s32) = G_LSHR %x, %too_large(s32)
+ ; CHECK-NEXT: %and:_(s32) = G_AND %shift, %mask
+ ; CHECK-NEXT: $w0 = COPY %and(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
%x:_(s32) = COPY $w0
%too_large:_(s32) = G_CONSTANT i32 32
%mask:_(s32) = G_CONSTANT i32 255
@@ -210,15 +221,16 @@ body: |
liveins: $d0
; CHECK-LABEL: name: dont_combine_vector
; CHECK: liveins: $d0
- ; CHECK: %x:_(<2 x s32>) = COPY $d0
- ; CHECK: %lsb_cst:_(s32) = G_CONSTANT i32 5
- ; CHECK: %lsb:_(<2 x s32>) = G_BUILD_VECTOR %lsb_cst(s32), %lsb_cst(s32)
- ; CHECK: %mask_cst:_(s32) = G_CONSTANT i32 255
- ; CHECK: %mask:_(<2 x s32>) = G_BUILD_VECTOR %mask_cst(s32), %mask_cst(s32)
- ; CHECK: %shift:_(<2 x s32>) = G_LSHR %x, %lsb(<2 x s32>)
- ; CHECK: %and:_(<2 x s32>) = G_AND %shift, %mask
- ; CHECK: $d0 = COPY %and(<2 x s32>)
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: %lsb_cst:_(s32) = G_CONSTANT i32 5
+ ; CHECK-NEXT: %lsb:_(<2 x s32>) = G_BUILD_VECTOR %lsb_cst(s32), %lsb_cst(s32)
+ ; CHECK-NEXT: %mask_cst:_(s32) = G_CONSTANT i32 255
+ ; CHECK-NEXT: %mask:_(<2 x s32>) = G_BUILD_VECTOR %mask_cst(s32), %mask_cst(s32)
+ ; CHECK-NEXT: %shift:_(<2 x s32>) = G_LSHR %x, %lsb(<2 x s32>)
+ ; CHECK-NEXT: %and:_(<2 x s32>) = G_AND %shift, %mask
+ ; CHECK-NEXT: $d0 = COPY %and(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%x:_(<2 x s32>) = COPY $d0
%lsb_cst:_(s32) = G_CONSTANT i32 5
%lsb:_(<2 x s32>) = G_BUILD_VECTOR %lsb_cst, %lsb_cst
@@ -229,6 +241,9 @@ body: |
$d0 = COPY %and
RET_ReallyLR implicit $d0
+# mask = 0111 1111 1111 ... 1111
+# mask + 1 = 1000 0000 0000 ... 0000
+
...
---
name: max_signed_int_mask
@@ -237,16 +252,15 @@ legalized: true
body: |
bb.0:
liveins: $x0
- ; mask = 0111 1111 1111 ... 1111
- ; mask + 1 = 1000 0000 0000 ... 0000
; CHECK-LABEL: name: max_signed_int_mask
; CHECK: liveins: $x0
- ; CHECK: %x:_(s64) = COPY $x0
- ; CHECK: %lsb:_(s64) = G_CONSTANT i64 0
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
- ; CHECK: %and:_(s64) = G_UBFX %x, %lsb(s64), [[C]]
- ; CHECK: $x0 = COPY %and(s64)
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+ ; CHECK-NEXT: %and:_(s64) = G_UBFX %x, %lsb(s64), [[C]]
+ ; CHECK-NEXT: $x0 = COPY %and(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%x:_(s64) = COPY $x0
%lsb:_(s64) = G_CONSTANT i64 0
%mask:_(s64) = G_CONSTANT i64 9223372036854775807
@@ -255,6 +269,9 @@ body: |
$x0 = COPY %and
RET_ReallyLR implicit $x0
+# mask = 1111 1111 1111 ... 1111
+# mask + 1 = 0000 0000 0000 ... 0000
+
...
---
name: max_unsigned_int_mask
@@ -263,16 +280,15 @@ legalized: true
body: |
bb.0:
liveins: $x0
- ; mask = 1111 1111 1111 ... 1111
- ; mask + 1 = 0000 0000 0000 ... 000
; CHECK-LABEL: name: max_unsigned_int_mask
; CHECK: liveins: $x0
- ; CHECK: %x:_(s64) = COPY $x0
- ; CHECK: %lsb:_(s64) = G_CONSTANT i64 5
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK: %and:_(s64) = G_UBFX %x, %lsb(s64), [[C]]
- ; CHECK: $x0 = COPY %and(s64)
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 5
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+ ; CHECK-NEXT: %and:_(s64) = G_UBFX %x, %lsb(s64), [[C]]
+ ; CHECK-NEXT: $x0 = COPY %and(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%x:_(s64) = COPY $x0
%lsb:_(s64) = G_CONSTANT i64 5
%mask:_(s64) = G_CONSTANT i64 18446744073709551615
From a707d0883b5ab9de6a0a864df614ef14909a4de1 Mon Sep 17 00:00:00 2001
From: Joachim <jenke at itc.rwth-aachen.de>
Date: Wed, 3 Jul 2024 09:33:19 +0200
Subject: [PATCH 077/246] [OpenMP][OMPT] Indicate loop schedule for
worksharing-loop events (#97429)
Use more specific values from `ompt_work_t` to allow the tool to identify
the schedule of a worksharing-loop. With this patch, the runtime will
report the schedule chosen by the runtime rather than necessarily the
schedule literally requested by the clause.
E.g., for a guided schedule with just one iteration per thread, the runtime
would choose and report static.
Fixes issue #63904
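For illustration only, a minimal tool-side work callback that consumes the
new values might look like the sketch below, assuming an OpenMP 5.2-level
omp-tools.h that defines the ompt_work_loop_* enumerators:

  #include <omp-tools.h>
  #include <stdio.h>

  // Sketch of an ompt_callback_work_t handler; the runtime now reports
  // ompt_work_loop_static/dynamic/guided/other instead of plain
  // ompt_work_loop for worksharing-loops.
  static void on_work(ompt_work_t wstype, ompt_scope_endpoint_t endpoint,
                      ompt_data_t *parallel_data, ompt_data_t *task_data,
                      uint64_t count, const void *codeptr_ra) {
    (void)parallel_data; (void)task_data; (void)codeptr_ra;
    if (endpoint != ompt_scope_begin)
      return;
    const char *sched = NULL;
    switch (wstype) {
    case ompt_work_loop_static:  sched = "static";  break;
    case ompt_work_loop_dynamic: sched = "dynamic"; break;
    case ompt_work_loop_guided:  sched = "guided";  break;
    case ompt_work_loop_other:   sched = "other";   break;
    default: return; // not a worksharing-loop
    }
    printf("loop begin: schedule=%s, iterations=%llu\n", sched,
           (unsigned long long)count);
  }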
---
openmp/runtime/src/kmp_csupport.cpp | 4 +-
openmp/runtime/src/kmp_dispatch.cpp | 9 +-
openmp/runtime/src/kmp_sched.cpp | 4 +-
openmp/runtime/src/ompt-specific.h | 19 ++
openmp/runtime/test/ompt/callback.h | 183 +++++-------------
.../synchronization/ordered_dependences.c | 10 +-
openmp/runtime/test/ompt/tasks/taskloop.c | 4 +-
.../test/ompt/tasks/taskloop_dispatch.c | 2 +-
.../test/ompt/teams/distribute_dispatch.c | 8 +-
.../runtime/test/ompt/worksharing/for/auto.c | 3 +
.../ompt/worksharing/for/auto_serialized.c | 3 +
.../test/ompt/worksharing/for/auto_split.c | 3 +
.../runtime/test/ompt/worksharing/for/base.h | 32 +--
.../ompt/worksharing/for/base_serialized.h | 21 +-
.../test/ompt/worksharing/for/base_split.h | 35 ++--
.../ompt/worksharing/for/guided_serialized.c | 3 +
.../test/ompt/worksharing/for/loop_dispatch.c | 48 ++---
.../test/ompt/worksharing/for/runtime.c | 3 +
.../ompt/worksharing/for/runtime_serialized.c | 3 +
.../test/ompt/worksharing/for/runtime_split.c | 3 +
.../runtime/test/ompt/worksharing/sections.c | 6 +-
.../test/ompt/worksharing/sections_dispatch.c | 4 +-
openmp/runtime/test/ompt/worksharing/single.c | 4 +-
23 files changed, 199 insertions(+), 215 deletions(-)
diff --git a/openmp/runtime/src/kmp_csupport.cpp b/openmp/runtime/src/kmp_csupport.cpp
index d638acd85116d..b33c16fa79a65 100644
--- a/openmp/runtime/src/kmp_csupport.cpp
+++ b/openmp/runtime/src/kmp_csupport.cpp
@@ -2006,13 +2006,13 @@ void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
if (ompt_enabled.ompt_callback_work) {
- ompt_work_t ompt_work_type = ompt_work_loop;
+ ompt_work_t ompt_work_type = ompt_work_loop_static;
ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
// Determine workshare type
if (loc != NULL) {
if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
- ompt_work_type = ompt_work_loop;
+ ompt_work_type = ompt_work_loop_static;
} else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
ompt_work_type = ompt_work_sections;
} else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
diff --git a/openmp/runtime/src/kmp_dispatch.cpp b/openmp/runtime/src/kmp_dispatch.cpp
index 8475e80f0b02d..3b4a1f34df040 100644
--- a/openmp/runtime/src/kmp_dispatch.cpp
+++ b/openmp/runtime/src/kmp_dispatch.cpp
@@ -1164,8 +1164,9 @@ __kmp_dispatch_init(ident_t *loc, int gtid, enum sched_type schedule, T lb,
ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
ompt_callbacks.ompt_callback(ompt_callback_work)(
- ompt_work_loop, ompt_scope_begin, &(team_info->parallel_data),
- &(task_info->task_data), pr->u.p.tc, OMPT_LOAD_RETURN_ADDRESS(gtid));
+ ompt_get_work_schedule(pr->schedule), ompt_scope_begin,
+ &(team_info->parallel_data), &(task_info->task_data), pr->u.p.tc,
+ OMPT_LOAD_RETURN_ADDRESS(gtid));
}
#endif
KMP_PUSH_PARTITIONED_TIMER(OMP_loop_dynamic);
@@ -2121,8 +2122,8 @@ int __kmp_dispatch_next_algorithm(int gtid,
ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL); \
ompt_task_info_t *task_info = __ompt_get_task_info_object(0); \
ompt_callbacks.ompt_callback(ompt_callback_work)( \
- ompt_work_loop, ompt_scope_end, &(team_info->parallel_data), \
- &(task_info->task_data), 0, codeptr); \
+ ompt_get_work_schedule(pr->schedule), ompt_scope_end, \
+ &(team_info->parallel_data), &(task_info->task_data), 0, codeptr); \
} \
}
#define OMPT_LOOP_DISPATCH(lb, ub, st, status) \
diff --git a/openmp/runtime/src/kmp_sched.cpp b/openmp/runtime/src/kmp_sched.cpp
index 4d764e441f283..2e0dfac6eeb3b 100644
--- a/openmp/runtime/src/kmp_sched.cpp
+++ b/openmp/runtime/src/kmp_sched.cpp
@@ -103,7 +103,7 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
#if OMPT_SUPPORT && OMPT_OPTIONAL
ompt_team_info_t *team_info = NULL;
ompt_task_info_t *task_info = NULL;
- ompt_work_t ompt_work_type = ompt_work_loop;
+ ompt_work_t ompt_work_type = ompt_work_loop_static;
static kmp_int8 warn = 0;
@@ -114,7 +114,7 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
// Determine workshare type
if (loc != NULL) {
if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
- ompt_work_type = ompt_work_loop;
+ ompt_work_type = ompt_work_loop_static;
} else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
ompt_work_type = ompt_work_sections;
} else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
diff --git a/openmp/runtime/src/ompt-specific.h b/openmp/runtime/src/ompt-specific.h
index 63c59c3fb3984..7864ed6126c70 100644
--- a/openmp/runtime/src/ompt-specific.h
+++ b/openmp/runtime/src/ompt-specific.h
@@ -130,6 +130,25 @@ inline const char *ompt_get_runtime_version() {
return &__kmp_version_lib_ver[KMP_VERSION_MAGIC_LEN];
}
+inline ompt_work_t ompt_get_work_schedule(enum sched_type schedule) {
+ switch (SCHEDULE_WITHOUT_MODIFIERS(schedule)) {
+ case kmp_sch_static_chunked:
+ case kmp_sch_static_balanced:
+ case kmp_sch_static_greedy:
+ return ompt_work_loop_static;
+ case kmp_sch_dynamic_chunked:
+ case kmp_sch_static_steal:
+ return ompt_work_loop_dynamic;
+ case kmp_sch_guided_iterative_chunked:
+ case kmp_sch_guided_analytical_chunked:
+ case kmp_sch_guided_chunked:
+ case kmp_sch_guided_simd:
+ return ompt_work_loop_guided;
+ default:
+ return ompt_work_loop_other;
+ }
+}
+
class OmptReturnAddressGuard {
private:
bool SetAddress{false};
diff --git a/openmp/runtime/test/ompt/callback.h b/openmp/runtime/test/ompt/callback.h
index f07176cc5be66..c1093141e9126 100644
--- a/openmp/runtime/test/ompt/callback.h
+++ b/openmp/runtime/test/ompt/callback.h
@@ -47,6 +47,36 @@ static const char* ompt_cancel_flag_t_values[] = {
"ompt_cancel_discarded_task"
};
+static const char *ompt_work_t_values[] = {"undefined",
+ "ompt_work_loop",
+ "ompt_work_sections",
+ "ompt_work_single_executor",
+ "ompt_work_single_other",
+ "ompt_work_workshare",
+ "ompt_work_distribute",
+ "ompt_work_taskloop",
+ "ompt_work_scope",
+ "ompt_work_workdistribute",
+ "ompt_work_loop_static",
+ "ompt_work_loop_dynamic",
+ "ompt_work_loop_guided",
+ "ompt_work_loop_other"};
+
+static const char *ompt_work_events_t_values[] = {"undefined",
+ "ompt_event_loop",
+ "ompt_event_sections",
+ "ompt_event_single_in_block",
+ "ompt_event_single_others",
+ "ompt_event_workshare",
+ "ompt_event_distribute",
+ "ompt_event_taskloop",
+ "ompt_event_scope",
+ "ompt_event_workdistribute",
+ "ompt_event_loop_static",
+ "ompt_event_loop_dynamic",
+ "ompt_event_loop_guided",
+ "ompt_event_loop_other"};
+
static const char *ompt_dependence_type_t_values[36] = {
"ompt_dependence_type_UNDEFINED",
"ompt_dependence_type_in", // 1
@@ -852,144 +882,21 @@ on_ompt_callback_work(
{
switch(endpoint)
{
- case ompt_scope_begin:
- switch(wstype)
- {
- case ompt_work_loop:
- case ompt_work_loop_static:
- case ompt_work_loop_dynamic:
- case ompt_work_loop_guided:
- case ompt_work_loop_other:
- // TODO: add schedule attribute for the different work_loop types.
- // e.g., ", schedule=%s", ..., ompt_schedule_values[wstype]
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_loop_begin: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_sections:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_sections_begin: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_single_executor:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_single_in_block_begin: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_single_other:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_single_others_begin: parallel_id=%" PRIu64
- ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_workshare:
- //impl
- break;
- case ompt_work_distribute:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_distribute_begin: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_taskloop:
- //impl
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_taskloop_begin: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_scope:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_scope_begin: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- }
- break;
- case ompt_scope_end:
- switch(wstype)
- {
- case ompt_work_loop:
- case ompt_work_loop_static:
- case ompt_work_loop_dynamic:
- case ompt_work_loop_guided:
- case ompt_work_loop_other:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_loop_end: parallel_id=%" PRIu64
- ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_sections:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_sections_end: parallel_id=%" PRIu64
- ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_single_executor:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_single_in_block_end: parallel_id=%" PRIu64
- ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_single_other:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_single_others_end: parallel_id=%" PRIu64
- ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_workshare:
- //impl
- break;
- case ompt_work_distribute:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_distribute_end: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_taskloop:
- //impl
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_taskloop_end: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- case ompt_work_scope:
- printf("%" PRIu64 ":" _TOOL_PREFIX
- " ompt_event_scope_end: parallel_id=%" PRIu64
- ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
- "\n",
- ompt_get_thread_data()->value, parallel_data->value,
- task_data->value, codeptr_ra, count);
- break;
- }
- break;
- case ompt_scope_beginend:
- printf("ompt_scope_beginend should never be passed to %s\n", __func__);
- exit(-1);
+ case ompt_scope_begin:
+ printf("%" PRIu64 ":" _TOOL_PREFIX " %s_begin: parallel_id=%" PRIu64
+ ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
+ ompt_get_thread_data()->value, ompt_work_events_t_values[wstype],
+ parallel_data->value, task_data->value, codeptr_ra, count);
+ break;
+ case ompt_scope_end:
+ printf("%" PRIu64 ":" _TOOL_PREFIX " %s_end: parallel_id=%" PRIu64
+ ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
+ ompt_get_thread_data()->value, ompt_work_events_t_values[wstype],
+ parallel_data->value, task_data->value, codeptr_ra, count);
+ break;
+ case ompt_scope_beginend:
+ printf("ompt_scope_beginend should never be passed to %s\n", __func__);
+ exit(-1);
}
}
diff --git a/openmp/runtime/test/ompt/synchronization/ordered_dependences.c b/openmp/runtime/test/ompt/synchronization/ordered_dependences.c
index 5b7d56599f0e1..0eb64543d2a64 100644
--- a/openmp/runtime/test/ompt/synchronization/ordered_dependences.c
+++ b/openmp/runtime/test/ompt/synchronization/ordered_dependences.c
@@ -7,7 +7,7 @@
int main() {
int a[10][10];
#pragma omp parallel num_threads(2)
-#pragma omp for ordered(2)
+#pragma omp for ordered(2) schedule(static)
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++) {
a[i][j] = i + j + 1;
@@ -23,8 +23,8 @@ int main() {
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
-// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],
+// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_loop_static_begin:
+// CHECK-SAME: parallel_id={{[0-9]+}}, task_id=[[ITASK:[0-9]+]],
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (0,
@@ -38,8 +38,8 @@ int main() {
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
-// CHECK: {{^}}[[WORKER:[0-9]+]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],
+// CHECK: {{^}}[[WORKER:[0-9]+]]: ompt_event_loop_static_begin:
+// CHECK-SAME: parallel_id={{[0-9]+}}, task_id=[[ITASK:[0-9]+]],
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
diff --git a/openmp/runtime/test/ompt/tasks/taskloop.c b/openmp/runtime/test/ompt/tasks/taskloop.c
index af7f778c976ae..8489f8e0ed6dc 100644
--- a/openmp/runtime/test/ompt/tasks/taskloop.c
+++ b/openmp/runtime/test/ompt/tasks/taskloop.c
@@ -37,7 +37,7 @@ int main() {
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID1]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskloop_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]]
- // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
+ // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID1]]
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]], count=2
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
@@ -52,7 +52,7 @@ int main() {
// CHECK-NOT: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskloop_end:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]]
- // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
+ // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID1]]
// CHECK-SAME: count=2
// CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_wait_taskgroup_begin:
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_taskgroup_end:
diff --git a/openmp/runtime/test/ompt/tasks/taskloop_dispatch.c b/openmp/runtime/test/ompt/tasks/taskloop_dispatch.c
index d4ac609d0b3ed..84e8a71fc9933 100644
--- a/openmp/runtime/test/ompt/tasks/taskloop_dispatch.c
+++ b/openmp/runtime/test/ompt/tasks/taskloop_dispatch.c
@@ -32,7 +32,7 @@ int main() {
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskloop_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]]
- // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
+ // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID1]]
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]], count=16
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
diff --git a/openmp/runtime/test/ompt/teams/distribute_dispatch.c b/openmp/runtime/test/ompt/teams/distribute_dispatch.c
index 3254a287236fd..1dcede84f28ec 100644
--- a/openmp/runtime/test/ompt/teams/distribute_dispatch.c
+++ b/openmp/runtime/test/ompt/teams/distribute_dispatch.c
@@ -24,28 +24,28 @@ int main() {
// CHECK: {{^}}[[THREAD_ID0:[0-9]+]]: ompt_event_distribute_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID0:[0-9]+]]
-// CHECK-SAME: parent_task_id=[[TASK_ID0:[0-9]+]]
+// CHECK-SAME: task_id=[[TASK_ID0:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_distribute_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID0]], task_id=[[TASK_ID0]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
// CHECK: {{^}}[[THREAD_ID1:[0-9]+]]: ompt_event_distribute_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID1:[0-9]+]]
-// CHECK-SAME: parent_task_id=[[TASK_ID1:[0-9]+]]
+// CHECK-SAME: task_id=[[TASK_ID1:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_distribute_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID1]], task_id=[[TASK_ID1]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
// CHECK: {{^}}[[THREAD_ID2:[0-9]+]]: ompt_event_distribute_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID2:[0-9]+]]
-// CHECK-SAME: parent_task_id=[[TASK_ID2:[0-9]+]]
+// CHECK-SAME: task_id=[[TASK_ID2:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_distribute_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID2]], task_id=[[TASK_ID2]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
// CHECK: {{^}}[[THREAD_ID3:[0-9]+]]: ompt_event_distribute_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID3:[0-9]+]]
-// CHECK-SAME: parent_task_id=[[TASK_ID3:[0-9]+]]
+// CHECK-SAME: task_id=[[TASK_ID3:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_distribute_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID3]], task_id=[[TASK_ID3]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
diff --git a/openmp/runtime/test/ompt/worksharing/for/auto.c b/openmp/runtime/test/ompt/worksharing/for/auto.c
index 17d26f5e996b7..3ce66d511ab3c 100644
--- a/openmp/runtime/test/ompt/worksharing/for/auto.c
+++ b/openmp/runtime/test/ompt/worksharing/for/auto.c
@@ -4,4 +4,7 @@
// XFAIL: gcc
#define SCHEDULE auto
+// The runtime uses a guided schedule for auto,
+// which is a reasonable choice
+#define SCHED_OUTPUT "guided"
#include "base.h"
diff --git a/openmp/runtime/test/ompt/worksharing/for/auto_serialized.c b/openmp/runtime/test/ompt/worksharing/for/auto_serialized.c
index f756166fe9826..400fcc8f3a78f 100644
--- a/openmp/runtime/test/ompt/worksharing/for/auto_serialized.c
+++ b/openmp/runtime/test/ompt/worksharing/for/auto_serialized.c
@@ -4,4 +4,7 @@
// XFAIL: gcc
#define SCHEDULE auto
+// The runtime uses a static schedule for a serialized loop,
+// which is a reasonable choice
+#define SCHED_OUTPUT "static"
#include "base_serialized.h"
diff --git a/openmp/runtime/test/ompt/worksharing/for/auto_split.c b/openmp/runtime/test/ompt/worksharing/for/auto_split.c
index d82e3fd1ef62a..c7b07d7407bad 100644
--- a/openmp/runtime/test/ompt/worksharing/for/auto_split.c
+++ b/openmp/runtime/test/ompt/worksharing/for/auto_split.c
@@ -5,4 +5,7 @@
// XFAIL: gcc
#define SCHEDULE auto
+// The runtime uses a guided schedule for auto,
+// which is a reasonable choice
+#define SCHED_OUTPUT "guided"
#include "base_split.h"
diff --git a/openmp/runtime/test/ompt/worksharing/for/base.h b/openmp/runtime/test/ompt/worksharing/for/base.h
index 8a496d92083de..8edc894162f20 100644
--- a/openmp/runtime/test/ompt/worksharing/for/base.h
+++ b/openmp/runtime/test/ompt/worksharing/for/base.h
@@ -1,43 +1,53 @@
#include "callback.h"
#include <omp.h>
+#include <stdio.h>
+
+#define STRINGIFY(x) #x
+#define STR(x) STRINGIFY(x)
+#ifndef SCHED_OUTPUT
+#define SCHED_OUTPUT STR(SCHEDULE)
+#endif
int main()
{
unsigned int i;
+ printf("0: Schedule: " SCHED_OUTPUT "\n");
- #pragma omp parallel for num_threads(4) schedule(SCHEDULE)
- for (i = 0; i < 4; i++) {
+#pragma omp parallel for num_threads(4) schedule(SCHEDULE)
+ for (i = 0; i < 64; i++) {
}
+ // clang-format off
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work'
-
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
+ // CHECK: 0: Schedule: [[SCHED:[a-z]+]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=0x{{[0-f]+}}, invoker={{[0-9]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
- // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=
- // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
+ // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=
+ // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
+ // clang-format on
return 0;
}
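The SCHED_OUTPUT default added above relies on the standard two-level
stringification idiom; as a standalone illustration (not from the patch):

  #define STRINGIFY(x) #x
  #define STR(x) STRINGIFY(x)
  #define SCHEDULE guided
  // STR(SCHEDULE) first expands SCHEDULE, yielding "guided";
  // STRINGIFY(SCHEDULE) alone would produce the literal "SCHEDULE".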
diff --git a/openmp/runtime/test/ompt/worksharing/for/base_serialized.h b/openmp/runtime/test/ompt/worksharing/for/base_serialized.h
index 3376b3702eae4..93d6a348553bb 100644
--- a/openmp/runtime/test/ompt/worksharing/for/base_serialized.h
+++ b/openmp/runtime/test/ompt/worksharing/for/base_serialized.h
@@ -1,14 +1,23 @@
#include "callback.h"
#include <omp.h>
+#include <stdio.h>
+
+#define STRINGIFY(x) #x
+#define STR(x) STRINGIFY(x)
+#ifndef SCHED_OUTPUT
+#define SCHED_OUTPUT STR(SCHEDULE)
+#endif
int main()
{
unsigned int i;
+ printf("0: Schedule: " SCHED_OUTPUT "\n");
- #pragma omp parallel for num_threads(1) schedule(SCHEDULE)
- for (i = 0; i < 1; i++) {
+#pragma omp parallel for num_threads(1) schedule(SCHEDULE)
+ for (i = 0; i < 64; i++) {
}
-
+
+ // clang-format off
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
@@ -17,12 +26,14 @@ int main()
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
+ // CHECK: 0: Schedule: [[SCHED:[a-z]+]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=0x{{[0-f]+}}, invoker={{[0-9]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
- // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=0x{{[0-f]+}}
- // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
+ // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=0x{{[0-f]+}}
+ // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[PARALLEL_ID,0]}}, task_id=[[IMPLICIT_TASK_ID]]
+ // clang-format on
return 0;
}
diff --git a/openmp/runtime/test/ompt/worksharing/for/base_split.h b/openmp/runtime/test/ompt/worksharing/for/base_split.h
index 0f1fed3c44940..5ba59afc04ab0 100644
--- a/openmp/runtime/test/ompt/worksharing/for/base_split.h
+++ b/openmp/runtime/test/ompt/worksharing/for/base_split.h
@@ -1,5 +1,12 @@
#include "callback.h"
#include <omp.h>
+#include <stdio.h>
+
+#define STRINGIFY(x) #x
+#define STR(x) STRINGIFY(x)
+#ifndef SCHED_OUTPUT
+#define SCHED_OUTPUT STR(SCHEDULE)
+#endif
/* With the combined parallel-for construct (base.h), the return-addresses are hard to compare.
With the separate parallel and for-nowait construct, the addresses become more predictable,
@@ -10,18 +17,20 @@
int main()
{
unsigned int i;
+ printf("0: Schedule: " SCHED_OUTPUT "\n");
- #pragma omp parallel num_threads(4)
+#pragma omp parallel num_threads(4)
{
print_current_address(0);
#pragma omp for schedule(SCHEDULE) nowait
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 64; i++) {
print_fuzzy_address(1);
}
print_fuzzy_address(2);
}
print_fuzzy_address(3);
+ // clang-format off
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
@@ -30,37 +39,39 @@ int main()
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
+ // CHECK: 0: Schedule: [[SCHED:[a-z]+]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[PARALLEL_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker={{[0-9]+}}
- // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=[[LOOP_BEGIN_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
- // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
+ // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_BEGIN_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
+ // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[LOOP_END_RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, invoker={{[0-9]+}}, codeptr_ra=[[PARALLEL_RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[PARALLEL_RETURN_ADDRESS]]
- // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
+ // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[THREAD_ID]]: fuzzy_address={{.*}}[[LOOP_END_RETURN_ADDRESS]]
- // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
+ // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[THREAD_ID]]: fuzzy_address={{.*}}[[LOOP_END_RETURN_ADDRESS]]
- // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
- // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
+ // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
+ // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_[[SCHED]]_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[THREAD_ID]]: fuzzy_address={{.*}}[[LOOP_END_RETURN_ADDRESS]]
// CHECK-LOOP: 0: NULL_POINTER=[[NULL:.*$]]
+ // CHECK-LOOP: 0: Schedule: [[SCHED:[a-z]+]]
// CHECK-LOOP: 0: ompt_event_runtime_shutdown
// CHECK-LOOP: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra={{0x[0-f]+}}, invoker={{[0-9]+}}
- // CHECK-LOOP: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=[[LOOP_BEGIN_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
+ // CHECK-LOOP: {{^}}[[MASTER_ID]]: ompt_event_loop_[[SCHED]]_begin: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_BEGIN_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-LOOP: {{^}}{{[0-9]+}}: fuzzy_address={{.*}}[[LOOP_BEGIN_RETURN_ADDRESS]]
// CHECK-LOOP: {{^}}{{[0-9]+}}: fuzzy_address={{.*}}[[LOOP_BEGIN_RETURN_ADDRESS]]
// CHECK-LOOP: {{^}}{{[0-9]+}}: fuzzy_address={{.*}}[[LOOP_BEGIN_RETURN_ADDRESS]]
// CHECK-LOOP: {{^}}{{[0-9]+}}: fuzzy_address={{.*}}[[LOOP_BEGIN_RETURN_ADDRESS]]
-
+ // clang-format on
return 0;
}
diff --git a/openmp/runtime/test/ompt/worksharing/for/guided_serialized.c b/openmp/runtime/test/ompt/worksharing/for/guided_serialized.c
index 4b5096d5679ce..a1e239e081d88 100644
--- a/openmp/runtime/test/ompt/worksharing/for/guided_serialized.c
+++ b/openmp/runtime/test/ompt/worksharing/for/guided_serialized.c
@@ -2,4 +2,7 @@
// REQUIRES: ompt
#define SCHEDULE guided
+// The runtime uses a static schedule for a serialized loop,
+// which is a reasonable choice
+#define SCHED_OUTPUT "static"
#include "base_serialized.h"
diff --git a/openmp/runtime/test/ompt/worksharing/for/loop_dispatch.c b/openmp/runtime/test/ompt/worksharing/for/loop_dispatch.c
index 2bd8af427f61d..4f10a67bcabb4 100644
--- a/openmp/runtime/test/ompt/worksharing/for/loop_dispatch.c
+++ b/openmp/runtime/test/ompt/worksharing/for/loop_dispatch.c
@@ -56,72 +56,72 @@ int main() {
// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_implicit_task_begin:
// CHECK-SAME: task_id=[[TASK_ID0:[0-9]+]]
-// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]]
+// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_static_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]]
// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
-// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]]
+// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_dynamic_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]]
// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1
-// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]]
+// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_guided_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]]
// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}}
// CHECK: {{^}}[[THREAD_ID1:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: task_id=[[TASK_ID1:[0-9]+]]
-// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]]
+// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_static_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]]
// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
-// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]]
+// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_dynamic_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]]
// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1
-// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]]
+// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_guided_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]]
// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}}
// CHECK: {{^}}[[THREAD_ID2:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: task_id=[[TASK_ID2:[0-9]+]]
-// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]]
+// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_static_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]]
// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
-// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]]
+// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_dynamic_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]]
// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1
-// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]]
+// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_guided_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]]
// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}}
// CHECK: {{^}}[[THREAD_ID3:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: task_id=[[TASK_ID3:[0-9]+]]
-// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]]
+// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_static_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]]
// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
-// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]]
+// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_dynamic_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]]
// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1
-// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin:
-// CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]]
+// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_guided_begin:
+// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]]
// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}}
diff --git a/openmp/runtime/test/ompt/worksharing/for/runtime.c b/openmp/runtime/test/ompt/worksharing/for/runtime.c
index bcf160faeb898..8c71c8b336246 100644
--- a/openmp/runtime/test/ompt/worksharing/for/runtime.c
+++ b/openmp/runtime/test/ompt/worksharing/for/runtime.c
@@ -2,4 +2,7 @@
// REQUIRES: ompt
#define SCHEDULE runtime
+// Without any schedule specified, the runtime uses a static schedule,
+// which is a reasonable choice
+#define SCHED_OUTPUT "static"
#include "base.h"
diff --git a/openmp/runtime/test/ompt/worksharing/for/runtime_serialized.c b/openmp/runtime/test/ompt/worksharing/for/runtime_serialized.c
index 231d67d91aa9e..38e3a3ca7521c 100644
--- a/openmp/runtime/test/ompt/worksharing/for/runtime_serialized.c
+++ b/openmp/runtime/test/ompt/worksharing/for/runtime_serialized.c
@@ -2,4 +2,7 @@
// REQUIRES: ompt
#define SCHEDULE runtime
+// Without any schedule specified, the runtime uses a static schedule,
+// which is a reasonable choice
+#define SCHED_OUTPUT "static"
#include "base_serialized.h"
diff --git a/openmp/runtime/test/ompt/worksharing/for/runtime_split.c b/openmp/runtime/test/ompt/worksharing/for/runtime_split.c
index 7a677edbfd0e3..5c70908862d7f 100644
--- a/openmp/runtime/test/ompt/worksharing/for/runtime_split.c
+++ b/openmp/runtime/test/ompt/worksharing/for/runtime_split.c
@@ -4,4 +4,7 @@
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#define SCHEDULE runtime
+// Without any schedule specified, the runtime uses a static schedule,
+// which is a reasonable choice
+#define SCHED_OUTPUT "static"
#include "base_split.h"
diff --git a/openmp/runtime/test/ompt/worksharing/sections.c b/openmp/runtime/test/ompt/worksharing/sections.c
index bafb74312ff45..ec4cfdf5f082d 100644
--- a/openmp/runtime/test/ompt/worksharing/sections.c
+++ b/openmp/runtime/test/ompt/worksharing/sections.c
@@ -21,16 +21,18 @@ int main()
}
}
+ // clang-format off
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
- // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_sections_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], parent_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[SECT_BEGIN:0x[0-f]+]], count=2
+ // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_sections_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[SECT_BEGIN:0x[0-f]+]], count=2
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_sections_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[SECT_END:0x[0-f]+]]
- // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_sections_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[SECT_BEGIN]], count=2
+ // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_sections_begin: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[SECT_BEGIN]], count=2
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_sections_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[SECT_END]]
+ // clang-format on
return 0;
}
diff --git a/openmp/runtime/test/ompt/worksharing/sections_dispatch.c b/openmp/runtime/test/ompt/worksharing/sections_dispatch.c
index bcf0bd0cde673..ae225ce17d456 100644
--- a/openmp/runtime/test/ompt/worksharing/sections_dispatch.c
+++ b/openmp/runtime/test/ompt/worksharing/sections_dispatch.c
@@ -29,7 +29,7 @@ int main()
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_sections_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
- // CHECK-SAME: parent_task_id=[[TASK_ID:[0-9]+]],
+ // CHECK-SAME: task_id=[[TASK_ID:[0-9]+]],
// CHECK-SAME: codeptr_ra=[[SECT_BEGIN:0x[0-f]+]], count=2
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_section_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]]
@@ -39,7 +39,7 @@ int main()
// CHECK-SAME: codeptr_ra=[[SECT_END:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_sections_begin:
- // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID:[0-9]+]],
+ // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID:[0-9]+]],
// CHECK-SAME: codeptr_ra=[[SECT_BEGIN]], count=2
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_section_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]]
diff --git a/openmp/runtime/test/ompt/worksharing/single.c b/openmp/runtime/test/ompt/worksharing/single.c
index 6b24f2d9398fe..b3b67d6539966 100644
--- a/openmp/runtime/test/ompt/worksharing/single.c
+++ b/openmp/runtime/test/ompt/worksharing/single.c
@@ -20,17 +20,19 @@ int main()
printf("x=%d\n", x);
+ // clang-format off
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
- // CHECK-DAG: {{^}}[[THREAD_ID_1:[0-9]+]]: ompt_event_single_in_block_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], parent_task_id=[[TASK_ID:[0-9]+]], codeptr_ra={{0x[0-f]+}}, count=1
+ // CHECK-DAG: {{^}}[[THREAD_ID_1:[0-9]+]]: ompt_event_single_in_block_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[TASK_ID:[0-9]+]], codeptr_ra={{0x[0-f]+}}, count=1
// CHECK-DAG: {{^}}[[THREAD_ID_1]]: in single
// CHECK-DAG: {{^}}[[THREAD_ID_1]]: ompt_event_single_in_block_end: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]+}}, count=1
// CHECK-DAG: {{^}}[[THREAD_ID_2:[0-9]+]]: ompt_event_single_others_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[TASK_ID:[0-9]+]], codeptr_ra={{0x[0-f]+}}, count=1
// CHECK-DAG: {{^}}[[THREAD_ID_2]]: ompt_event_single_others_end: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]+}}, count=1
+ // clang-format on
return 0;
}
From b77e734e4e6c8f5e016ba3ac49526862e6039482 Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Wed, 3 Jul 2024 08:43:29 +0100
Subject: [PATCH 078/246] [lldb][AArch64] Add register field enum information
(#96887)
This enables XML output for enums and adds enums for two fields on AArch64
Linux:
* mte_ctrl.tcf, which controls how tag faults are delivered.
* fpcr.rmode, which sets the rounding mode for floating-point
operations.
The other one we could do is cpsr.btype, but it is not clear what would
be useful here, so I'm not including it in this change.
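As a sketch of the pattern used in the diff below (not additional patch
code), an enum type is defined once and then referenced from the field it
describes:

  // Names each value a 2-bit field can take; the field then points at
  // the enum so the debugger can print names instead of raw numbers.
  static const FieldEnum rmode_enum(
      "rmode_enum", {{0, "RN"}, {1, "RP"}, {2, "RM"}, {3, "RZ"}});
  // fpcr bits 22-23 render as RN/RP/RM/RZ rather than 0-3.
  std::vector<RegisterFlags::Field> fpcr_fields{
      {"RMode", 22, 23, &rmode_enum}};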
---
.../Process/Utility/RegisterFlagsDetector_arm64.cpp | 12 +++++++++---
.../gdb-remote/GDBRemoteCommunicationServerLLGS.cpp | 2 ++
.../register/register_command/TestRegisters.py | 8 +++++++-
.../postmortem/elf-core/TestLinuxCore.py | 7 ++++++-
.../TestAArch64LinuxMTEMemoryTagCoreFile.py | 13 ++++++++++---
.../Core/aarch64-freebsd-register-fields.test | 2 +-
6 files changed, 35 insertions(+), 9 deletions(-)
diff --git a/lldb/source/Plugins/Process/Utility/RegisterFlagsDetector_arm64.cpp b/lldb/source/Plugins/Process/Utility/RegisterFlagsDetector_arm64.cpp
index 024c6ad208689..7c8dba3680938 100644
--- a/lldb/source/Plugins/Process/Utility/RegisterFlagsDetector_arm64.cpp
+++ b/lldb/source/Plugins/Process/Utility/RegisterFlagsDetector_arm64.cpp
@@ -53,16 +53,22 @@ Arm64RegisterFlagsDetector::DetectMTECtrlFields(uint64_t hwcap,
// Represents the contents of NT_ARM_TAGGED_ADDR_CTRL and the value passed
// to prctl(PR_TAGGED_ADDR_CTRL...). Fields are derived from the defines
// used to build the value.
+
+ static const FieldEnum tcf_enum(
+ "tcf_enum",
+ {{0, "TCF_NONE"}, {1, "TCF_SYNC"}, {2, "TCF_ASYNC"}, {3, "TCF_ASYMM"}});
return {{"TAGS", 3, 18}, // 16 bit bitfield shifted up by PR_MTE_TAG_SHIFT.
- {"TCF_ASYNC", 2},
- {"TCF_SYNC", 1},
+ {"TCF", 1, 2, &tcf_enum},
{"TAGGED_ADDR_ENABLE", 0}};
}
Arm64RegisterFlagsDetector::Fields
Arm64RegisterFlagsDetector::DetectFPCRFields(uint64_t hwcap, uint64_t hwcap2) {
+ static const FieldEnum rmode_enum(
+ "rmode_enum", {{0, "RN"}, {1, "RP"}, {2, "RM"}, {3, "RZ"}});
+
std::vector<RegisterFlags::Field> fpcr_fields{
- {"AHP", 26}, {"DN", 25}, {"FZ", 24}, {"RMode", 22, 23},
+ {"AHP", 26}, {"DN", 25}, {"FZ", 24}, {"RMode", 22, 23, &rmode_enum},
// Bits 21-20 are "Stride" which is unused in AArch64 state.
};
diff --git a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
index ae1a77e5be832..08d5f5039d516 100644
--- a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
+++ b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
@@ -3083,6 +3083,7 @@ GDBRemoteCommunicationServerLLGS::BuildTargetXml() {
if (registers_count)
response.IndentMore();
+ llvm::StringSet<> field_enums_seen;
for (int reg_index = 0; reg_index < registers_count; reg_index++) {
const RegisterInfo *reg_info =
reg_context.GetRegisterInfoAtIndex(reg_index);
@@ -3096,6 +3097,7 @@ GDBRemoteCommunicationServerLLGS::BuildTargetXml() {
if (reg_info->flags_type) {
response.IndentMore();
+ reg_info->flags_type->EnumsToXML(response, field_enums_seen);
reg_info->flags_type->ToXML(response);
response.IndentLess();
}
diff --git a/lldb/test/API/commands/register/register/register_command/TestRegisters.py b/lldb/test/API/commands/register/register/register_command/TestRegisters.py
index dd887740c3c12..d1fc3e100af33 100644
--- a/lldb/test/API/commands/register/register/register_command/TestRegisters.py
+++ b/lldb/test/API/commands/register/register/register_command/TestRegisters.py
@@ -632,7 +632,13 @@ def test_register_read_fields(self):
self.expect("register read fpsr", substrs=["= (QC = 0, IDC = 0, IXC = 0"])
# AHP/DN/FZ/RMode always present, others may vary.
self.expect(
- "register read fpcr", substrs=["= (AHP = 0, DN = 0, FZ = 0, RMode = 0"]
+ "register read fpcr", substrs=["= (AHP = 0, DN = 0, FZ = 0, RMode = RN"]
+ )
+
+ # Should get enumerator descriptions for RMode.
+ self.expect(
+ "register info fpcr",
+ substrs=["RMode: 0 = RN, 1 = RP, 2 = RM, 3 = RZ"],
)
@skipUnlessPlatform(["linux"])
diff --git a/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py b/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py
index 1eaaa87d3b87d..0afac26367de0 100644
--- a/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py
+++ b/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py
@@ -583,7 +583,12 @@ def test_aarch64_sve_regs_full(self):
self.expect("register read fpsr", substrs=["= (QC = 0, IDC = 0, IXC = 0"])
# AHP/DN/FZ/RMode always present, others may vary.
self.expect(
- "register read fpcr", substrs=["= (AHP = 0, DN = 0, FZ = 0, RMode = 0"]
+ "register read fpcr", substrs=["= (AHP = 0, DN = 0, FZ = 0, RMode = RN"]
+ )
+ # RMode should have enumerator descriptions.
+ self.expect(
+ "register info fpcr",
+ substrs=["RMode: 0 = RN, 1 = RP, 2 = RM, 3 = RZ"],
)
@skipIfLLVMTargetMissing("AArch64")
diff --git a/lldb/test/API/linux/aarch64/mte_core_file/TestAArch64LinuxMTEMemoryTagCoreFile.py b/lldb/test/API/linux/aarch64/mte_core_file/TestAArch64LinuxMTEMemoryTagCoreFile.py
index 045f8c0a70108..0667759a341b8 100644
--- a/lldb/test/API/linux/aarch64/mte_core_file/TestAArch64LinuxMTEMemoryTagCoreFile.py
+++ b/lldb/test/API/linux/aarch64/mte_core_file/TestAArch64LinuxMTEMemoryTagCoreFile.py
@@ -238,8 +238,15 @@ def test_mte_ctrl_register(self):
expected = ["mte_ctrl = 0x000000000007fffb"]
if self.hasXMLSupport():
- expected.append(
- "(TAGS = 65535, TCF_ASYNC = 0, TCF_SYNC = 1, TAGGED_ADDR_ENABLE = 1)"
- )
+ expected.append("(TAGS = 65535, TCF = TCF_SYNC, TAGGED_ADDR_ENABLE = 1)")
self.expect("register read mte_ctrl", substrs=expected)
+
+ if self.hasXMLSupport():
+ # Should get enumerator descriptions for TCF
+ self.expect(
+ "register info mte_ctrl",
+ substrs=[
+ "TCF: 0 = TCF_NONE, 1 = TCF_SYNC, 2 = TCF_ASYNC, 3 = TCF_ASYMM"
+ ],
+ )
diff --git a/lldb/test/Shell/Register/Core/aarch64-freebsd-register-fields.test b/lldb/test/Shell/Register/Core/aarch64-freebsd-register-fields.test
index 0c8b52c14b964..1c90c5bff20b9 100644
--- a/lldb/test/Shell/Register/Core/aarch64-freebsd-register-fields.test
+++ b/lldb/test/Shell/Register/Core/aarch64-freebsd-register-fields.test
@@ -12,4 +12,4 @@ register read fpsr
# CHECK-NEXT: = (QC = 0, IDC = 0, IXC = 0, UFC = 0, OFC = 0, DZC = 0, IOC = 0)
register read fpcr
# CHECK: fpcr = 0x02000000
-# CHECK-NEXT: = (AHP = 0, DN = 1, FZ = 0, RMode = 0, IDE = 0, IXE = 0, UFE = 0, OFE = 0, DZE = 0, IOE = 0)
+# CHECK-NEXT: = (AHP = 0, DN = 1, FZ = 0, RMode = RN, IDE = 0, IXE = 0, UFE = 0, OFE = 0, DZE = 0, IOE = 0)
>From 3969d2c3b5f42e4a180f5205efa780b0f950d733 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 09:47:01 +0200
Subject: [PATCH 079/246] [InstCombine] Disable select known bits fold for
vectors
This is not safe if the simplification ends up looking through
lane-crossing operations. For now, we don't have a good way to
limit this in computeKnownBits(), so just disable vector handling
entirely.
Fixes https://github.com/llvm/llvm-project/issues/97475.
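To make the unsoundness concrete, evaluate the regression test below by hand
for x = {0, 7}. Per lane, the condition being true only guarantees that lane
of %x is zero, so substituting zero through the lane-crossing shufflevector
is invalid. A small C++ rendering of the IR's semantics (illustration only):

    #include <array>
    #include <cstdio>

    int main() {
      std::array<int, 2> x{0, 7};
      std::array<int, 2> x10{x[1], x[0]};    // shufflevector with mask <1, 0>
      std::array<int, 2> s;                  // select(icmp eq x, 0, x10, x)
      for (int i = 0; i < 2; ++i)
        s[i] = (x[i] == 0) ? x10[i] : x[i];
      // Prints s = {7, 7}; the removed fold simplified the whole select to
      // x = {0, 7}, a miscompile.
      std::printf("s = {%d, %d}\n", s[0], s[1]);
      return 0;
    }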
---
llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp | 4 +++-
llvm/test/Transforms/InstCombine/select-binop-cmp.ll | 5 ++++-
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 736013395e8c2..394dfca262e13 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -4049,7 +4049,9 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
if (CondVal->getType() == SI.getType() && isKnownInversion(FalseVal, TrueVal))
return BinaryOperator::CreateXor(CondVal, FalseVal);
- if (SelType->isIntOrIntVectorTy() &&
+ // For vectors, this transform is only safe if the simplification does not
+ // look through any lane-crossing operations. For now, limit to scalars only.
+ if (SelType->isIntegerTy() &&
(!isa<Constant>(TrueVal) || !isa<Constant>(FalseVal))) {
// Try to simplify select arms based on KnownBits implied by the condition.
CondContext CC(CondVal);
diff --git a/llvm/test/Transforms/InstCombine/select-binop-cmp.ll b/llvm/test/Transforms/InstCombine/select-binop-cmp.ll
index 9ee2bc57c3b87..1fa0c09a9e987 100644
--- a/llvm/test/Transforms/InstCombine/select-binop-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/select-binop-cmp.ll
@@ -571,7 +571,10 @@ define <2 x i8> @select_xor_icmp_vec_bad(<2 x i8> %x, <2 x i8> %y, <2 x i8> %z)
define <2 x i32> @vec_select_no_equivalence(<2 x i32> %x) {
; CHECK-LABEL: @vec_select_no_equivalence(
-; CHECK-NEXT: ret <2 x i32> [[X:%.*]]
+; CHECK-NEXT: [[X10:%.*]] = shufflevector <2 x i32> [[X:%.*]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[COND:%.*]] = icmp eq <2 x i32> [[X]], zeroinitializer
+; CHECK-NEXT: [[S:%.*]] = select <2 x i1> [[COND]], <2 x i32> [[X10]], <2 x i32> [[X]]
+; CHECK-NEXT: ret <2 x i32> [[S]]
;
%x10 = shufflevector <2 x i32> %x, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
%cond = icmp eq <2 x i32> %x, zeroinitializer
>From b3be14896770b31c18f4ec32b05b334dc811f356 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Wed, 3 Jul 2024 16:01:23 +0800
Subject: [PATCH 080/246] [RISCV] Use LiveIntervals::extendToIndices to extend
AVL in insertVSETVLI (#97512)
In #96200 we handled extending AVL LiveIntervals across basic blocks,
which fixed a crash in a test case in
133ab9a8c82a31549f060da33fd7e14f1d7f39fd.
This was done by manually adding a single segment to the LiveInterval to
extend it from AVL def -> inserted vsetvli, but in hindsight this was
too simple and fails to handle cases where the vsetvli is located before
the AVL def.
This patch fixes this by using LiveIntervals::extendToIndices instead
which can handle these cases.
(The crash that this fixes is separate from the crash in #97264)
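The gist of the change, with commentary added (the code itself matches the
diff below):

    // Before: manually appending a [def, use) segment assumes the inserted
    // vsetvli comes after the AVL definition in slot index order. When the
    // vsetvli's block is laid out before the AVL def, the segment is
    // malformed.
    VNInfo *VNI = LI.getValNumInfo(Info.getAVLVNInfo()->id);
    LI.addSegment(LiveInterval::Segment(
        VNI->def, LIS->getInstructionIndex(*MI).getRegSlot(), VNI));

    // After: let LiveIntervals do the work. extendToIndices extends the
    // interval so the value is live at each requested index, computing
    // liveness through predecessor blocks, so the backward case is handled.
    LIS->extendToIndices(LI, {LIS->getInstructionIndex(*MI).getRegSlot()});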
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 5 +-
.../RISCV/rvv/vsetvli-insert-crossbb.ll | 56 ++++++++++++++-----
2 files changed, 44 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 7e6eef47c121c..a1ae8a1250813 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1161,10 +1161,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
// isn't always the case, e.g. PseudoVMV_X_S doesn't have an AVL operand or
// we've taken the AVL from the VL output of another vsetvli.
LiveInterval &LI = LIS->getInterval(AVLReg);
- // Need to get non-const VNInfo
- VNInfo *VNI = LI.getValNumInfo(Info.getAVLVNInfo()->id);
- LI.addSegment(LiveInterval::Segment(
- VNI->def, LIS->getInstructionIndex(*MI).getRegSlot(), VNI));
+ LIS->extendToIndices(LI, {LIS->getInstructionIndex(*MI).getRegSlot()});
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 884c756840eb9..701192839d6aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -322,19 +322,19 @@ declare void @foo()
define <vscale x 1 x double> @test8(i64 %avl, i8 zeroext %cond, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: test8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -32
-; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 1
-; CHECK-NEXT: sub sp, sp, a2
-; CHECK-NEXT: mv s0, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: beqz a1, .LBB6_2
; CHECK-NEXT: # %bb.1: # %if.then
; CHECK-NEXT: vfadd.vv v8, v8, v9
-; CHECK-NEXT: j .LBB6_3
+; CHECK-NEXT: ret
; CHECK-NEXT: .LBB6_2: # %if.else
+; CHECK-NEXT: addi sp, sp, -32
+; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: mv s0, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: add a0, a0, sp
; CHECK-NEXT: addi a0, a0, 16
@@ -350,7 +350,6 @@ define <vscale x 1 x double> @test8(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
; CHECK-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v9, v8
-; CHECK-NEXT: .LBB6_3: # %if.then
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
@@ -1063,17 +1062,48 @@ exit:
ret <vscale x 2 x i32> %c
}
+define void @cross_block_avl_extend_backwards(i1 %cond, <vscale x 8 x i8> %v, ptr %p, i64 %avl) {
+; CHECK-LABEL: cross_block_avl_extend_backwards:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: beqz a0, .LBB25_2
+; CHECK-NEXT: # %bb.1: # %exit
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB25_2: # %bar
+; CHECK-NEXT: addi a2, a2, 1
+; CHECK-NEXT: .LBB25_3: # %foo
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: j .LBB25_3
+entry:
+ br i1 %cond, label %exit, label %bar
+foo:
+ ; Force a vl toggle
+ call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> %v, ptr %p, i64 1)
+ ; %add's LiveRange needs to be extended backwards to here.
+ call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> %v, ptr %p, i64 %add)
+ br label %foo
+exit:
+ ret void
+bar:
+ %add = add i64 %avl, 1
+ br label %foo
+}
+
define void @vlmax_avl_phi(i1 %cmp, ptr %p, i64 %a, i64 %b) {
; CHECK-LABEL: vlmax_avl_phi:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andi a0, a0, 1
-; CHECK-NEXT: beqz a0, .LBB25_2
+; CHECK-NEXT: beqz a0, .LBB26_2
; CHECK-NEXT: # %bb.1: # %foo
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
-; CHECK-NEXT: j .LBB25_3
-; CHECK-NEXT: .LBB25_2: # %bar
+; CHECK-NEXT: j .LBB26_3
+; CHECK-NEXT: .LBB26_2: # %bar
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma
-; CHECK-NEXT: .LBB25_3: # %exit
+; CHECK-NEXT: .LBB26_3: # %exit
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vse8.v v8, (a1)
>From f3a02253e9daba0e5c11b94c090dfa9e2e9ad5db Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Wed, 3 Jul 2024 09:02:31 +0100
Subject: [PATCH 081/246] [test] Remove immarg parameter attribute from calls
(#97432)
It is documented that immarg is only valid on intrinsic declarations,
although the verifier also tolerates it on intrinsic calls.
This patch updates tests that are not specifically testing the
behavior of the IR parser or verifier.
---
llvm/test/Bitcode/upgrade-memory-intrinsics.ll | 12 ++++++------
.../GlobalISel/llvm.amdgcn.wqm.demote.ll | 12 ++++++------
llvm/test/CodeGen/AMDGPU/amdgpu-reloc-const.ll | 2 +-
.../CodeGen/AMDGPU/cse-phi-incoming-val.ll | 2 +-
.../CodeGen/AMDGPU/dual-source-blend-export.ll | 12 ++++++------
llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll | 2 +-
.../CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll | 12 ++++++------
.../AMDGPU/load-local-redundant-copies.ll | 18 +++++++++---------
llvm/test/CodeGen/AMDGPU/memory_clause.ll | 4 ++--
.../AMDGPU/multi-divergent-exit-region.ll | 4 ++--
llvm/test/CodeGen/AMDGPU/skip-if-dead.ll | 4 ++--
llvm/test/CodeGen/AMDGPU/wqm-gfx11.ll | 6 +++---
llvm/test/CodeGen/X86/masked_store.ll | 2 +-
llvm/test/Transforms/InstCombine/select.ll | 4 ++--
.../InstSimplify/select-inseltpoison.ll | 4 ++--
llvm/test/Transforms/InstSimplify/select.ll | 4 ++--
16 files changed, 52 insertions(+), 52 deletions(-)
diff --git a/llvm/test/Bitcode/upgrade-memory-intrinsics.ll b/llvm/test/Bitcode/upgrade-memory-intrinsics.ll
index 27c8d3668b40d..397e171978ce4 100644
--- a/llvm/test/Bitcode/upgrade-memory-intrinsics.ll
+++ b/llvm/test/Bitcode/upgrade-memory-intrinsics.ll
@@ -30,12 +30,12 @@ define void @test2(ptr %p1, ptr %p2, ptr %p3) {
; Make sure that attributes are not dropped
define void @test3(ptr %p1, ptr %p2, ptr %p3) {
; CHECK-LABEL: @test
-; CHECK: call void @llvm.memset.p0.i64(ptr nonnull align 4 %p1, i8 signext 55, i64 zeroext 100, i1 immarg false)
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 %p1, ptr readonly align 4 %p2, i64 zeroext 50, i1 immarg false)
-; CHECK: call void @llvm.memmove.p0.p0.i64(ptr nonnull align 4 %p2, ptr readonly align 4 %p3, i64 zeroext 1000, i1 immarg false)
- call void @llvm.memset.p0.i64(ptr nonnull %p1, i8 signext 55, i64 zeroext 100, i32 signext 4, i1 immarg false)
- call void @llvm.memcpy.p0.p0.i64(ptr nonnull %p1, ptr readonly %p2, i64 zeroext 50, i32 signext 4, i1 immarg false)
- call void @llvm.memmove.p0.p0.i64(ptr nonnull %p2, ptr readonly %p3, i64 zeroext 1000, i32 signext 4, i1 immarg false)
+; CHECK: call void @llvm.memset.p0.i64(ptr nonnull align 4 %p1, i8 signext 55, i64 zeroext 100, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 %p1, ptr readonly align 4 %p2, i64 zeroext 50, i1 false)
+; CHECK: call void @llvm.memmove.p0.p0.i64(ptr nonnull align 4 %p2, ptr readonly align 4 %p3, i64 zeroext 1000, i1 false)
+ call void @llvm.memset.p0.i64(ptr nonnull %p1, i8 signext 55, i64 zeroext 100, i32 signext 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr nonnull %p1, ptr readonly %p2, i64 zeroext 50, i32 signext 4, i1 false)
+ call void @llvm.memmove.p0.p0.i64(ptr nonnull %p2, ptr readonly %p3, i64 zeroext 1000, i32 signext 4, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.wqm.demote.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.wqm.demote.ll
index a36b25ccfa48e..2d9fc9b25cfc8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.wqm.demote.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.wqm.demote.ll
@@ -847,8 +847,8 @@ define amdgpu_ps void @wqm_deriv(<2 x float> %input, float %arg, i32 %index) {
.entry:
%p0 = extractelement <2 x float> %input, i32 0
%p1 = extractelement <2 x float> %input, i32 1
- %x0 = call float @llvm.amdgcn.interp.p1(float %p0, i32 immarg 0, i32 immarg 0, i32 %index) #2
- %x1 = call float @llvm.amdgcn.interp.p2(float %x0, float %p1, i32 immarg 0, i32 immarg 0, i32 %index) #2
+ %x0 = call float @llvm.amdgcn.interp.p1(float %p0, i32 0, i32 0, i32 %index) #2
+ %x1 = call float @llvm.amdgcn.interp.p2(float %x0, float %p1, i32 0, i32 0, i32 %index) #2
%argi = fptosi float %arg to i32
%cond0 = icmp eq i32 %argi, 0
br i1 %cond0, label %.continue0, label %.demote0
@@ -875,7 +875,7 @@ define amdgpu_ps void @wqm_deriv(<2 x float> %input, float %arg, i32 %index) {
br label %.continue1
.continue1:
- call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 immarg true, i1 immarg true) #3
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 true, i1 true) #3
ret void
}
@@ -1141,8 +1141,8 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
.entry:
%p0 = extractelement <2 x float> %input, i32 0
%p1 = extractelement <2 x float> %input, i32 1
- %x0 = call float @llvm.amdgcn.interp.p1(float %p0, i32 immarg 0, i32 immarg 0, i32 %index) #2
- %x1 = call float @llvm.amdgcn.interp.p2(float %x0, float %p1, i32 immarg 0, i32 immarg 0, i32 %index) #2
+ %x0 = call float @llvm.amdgcn.interp.p1(float %p0, i32 0, i32 0, i32 %index) #2
+ %x1 = call float @llvm.amdgcn.interp.p2(float %x0, float %p1, i32 0, i32 0, i32 %index) #2
%argi = fptosi float %arg to i32
%cond0 = icmp eq i32 %argi, 0
br i1 %cond0, label %.continue0, label %.demote0
@@ -1175,7 +1175,7 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
br i1 %loop.cond, label %.continue0, label %.return
.return:
- call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 immarg true, i1 immarg true) #3
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 true, i1 true) #3
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-reloc-const.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-reloc-const.ll
index c5dbfb0f219bd..f3a9e761605a0 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-reloc-const.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-reloc-const.ll
@@ -17,7 +17,7 @@
define amdgpu_ps void @ps_main(i32 %arg, i32 inreg %arg1, i32 inreg %arg2) local_unnamed_addr #0 {
%rc = call i32 @llvm.amdgcn.reloc.constant(metadata !1)
%rcf = bitcast i32 %rc to float
- call void @llvm.amdgcn.exp.f32(i32 immarg 40, i32 immarg 15, float %rcf, float undef, float undef, float undef, i1 immarg false, i1 immarg false) #0
+ call void @llvm.amdgcn.exp.f32(i32 40, i32 15, float %rcf, float undef, float undef, float undef, i1 false, i1 false) #0
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll b/llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll
index c98da81264744..4f0d040e3217c 100644
--- a/llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll
+++ b/llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll
@@ -31,7 +31,7 @@ bb9: ; preds = %bb5
bb10: ; preds = %bb9, %bb5, %bb3, %bb
%tmp11 = phi float [ 1.000000e+00, %bb3 ], [ 0.000000e+00, %bb9 ], [ 1.000000e+00, %bb ], [ undef, %bb5 ]
- call void @llvm.amdgcn.exp.f32(i32 immarg 40, i32 immarg 15, float %tmp11, float undef, float undef, float undef, i1 immarg false, i1 immarg false) #0
+ call void @llvm.amdgcn.exp.f32(i32 40, i32 15, float %tmp11, float undef, float undef, float undef, i1 false, i1 false) #0
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/dual-source-blend-export.ll b/llvm/test/CodeGen/AMDGPU/dual-source-blend-export.ll
index 6626945f49c8c..2512d181069be 100644
--- a/llvm/test/CodeGen/AMDGPU/dual-source-blend-export.ll
+++ b/llvm/test/CodeGen/AMDGPU/dual-source-blend-export.ll
@@ -44,10 +44,10 @@ define amdgpu_ps void @_amdgpu_ps_main(i32 inreg %PrimMask, <2 x float> %InterpC
.entry:
%InterpCenter.i0 = extractelement <2 x float> %InterpCenter, i64 0
%InterpCenter.i1 = extractelement <2 x float> %InterpCenter, i64 1
- %i6 = call float @llvm.amdgcn.lds.param.load(i32 immarg 0, i32 immarg 1, i32 %PrimMask)
- %i7 = call float @llvm.amdgcn.lds.param.load(i32 immarg 1, i32 immarg 1, i32 %PrimMask)
- %i8 = call float @llvm.amdgcn.lds.param.load(i32 immarg 2, i32 immarg 1, i32 %PrimMask)
- %i9 = call float @llvm.amdgcn.lds.param.load(i32 immarg 3, i32 immarg 1, i32 %PrimMask)
+ %i6 = call float @llvm.amdgcn.lds.param.load(i32 0, i32 1, i32 %PrimMask)
+ %i7 = call float @llvm.amdgcn.lds.param.load(i32 1, i32 1, i32 %PrimMask)
+ %i8 = call float @llvm.amdgcn.lds.param.load(i32 2, i32 1, i32 %PrimMask)
+ %i9 = call float @llvm.amdgcn.lds.param.load(i32 3, i32 1, i32 %PrimMask)
%i14 = call float @llvm.amdgcn.interp.inreg.p10(float %i8, float %InterpCenter.i0, float %i8)
%i15 = call float @llvm.amdgcn.interp.inreg.p2(float %i8, float %InterpCenter.i1, float %i14)
@@ -83,8 +83,8 @@ define amdgpu_ps void @_amdgpu_ps_main(i32 inreg %PrimMask, <2 x float> %InterpC
%i50 = select i1 %.not, i32 %i47, i32 %i45
%i51 = call i32 @llvm.amdgcn.mov.dpp8.i32(i32 %i50, i32 14570689)
%i52 = bitcast i32 %i51 to float
- call void @llvm.amdgcn.exp.f32(i32 immarg 21, i32 immarg 3, float %i41, float %i49, float undef, float undef, i1 immarg false, i1 immarg true)
- call void @llvm.amdgcn.exp.f32(i32 immarg 22, i32 immarg 3, float %i44, float %i52, float undef, float undef, i1 immarg true, i1 immarg true)
+ call void @llvm.amdgcn.exp.f32(i32 21, i32 3, float %i41, float %i49, float undef, float undef, i1 false, i1 true)
+ call void @llvm.amdgcn.exp.f32(i32 22, i32 3, float %i44, float %i52, float undef, float undef, i1 true, i1 true)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
index 8f2bdff016345..63d272607563c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
@@ -314,7 +314,7 @@ bb33: ; preds = %bb
bb35: ; preds = %bb33, %.entry
%tmp36 = phi float [ %tmp34, %bb33 ], [ 1.000000e+00, %.entry ]
- call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 15, float %tmp36, float %tmp36, float %tmp36, float %tmp36, i1 immarg true, i1 immarg true) #3
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp36, float %tmp36, float %tmp36, float %tmp36, i1 true, i1 true) #3
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll
index 2e47cc505ee69..d37b4b41fac3d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll
@@ -849,8 +849,8 @@ define amdgpu_ps void @wqm_deriv(<2 x float> %input, float %arg, i32 %index) {
.entry:
%p0 = extractelement <2 x float> %input, i32 0
%p1 = extractelement <2 x float> %input, i32 1
- %x0 = call float @llvm.amdgcn.interp.p1(float %p0, i32 immarg 0, i32 immarg 0, i32 %index) #2
- %x1 = call float @llvm.amdgcn.interp.p2(float %x0, float %p1, i32 immarg 0, i32 immarg 0, i32 %index) #2
+ %x0 = call float @llvm.amdgcn.interp.p1(float %p0, i32 0, i32 0, i32 %index) #2
+ %x1 = call float @llvm.amdgcn.interp.p2(float %x0, float %p1, i32 0, i32 0, i32 %index) #2
%argi = fptosi float %arg to i32
%cond0 = icmp eq i32 %argi, 0
br i1 %cond0, label %.continue0, label %.demote0
@@ -877,7 +877,7 @@ define amdgpu_ps void @wqm_deriv(<2 x float> %input, float %arg, i32 %index) {
br label %.continue1
.continue1:
- call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 immarg true, i1 immarg true) #3
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 true, i1 true) #3
ret void
}
@@ -1142,8 +1142,8 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
.entry:
%p0 = extractelement <2 x float> %input, i32 0
%p1 = extractelement <2 x float> %input, i32 1
- %x0 = call float @llvm.amdgcn.interp.p1(float %p0, i32 immarg 0, i32 immarg 0, i32 %index) #2
- %x1 = call float @llvm.amdgcn.interp.p2(float %x0, float %p1, i32 immarg 0, i32 immarg 0, i32 %index) #2
+ %x0 = call float @llvm.amdgcn.interp.p1(float %p0, i32 0, i32 0, i32 %index) #2
+ %x1 = call float @llvm.amdgcn.interp.p2(float %x0, float %p1, i32 0, i32 0, i32 %index) #2
%argi = fptosi float %arg to i32
%cond0 = icmp eq i32 %argi, 0
br i1 %cond0, label %.continue0, label %.demote0
@@ -1176,7 +1176,7 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
br i1 %loop.cond, label %.continue0, label %.return
.return:
- call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 immarg true, i1 immarg true) #3
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 true, i1 true) #3
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll b/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
index 8d148b56c280e..01bab28a49858 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
@@ -20,10 +20,10 @@ define amdgpu_vs void @test(ptr addrspace(8) inreg %arg1, ptr addrspace(3) %arg2
; CHECK-NEXT: v_mov_b32_e32 v4, 0
; CHECK-NEXT: tbuffer_store_format_xyzw v[0:3], v4, s[0:3], 0 format:[BUF_DATA_FORMAT_32_32_32_32,BUF_NUM_FORMAT_FLOAT] idxen
; CHECK-NEXT: s_endpgm
- call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 0, float undef, float undef, float undef, float undef, i1 immarg false, i1 immarg false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float undef, float undef, float undef, float undef, i1 false, i1 false)
%var1 = load <6 x float>, ptr addrspace(3) %arg2, align 4
%var2 = shufflevector <6 x float> %var1, <6 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %var2, ptr addrspace(8) %arg1, i32 0, i32 0, i32 0, i32 immarg 126, i32 immarg 0)
+ call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %var2, ptr addrspace(8) %arg1, i32 0, i32 0, i32 0, i32 126, i32 0)
ret void
}
@@ -53,9 +53,9 @@ define amdgpu_vs void @test_2(ptr addrspace(8) inreg %arg1, i32 %arg2, i32 inreg
; CHECK-NEXT: s_endpgm
%load = load <8 x float>, ptr addrspace(3) %arg4, align 4
%vec1 = shufflevector <8 x float> %load, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %vec1, ptr addrspace(8) %arg1, i32 %arg2, i32 0, i32 %arg3, i32 immarg 77, i32 immarg 3)
+ call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %vec1, ptr addrspace(8) %arg1, i32 %arg2, i32 0, i32 %arg3, i32 77, i32 3)
%vec2 = shufflevector <8 x float> %load, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %vec2, ptr addrspace(8) %arg1, i32 %arg2, i32 16, i32 %arg3, i32 immarg 77, i32 immarg 3)
+ call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %vec2, ptr addrspace(8) %arg1, i32 %arg2, i32 16, i32 %arg3, i32 77, i32 3)
ret void
}
@@ -103,17 +103,17 @@ define amdgpu_vs void @test_3(i32 inreg %arg1, i32 inreg %arg2, ptr addrspace(8)
; CHECK-NEXT: s_endpgm
%load1 = load <6 x float>, ptr addrspace(3) %arg5, align 4
%vec11 = shufflevector <6 x float> %load1, <6 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %vec11, ptr addrspace(8) %arg3, i32 %arg1, i32 264, i32 %arg2, i32 immarg 77, i32 immarg 3)
+ call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %vec11, ptr addrspace(8) %arg3, i32 %arg1, i32 264, i32 %arg2, i32 77, i32 3)
%vec12 = shufflevector <6 x float> %load1, <6 x float> undef, <2 x i32> <i32 4, i32 5>
- call void @llvm.amdgcn.struct.ptr.tbuffer.store.v2f32(<2 x float> %vec12, ptr addrspace(8) %arg3, i32 %arg1, i32 280, i32 %arg2, i32 immarg 64, i32 immarg 3)
+ call void @llvm.amdgcn.struct.ptr.tbuffer.store.v2f32(<2 x float> %vec12, ptr addrspace(8) %arg3, i32 %arg1, i32 280, i32 %arg2, i32 64, i32 3)
- call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 0, float undef, float undef, float undef, float undef, i1 immarg false, i1 immarg false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float undef, float undef, float undef, float undef, i1 false, i1 false)
%load2 = load <6 x float>, ptr addrspace(3) %arg6, align 4
%vec21 = shufflevector <6 x float> %load2, <6 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %vec21, ptr addrspace(8) %arg3, i32 %arg1, i32 240, i32 %arg2, i32 immarg 77, i32 immarg 3)
+ call void @llvm.amdgcn.struct.ptr.tbuffer.store.v4f32(<4 x float> %vec21, ptr addrspace(8) %arg3, i32 %arg1, i32 240, i32 %arg2, i32 77, i32 3)
%vec22 = shufflevector <6 x float> %load2, <6 x float> undef, <2 x i32> <i32 4, i32 5>
- call void @llvm.amdgcn.struct.ptr.tbuffer.store.v2f32(<2 x float> %vec22, ptr addrspace(8) %arg3, i32 %arg1, i32 256, i32 %arg2, i32 immarg 64, i32 immarg 3)
+ call void @llvm.amdgcn.struct.ptr.tbuffer.store.v2f32(<2 x float> %vec22, ptr addrspace(8) %arg3, i32 %arg1, i32 256, i32 %arg2, i32 64, i32 3)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/memory_clause.ll b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
index 940287d44d8d1..c49e0501665c5 100644
--- a/llvm/test/CodeGen/AMDGPU/memory_clause.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
@@ -446,7 +446,7 @@ define amdgpu_kernel void @flat_scratch_load(float %a, float %b, <8 x i32> %desc
%val = call <2 x float> @llvm.amdgcn.image.sample.2d.v2f32.f32(i32 9, float %a, float %b, <8 x i32> %desc, <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 0>, i1 false, i32 0, i32 0)
%val0 = extractelement <2 x float> %val, i32 0
%valadd = fadd float %load, %val0
- call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 1, float %valadd, float undef, float undef, float undef, i1 immarg true, i1 immarg true)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 1, float %valadd, float undef, float undef, float undef, i1 true, i1 true)
ret void
}
@@ -504,7 +504,7 @@ define amdgpu_kernel void @flat_scratch_load_clause(float %a, float %b, <8 x i32
%load0 = load float, ptr addrspace(5) %alloca
%load1 = load float, ptr addrspace(5) %alloca2
%valadd = fadd float %load0, %load1
- call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 1, float %valadd, float undef, float undef, float undef, i1 immarg true, i1 immarg true)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 1, float %valadd, float undef, float undef, float undef, i1 true, i1 true)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll b/llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
index 4eefff504f19e..92b2f516277bc 100644
--- a/llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
+++ b/llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
@@ -728,7 +728,7 @@ bb5: ; preds = %bb3
; IR-NEXT: br i1 false, label %DummyReturnBlock, label %[[LOOP]]
; IR: [[EXP]]:
-; IR-NEXT: call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 immarg true, i1 immarg true)
+; IR-NEXT: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 true, i1 true)
; IR-NEXT: ret void
; IR: DummyReturnBlock:
@@ -743,7 +743,7 @@ loop: ; preds = %loop, %.entry
br label %loop
bb27: ; preds = %.entry
- call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 immarg true, i1 immarg true)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 0xH3C00, half 0xH0000>, <2 x half> <half 0xH0000, half 0xH3C00>, i1 true, i1 true)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
index d19ef75cb08cd..a440b87efc81f 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -1697,7 +1697,7 @@ live:
export:
%proxy = phi float [ undef, %kill ], [ %scale, %live ]
- call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 15, float %proxy, float %proxy, float %proxy, float %proxy, i1 immarg true, i1 immarg true) #3
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %proxy, float %proxy, float %proxy, float %proxy, i1 true, i1 true) #3
ret void
}
@@ -1907,7 +1907,7 @@ latch:
._crit_edge:
%tmp = phi i32 [ -1, %.entry ], [ %ctr.next, %latch ]
%out = bitcast i32 %tmp to float
- call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 15, float %out, float %out, float undef, float undef, i1 immarg true, i1 immarg true)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %out, float %out, float undef, float undef, i1 true, i1 true)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/wqm-gfx11.ll b/llvm/test/CodeGen/AMDGPU/wqm-gfx11.ll
index d7ce562292b86..e68f232a9f506 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm-gfx11.ll
+++ b/llvm/test/CodeGen/AMDGPU/wqm-gfx11.ll
@@ -18,9 +18,9 @@ define amdgpu_ps <3 x float> @test_param_load(i32 inreg %attr, <3 x float> %to_a
; CHECK-NEXT: v_add_f32_e32 v2, v5, v2
; CHECK-NEXT: ; return to shader part epilog
main_body:
- %a = call float @llvm.amdgcn.lds.param.load(i32 immarg 0, i32 immarg 0, i32 %attr) #1
- %b = call float @llvm.amdgcn.lds.param.load(i32 immarg 1, i32 immarg 0, i32 %attr) #1
- %c = call float @llvm.amdgcn.lds.param.load(i32 immarg 2, i32 immarg 0, i32 %attr) #1
+ %a = call float @llvm.amdgcn.lds.param.load(i32 0, i32 0, i32 %attr) #1
+ %b = call float @llvm.amdgcn.lds.param.load(i32 1, i32 0, i32 %attr) #1
+ %c = call float @llvm.amdgcn.lds.param.load(i32 2, i32 0, i32 %attr) #1
%tmp_0 = insertelement <3 x float> undef, float %a, i32 0
%tmp_1 = insertelement <3 x float> %tmp_0, float %b, i32 1
%tmp_2 = insertelement <3 x float> %tmp_1, float %c, i32 2
diff --git a/llvm/test/CodeGen/X86/masked_store.ll b/llvm/test/CodeGen/X86/masked_store.ll
index 6aa0a81c90204..c7ec5e87dcc6b 100644
--- a/llvm/test/CodeGen/X86/masked_store.ll
+++ b/llvm/test/CodeGen/X86/masked_store.ll
@@ -6016,7 +6016,7 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
%val = load <24 x i32>, ptr %val.ptr
%mask.src = icmp slt <24 x i32> %trigger, zeroinitializer
%mask = and <24 x i1> %mask.src, <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>
- call void @llvm.masked.store.v24i32.p0(<24 x i32> %val, ptr %dst, i32 immarg 1, <24 x i1> %mask)
+ call void @llvm.masked.store.v24i32.p0(<24 x i32> %val, ptr %dst, i32 1, <24 x i1> %mask)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 63a4f74cbaaea..d66ffb9a63ac1 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -2736,13 +2736,13 @@ define void @select_freeze_icmp_multuses(i32 %x, i32 %y) {
define i32 @pr47322_more_poisonous_replacement(i32 %arg) {
; CHECK-LABEL: @pr47322_more_poisonous_replacement(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ARG:%.*]], 0
-; CHECK-NEXT: [[TRAILING:%.*]] = call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[ARG]], i1 immarg true)
+; CHECK-NEXT: [[TRAILING:%.*]] = call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[ARG]], i1 true)
; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[ARG]], [[TRAILING]]
; CHECK-NEXT: [[R1_SROA_0_1:%.*]] = select i1 [[CMP]], i32 0, i32 [[SHIFTED]]
; CHECK-NEXT: ret i32 [[R1_SROA_0_1]]
;
%cmp = icmp eq i32 %arg, 0
- %trailing = call i32 @llvm.cttz.i32(i32 %arg, i1 immarg true)
+ %trailing = call i32 @llvm.cttz.i32(i32 %arg, i1 true)
%shifted = lshr i32 %arg, %trailing
%r1.sroa.0.1 = select i1 %cmp, i32 0, i32 %shifted
ret i32 %r1.sroa.0.1
diff --git a/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll b/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll
index fcf8c31b25eed..33670128af439 100644
--- a/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll
+++ b/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll
@@ -940,13 +940,13 @@ define i1 @expand_binop_undef(i32 %x, i32 %y) {
define i32 @pr47322_more_poisonous_replacement(i32 %arg) {
; CHECK-LABEL: @pr47322_more_poisonous_replacement(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ARG:%.*]], 0
-; CHECK-NEXT: [[TRAILING:%.*]] = call i32 @llvm.cttz.i32(i32 [[ARG]], i1 immarg true)
+; CHECK-NEXT: [[TRAILING:%.*]] = call i32 @llvm.cttz.i32(i32 [[ARG]], i1 true)
; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[ARG]], [[TRAILING]]
; CHECK-NEXT: [[R1_SROA_0_1:%.*]] = select i1 [[CMP]], i32 0, i32 [[SHIFTED]]
; CHECK-NEXT: ret i32 [[R1_SROA_0_1]]
;
%cmp = icmp eq i32 %arg, 0
- %trailing = call i32 @llvm.cttz.i32(i32 %arg, i1 immarg true)
+ %trailing = call i32 @llvm.cttz.i32(i32 %arg, i1 true)
%shifted = lshr i32 %arg, %trailing
%r1.sroa.0.1 = select i1 %cmp, i32 0, i32 %shifted
ret i32 %r1.sroa.0.1
diff --git a/llvm/test/Transforms/InstSimplify/select.ll b/llvm/test/Transforms/InstSimplify/select.ll
index 4eb6491eec5a4..1e503afae1a69 100644
--- a/llvm/test/Transforms/InstSimplify/select.ll
+++ b/llvm/test/Transforms/InstSimplify/select.ll
@@ -985,13 +985,13 @@ define i1 @expand_binop_undef(i32 %x, i32 %y) {
define i32 @pr47322_more_poisonous_replacement(i32 %arg) {
; CHECK-LABEL: @pr47322_more_poisonous_replacement(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ARG:%.*]], 0
-; CHECK-NEXT: [[TRAILING:%.*]] = call i32 @llvm.cttz.i32(i32 [[ARG]], i1 immarg true)
+; CHECK-NEXT: [[TRAILING:%.*]] = call i32 @llvm.cttz.i32(i32 [[ARG]], i1 true)
; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[ARG]], [[TRAILING]]
; CHECK-NEXT: [[R1_SROA_0_1:%.*]] = select i1 [[CMP]], i32 0, i32 [[SHIFTED]]
; CHECK-NEXT: ret i32 [[R1_SROA_0_1]]
;
%cmp = icmp eq i32 %arg, 0
- %trailing = call i32 @llvm.cttz.i32(i32 %arg, i1 immarg true)
+ %trailing = call i32 @llvm.cttz.i32(i32 %arg, i1 true)
%shifted = lshr i32 %arg, %trailing
%r1.sroa.0.1 = select i1 %cmp, i32 0, i32 %shifted
ret i32 %r1.sroa.0.1
>From 76c84e702bd9af7db2bb9373ba6de0508f1e57a9 Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Wed, 3 Jul 2024 16:02:58 +0800
Subject: [PATCH 082/246] [Driver][X86] Add flang visibility for
-mapx-features= (#97525)
The default visibility of `m_x86_Features_Group` is `ClangOption,
CLOption`. For x86, we expose `-march` to flang but not `-m<cpuid>`.
`apxf` is special because it contains several independent features like
`egpr, ndd, ppx, push2pop2, ccmp, nf, cf` and `zu`.
Users may want to enable only a subset of these features in different
scenarios, e.g.
1. enable `ndd` only when writing a code-size sensitive library
2. disable `ccmp` and `cf` when building a component with strong security
requirements around caches
Hence, we expose `-mapx-features=` to flang for Fortran users.
---
clang/include/clang/Driver/Options.td | 4 ++--
flang/test/Driver/target-cpu-features.f90 | 12 ++++++++++++
2 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 1c2b8cfeef6ce..1ede75d3782cd 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -6319,9 +6319,9 @@ def mno_gather : Flag<["-"], "mno-gather">, Group<m_Group>,
def mno_scatter : Flag<["-"], "mno-scatter">, Group<m_Group>,
HelpText<"Disable generation of scatter instructions in auto-vectorization(x86 only)">;
def mapx_features_EQ : CommaJoined<["-"], "mapx-features=">, Group<m_x86_Features_Group>,
- HelpText<"Enable features of APX">, Values<"egpr,push2pop2,ppx,ndd,ccmp,nf,cf,zu">;
+ HelpText<"Enable features of APX">, Values<"egpr,push2pop2,ppx,ndd,ccmp,nf,cf,zu">, Visibility<[ClangOption, CLOption, FlangOption]>;
def mno_apx_features_EQ : CommaJoined<["-"], "mno-apx-features=">, Group<m_x86_Features_Group>,
- HelpText<"Disable features of APX">, Values<"egpr,push2pop2,ppx,ndd,ccmp,nf,cf,zu">;
+ HelpText<"Disable features of APX">, Values<"egpr,push2pop2,ppx,ndd,ccmp,nf,cf,zu">, Visibility<[ClangOption, CLOption, FlangOption]>;
// For stability, we only add a feature to -mapxf after it passes the validation of llvm-test-suite && cpu2017 on Intel SDE.
def mapxf : Flag<["-"], "mapxf">, Alias<mapx_features_EQ>, AliasArgs<["egpr","push2pop2","ppx","ndd","ccmp","nf","cf"]>;
def mno_apxf : Flag<["-"], "mno-apxf">, Alias<mno_apx_features_EQ>, AliasArgs<["egpr","push2pop2","ppx","ndd","ccmp","nf","cf"]>;
diff --git a/flang/test/Driver/target-cpu-features.f90 b/flang/test/Driver/target-cpu-features.f90
index e78c3516db45a..0f19e4ebff2a0 100644
--- a/flang/test/Driver/target-cpu-features.f90
+++ b/flang/test/Driver/target-cpu-features.f90
@@ -17,6 +17,12 @@
! RUN: %flang --target=x86_64-linux-gnu -march=skylake -c %s -### 2>&1 \
! RUN: | FileCheck %s -check-prefix=CHECK-SKYLAKE
+! RUN: %flang --target=x86_64-linux-gnu -mapx-features=egpr -c %s -### 2>&1 \
+! RUN: | FileCheck %s -check-prefix=CHECK-APX
+
+! RUN: %flang --target=x86_64-linux-gnu -mno-apx-features=ccmp -c %s -### 2>&1 \
+! RUN: | FileCheck %s -check-prefix=CHECK-NO-APX
+
! RUN: %flang --target=x86_64h-linux-gnu -c %s -### 2>&1 \
! RUN: | FileCheck %s -check-prefix=CHECK-X86_64H
@@ -51,6 +57,12 @@
! CHECK-SKYLAKE: "-fc1" "-triple" "x86_64-unknown-linux-gnu"
! CHECK-SKYLAKE-SAME: "-target-cpu" "skylake"
+! CHECK-APX: "-fc1" "-triple" "x86_64-unknown-linux-gnu"
+! CHECK-APX-SAME: "-target-feature" "+egpr"
+
+! CHECK-NO-APX: "-fc1" "-triple" "x86_64-unknown-linux-gnu"
+! CHECK-NO-APX-SAME: "-target-feature" "-ccmp"
+
! CHECK-X86_64H: "-fc1" "-triple" "x86_64h-unknown-linux-gnu"
! CHECK-X86_64H-SAME: "-target-cpu" "x86-64" "-target-feature" "-rdrnd" "-target-feature" "-aes" "-target-feature" "-pclmul" "-target-feature" "-rtm" "-target-feature" "-fsgsbase"
>From 2a14c0643597c5932af85f22172c99800f9b4a6c Mon Sep 17 00:00:00 2001
From: Pavel Labath <pavel at labath.sk>
Date: Wed, 3 Jul 2024 10:29:17 +0200
Subject: [PATCH 083/246] [lldb] Make Broadcaster mutexes non-recursive
(#97400)
Non-recursive mutexes encourage better locking discipline and avoid bugs
like #96750, where one can unexpectedly re-enter the critical section on
the same thread, and interrupt a presumed-indivisible operation.
In this case, the only needed fix was to remove locking from some
BroadcasterManager functions, which were only called from the Listener
class (and the listener already locked those mutexes to preserve lock
ordering).
While doing that, I noticed we don't have unit tests for these
functions, so I added one.
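The locking pattern applied here, in miniature (a sketch with hypothetical
names, not the actual LLDB classes):

    #include <mutex>

    class Manager {
    public:
      // Public entry point: takes the non-recursive lock exactly once.
      bool Unregister(int key) {
        std::lock_guard<std::mutex> guard(m_mutex);
        return UnregisterNoLock(key);
      }

    private:
      friend class ListenerSide; // Locks m_mutex itself (to preserve the
                                 // manager-before-listener lock order) and
                                 // then calls the NoLock variant directly.
      bool UnregisterNoLock(int key) {
        // Mutates shared state; m_mutex must already be held.
        return key != 0;
      }
      mutable std::mutex m_mutex;
    };

With a recursive mutex, a caller already holding m_mutex could re-enter the
public method unnoticed; with a non-recursive one, that latent re-entrancy
becomes an immediate, diagnosable deadlock.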
---
lldb/include/lldb/Utility/Broadcaster.h | 17 ++++++-----
lldb/source/Utility/Broadcaster.cpp | 33 ++++++++++-----------
lldb/source/Utility/Listener.cpp | 12 ++++----
lldb/unittests/Utility/ListenerTest.cpp | 39 +++++++++++++++++++++++++
4 files changed, 70 insertions(+), 31 deletions(-)
diff --git a/lldb/include/lldb/Utility/Broadcaster.h b/lldb/include/lldb/Utility/Broadcaster.h
index 58436ddb9f26d..c6f63f1916573 100644
--- a/lldb/include/lldb/Utility/Broadcaster.h
+++ b/lldb/include/lldb/Utility/Broadcaster.h
@@ -87,12 +87,6 @@ class BroadcasterManager
~BroadcasterManager() = default;
- uint32_t RegisterListenerForEvents(const lldb::ListenerSP &listener_sp,
- const BroadcastEventSpec &event_spec);
-
- bool UnregisterListenerForEvents(const lldb::ListenerSP &listener_sp,
- const BroadcastEventSpec &event_spec);
-
lldb::ListenerSP
GetListenerForEventSpec(const BroadcastEventSpec &event_spec) const;
@@ -105,13 +99,20 @@ class BroadcasterManager
void Clear();
private:
+ uint32_t
+ RegisterListenerForEventsNoLock(const lldb::ListenerSP &listener_sp,
+ const BroadcastEventSpec &event_spec);
+
+ bool UnregisterListenerForEventsNoLock(const lldb::ListenerSP &listener_sp,
+ const BroadcastEventSpec &event_spec);
+
typedef std::pair<BroadcastEventSpec, lldb::ListenerSP> event_listener_key;
typedef std::map<BroadcastEventSpec, lldb::ListenerSP> collection;
typedef std::set<lldb::ListenerSP> listener_collection;
collection m_event_map;
listener_collection m_listeners;
- mutable std::recursive_mutex m_manager_mutex;
+ mutable std::mutex m_manager_mutex;
};
/// \class Broadcaster Broadcaster.h "lldb/Utility/Broadcaster.h" An event
@@ -441,7 +442,7 @@ class Broadcaster {
collection m_listeners;
/// A mutex that protects \a m_listeners.
- std::recursive_mutex m_listeners_mutex;
+ std::mutex m_listeners_mutex;
/// See the discussion of Broadcasters and Listeners above.
lldb::ListenerSP m_primary_listener_sp;
diff --git a/lldb/source/Utility/Broadcaster.cpp b/lldb/source/Utility/Broadcaster.cpp
index b6d8ae39325d3..c6b2606afe0c8 100644
--- a/lldb/source/Utility/Broadcaster.cpp
+++ b/lldb/source/Utility/Broadcaster.cpp
@@ -87,7 +87,7 @@ bool Broadcaster::BroadcasterImpl::HasListeners(uint32_t event_mask) {
}
void Broadcaster::BroadcasterImpl::Clear() {
- std::lock_guard<std::recursive_mutex> guard(m_listeners_mutex);
+ std::lock_guard<std::mutex> guard(m_listeners_mutex);
// Make sure the listener forgets about this broadcaster. We do this in the
// broadcaster in case the broadcaster object initiates the removal.
@@ -137,7 +137,7 @@ Broadcaster::BroadcasterImpl::AddListener(const lldb::ListenerSP &listener_sp,
if (!listener_sp)
return 0;
- std::lock_guard<std::recursive_mutex> guard(m_listeners_mutex);
+ std::lock_guard<std::mutex> guard(m_listeners_mutex);
// See if we already have this listener, and if so, update its mask
@@ -171,7 +171,7 @@ Broadcaster::BroadcasterImpl::AddListener(const lldb::ListenerSP &listener_sp,
}
bool Broadcaster::BroadcasterImpl::EventTypeHasListeners(uint32_t event_type) {
- std::lock_guard<std::recursive_mutex> guard(m_listeners_mutex);
+ std::lock_guard<std::mutex> guard(m_listeners_mutex);
if (!m_hijacking_listeners.empty() && event_type & m_hijacking_masks.back())
return true;
@@ -195,7 +195,7 @@ bool Broadcaster::BroadcasterImpl::RemoveListener(
return true;
}
- std::lock_guard<std::recursive_mutex> guard(m_listeners_mutex);
+ std::lock_guard<std::mutex> guard(m_listeners_mutex);
for (auto it = m_listeners.begin(); it != m_listeners.end();) {
lldb::ListenerSP curr_listener_sp(it->first.lock());
@@ -243,7 +243,7 @@ void Broadcaster::BroadcasterImpl::PrivateBroadcastEvent(EventSP &event_sp,
const uint32_t event_type = event_sp->GetType();
- std::lock_guard<std::recursive_mutex> guard(m_listeners_mutex);
+ std::lock_guard<std::mutex> guard(m_listeners_mutex);
ListenerSP hijacking_listener_sp;
@@ -327,7 +327,7 @@ void Broadcaster::BroadcasterImpl::SetPrimaryListener(lldb::ListenerSP
bool Broadcaster::BroadcasterImpl::HijackBroadcaster(
const lldb::ListenerSP &listener_sp, uint32_t event_mask) {
- std::lock_guard<std::recursive_mutex> guard(m_listeners_mutex);
+ std::lock_guard<std::mutex> guard(m_listeners_mutex);
Log *log = GetLog(LLDBLog::Events);
LLDB_LOG(
@@ -341,7 +341,7 @@ bool Broadcaster::BroadcasterImpl::HijackBroadcaster(
}
bool Broadcaster::BroadcasterImpl::IsHijackedForEvent(uint32_t event_mask) {
- std::lock_guard<std::recursive_mutex> guard(m_listeners_mutex);
+ std::lock_guard<std::mutex> guard(m_listeners_mutex);
if (!m_hijacking_listeners.empty())
return (event_mask & m_hijacking_masks.back()) != 0;
@@ -356,7 +356,7 @@ const char *Broadcaster::BroadcasterImpl::GetHijackingListenerName() {
}
void Broadcaster::BroadcasterImpl::RestoreBroadcaster() {
- std::lock_guard<std::recursive_mutex> guard(m_listeners_mutex);
+ std::lock_guard<std::mutex> guard(m_listeners_mutex);
if (!m_hijacking_listeners.empty()) {
ListenerSP listener_sp = m_hijacking_listeners.back();
@@ -391,10 +391,8 @@ lldb::BroadcasterManagerSP BroadcasterManager::MakeBroadcasterManager() {
return lldb::BroadcasterManagerSP(new BroadcasterManager());
}
-uint32_t BroadcasterManager::RegisterListenerForEvents(
+uint32_t BroadcasterManager::RegisterListenerForEventsNoLock(
const lldb::ListenerSP &listener_sp, const BroadcastEventSpec &event_spec) {
- std::lock_guard<std::recursive_mutex> guard(m_manager_mutex);
-
collection::iterator iter = m_event_map.begin(), end_iter = m_event_map.end();
uint32_t available_bits = event_spec.GetEventBits();
@@ -419,9 +417,8 @@ uint32_t BroadcasterManager::RegisterListenerForEvents(
return available_bits;
}
-bool BroadcasterManager::UnregisterListenerForEvents(
+bool BroadcasterManager::UnregisterListenerForEventsNoLock(
const lldb::ListenerSP &listener_sp, const BroadcastEventSpec &event_spec) {
- std::lock_guard<std::recursive_mutex> guard(m_manager_mutex);
bool removed_some = false;
if (m_listeners.erase(listener_sp) == 0)
@@ -464,7 +461,7 @@ bool BroadcasterManager::UnregisterListenerForEvents(
ListenerSP BroadcasterManager::GetListenerForEventSpec(
const BroadcastEventSpec &event_spec) const {
- std::lock_guard<std::recursive_mutex> guard(m_manager_mutex);
+ std::lock_guard<std::mutex> guard(m_manager_mutex);
auto event_spec_matches =
[&event_spec](const event_listener_key &input) -> bool {
@@ -479,7 +476,7 @@ ListenerSP BroadcasterManager::GetListenerForEventSpec(
}
void BroadcasterManager::RemoveListener(Listener *listener) {
- std::lock_guard<std::recursive_mutex> guard(m_manager_mutex);
+ std::lock_guard<std::mutex> guard(m_manager_mutex);
auto listeners_predicate =
[&listener](const lldb::ListenerSP &input) -> bool {
return input.get() == listener;
@@ -504,7 +501,7 @@ void BroadcasterManager::RemoveListener(Listener *listener) {
}
void BroadcasterManager::RemoveListener(const lldb::ListenerSP &listener_sp) {
- std::lock_guard<std::recursive_mutex> guard(m_manager_mutex);
+ std::lock_guard<std::mutex> guard(m_manager_mutex);
auto listener_matches =
[&listener_sp](const event_listener_key &input) -> bool {
@@ -526,7 +523,7 @@ void BroadcasterManager::RemoveListener(const lldb::ListenerSP &listener_sp) {
void BroadcasterManager::SignUpListenersForBroadcaster(
Broadcaster &broadcaster) {
- std::lock_guard<std::recursive_mutex> guard(m_manager_mutex);
+ std::lock_guard<std::mutex> guard(m_manager_mutex);
collection::iterator iter = m_event_map.begin(), end_iter = m_event_map.end();
@@ -544,7 +541,7 @@ void BroadcasterManager::SignUpListenersForBroadcaster(
}
void BroadcasterManager::Clear() {
- std::lock_guard<std::recursive_mutex> guard(m_manager_mutex);
+ std::lock_guard<std::mutex> guard(m_manager_mutex);
for (auto &listener : m_listeners)
listener->BroadcasterManagerWillDestruct(this->shared_from_this());
diff --git a/lldb/source/Utility/Listener.cpp b/lldb/source/Utility/Listener.cpp
index 5aacb4104e1cf..0b28cb5cdc642 100644
--- a/lldb/source/Utility/Listener.cpp
+++ b/lldb/source/Utility/Listener.cpp
@@ -356,11 +356,10 @@ Listener::StartListeningForEventSpec(const BroadcasterManagerSP &manager_sp,
};
// The BroadcasterManager mutex must be locked before m_broadcasters_mutex to
// avoid violating the lock hierarchy (manager before broadcasters).
- std::lock_guard<std::recursive_mutex> manager_guard(
- manager_sp->m_manager_mutex);
+ std::lock_guard<std::mutex> manager_guard(manager_sp->m_manager_mutex);
std::lock_guard<std::recursive_mutex> guard(m_broadcasters_mutex);
- uint32_t bits_acquired = manager_sp->RegisterListenerForEvents(
+ uint32_t bits_acquired = manager_sp->RegisterListenerForEventsNoLock(
this->shared_from_this(), event_spec);
if (bits_acquired) {
BroadcasterManagerWP manager_wp(manager_sp);
@@ -377,9 +376,12 @@ bool Listener::StopListeningForEventSpec(const BroadcasterManagerSP &manager_sp,
if (!manager_sp)
return false;
+ // The BroadcasterManager mutex must be locked before m_broadcasters_mutex to
+ // avoid violating the lock hierarchy (manager before broadcasters).
+ std::lock_guard<std::mutex> manager_guard(manager_sp->m_manager_mutex);
std::lock_guard<std::recursive_mutex> guard(m_broadcasters_mutex);
- return manager_sp->UnregisterListenerForEvents(this->shared_from_this(),
- event_spec);
+ return manager_sp->UnregisterListenerForEventsNoLock(this->shared_from_this(),
+ event_spec);
}
ListenerSP Listener::MakeListener(const char *name) {
diff --git a/lldb/unittests/Utility/ListenerTest.cpp b/lldb/unittests/Utility/ListenerTest.cpp
index e586d99fd6305..f7aa0f59d1848 100644
--- a/lldb/unittests/Utility/ListenerTest.cpp
+++ b/lldb/unittests/Utility/ListenerTest.cpp
@@ -9,6 +9,7 @@
#include "gtest/gtest.h"
#include "lldb/Utility/Broadcaster.h"
+#include "lldb/Utility/Event.h"
#include "lldb/Utility/Listener.h"
#include <future>
#include <thread>
@@ -111,3 +112,41 @@ TEST(ListenerTest, GetEventWait) {
&broadcaster, event_mask, event_sp, std::nullopt));
async_broadcast.get();
}
+
+TEST(ListenerTest, StartStopListeningForEventSpec) {
+ constexpr uint32_t event_mask = 1;
+ static constexpr llvm::StringLiteral broadcaster_class = "broadcaster-class";
+
+ class TestBroadcaster : public Broadcaster {
+ using Broadcaster::Broadcaster;
+ llvm::StringRef GetBroadcasterClass() const override {
+ return broadcaster_class;
+ }
+ };
+
+ BroadcasterManagerSP manager_sp =
+ BroadcasterManager::MakeBroadcasterManager();
+ ListenerSP listener_sp = Listener::MakeListener("test-listener");
+
+ // Create two broadcasters, one while we're waiting for new broadcasters, and
+ // one when we're not.
+ ASSERT_EQ(listener_sp->StartListeningForEventSpec(
+ manager_sp, BroadcastEventSpec(broadcaster_class, event_mask)),
+ event_mask);
+ TestBroadcaster broadcaster1(manager_sp, "test-broadcaster-1");
+ broadcaster1.CheckInWithManager();
+ ASSERT_TRUE(listener_sp->StopListeningForEventSpec(
+ manager_sp, BroadcastEventSpec(broadcaster_class, event_mask)));
+ TestBroadcaster broadcaster2(manager_sp, "test-broadcaster-2");
+ broadcaster2.CheckInWithManager();
+
+ // Use both broadcasters to send an event.
+ for (auto *b : {&broadcaster1, &broadcaster2})
+ b->BroadcastEvent(event_mask, nullptr);
+
+ // We should only get the event from the first one.
+ EventSP event_sp;
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, std::chrono::seconds(0)));
+ ASSERT_EQ(event_sp->GetBroadcaster(), &broadcaster1);
+ ASSERT_FALSE(listener_sp->GetEvent(event_sp, std::chrono::seconds(0)));
+}
>From 1787d4b28417ea9f26c0213e8f597cc5bb289144 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stefan=20Gr=C3=A4nitz?= <stefan.graenitz at gmail.com>
Date: Wed, 3 Jul 2024 10:32:50 +0200
Subject: [PATCH 084/246] [clang-repl] Fix RuntimeInterfaceBuilder for 32-bit
systems (#97071)
When generating runtime interface bindings, extend integral types to the
native register size rather than to 64 bits unconditionally.
Fixes #94994
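The underlying idea as a standalone sketch (hypothetical names; the actual
change builds the equivalent cast in the AST via getBitIntType, as the diff
shows):

    #include <cstdint>

    using reg_t = uintptr_t; // 32 bits on 32-bit Arm, 64 bits on x86_64

    // One funnel overload keyed to the register width, rather than a
    // hard-coded 64-bit parameter that breaks 32-bit calling conventions.
    void SetValueNoAlloc(void *OutVal, reg_t V) {
      *static_cast<reg_t *>(OutVal) = V;
    }

    template <typename T> void Funnel(void *OutVal, T IntegralValue) {
      SetValueNoAlloc(OutVal, static_cast<reg_t>(IntegralValue));
    }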
---
clang/lib/Interpreter/Interpreter.cpp | 8 +++++---
clang/unittests/Interpreter/InterpreterTest.cpp | 4 ----
2 files changed, 5 insertions(+), 7 deletions(-)
diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp
index 49dc92d60233a..b4882ab5d2236 100644
--- a/clang/lib/Interpreter/Interpreter.cpp
+++ b/clang/lib/Interpreter/Interpreter.cpp
@@ -686,10 +686,12 @@ class InterfaceKindVisitor
}
private:
- // Force cast these types to uint64 to reduce the number of overloads of
- // `__clang_Interpreter_SetValueNoAlloc`.
+ // Force cast these types to the uint that fits the register size. That way we
+ // reduce the number of overloads of `__clang_Interpreter_SetValueNoAlloc`.
void HandleIntegralOrEnumType(const Type *Ty) {
- TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.UnsignedLongLongTy);
+ uint64_t PtrBits = Ctx.getTypeSize(Ctx.VoidPtrTy);
+ QualType UIntTy = Ctx.getBitIntType(/*Unsigned=*/true, PtrBits);
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(UIntTy);
ExprResult CastedExpr =
S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
assert(!CastedExpr.isInvalid() && "Cannot create cstyle cast expr");
diff --git a/clang/unittests/Interpreter/InterpreterTest.cpp b/clang/unittests/Interpreter/InterpreterTest.cpp
index 29c5ead60b81e..a2e960f143111 100644
--- a/clang/unittests/Interpreter/InterpreterTest.cpp
+++ b/clang/unittests/Interpreter/InterpreterTest.cpp
@@ -282,9 +282,6 @@ TEST_F(InterpreterTest, InstantiateTemplate) {
EXPECT_EQ(42, fn(NewA.getPtr()));
}
-// This test exposes an ARM specific problem in the interpreter, see
-// https://github.com/llvm/llvm-project/issues/94994.
-#ifndef __arm__
TEST_F(InterpreterTest, Value) {
std::vector<const char *> Args = {"-fno-sized-deallocation"};
std::unique_ptr<Interpreter> Interp = createInterpreter(Args);
@@ -383,6 +380,5 @@ TEST_F(InterpreterTest, Value) {
EXPECT_EQ(V9.getKind(), Value::K_PtrOrObj);
EXPECT_TRUE(V9.isManuallyAlloc());
}
-#endif /* ifndef __arm__ */
} // end anonymous namespace
>From aa0851a5a6fd0c8d66dfd8b259c215dba3fabd1e Mon Sep 17 00:00:00 2001
From: Michael Buch <michaelbuch12 at gmail.com>
Date: Wed, 3 Jul 2024 09:34:11 +0200
Subject: [PATCH 085/246] [lldb][DataFormatter][NFC] Remove redundant variables
in std::map formatter
Redundant since:
```
commit be3be28b5d5c97de1c26bf069e0b82043d938f30
Author: Enrico Granata <egranata at apple.com>
Date: Mon Oct 3 23:33:00 2016 +0000
Changes to the std::multimap formatter to make it work against trunk libc++
Fixes rdar://28237486
llvm-svn: 283160
```
---
lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp | 3 ---
1 file changed, 3 deletions(-)
diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
index 0929d49e55eac..96d9bcc3f2cd7 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
@@ -30,7 +30,6 @@ class MapEntry {
: m_entry_sp(entry ? entry->GetSP() : ValueObjectSP()) {}
ValueObjectSP left() const {
- static ConstString g_left("__left_");
if (!m_entry_sp)
return m_entry_sp;
return m_entry_sp->GetSyntheticChildAtOffset(
@@ -38,7 +37,6 @@ class MapEntry {
}
ValueObjectSP right() const {
- static ConstString g_right("__right_");
if (!m_entry_sp)
return m_entry_sp;
return m_entry_sp->GetSyntheticChildAtOffset(
@@ -47,7 +45,6 @@ class MapEntry {
}
ValueObjectSP parent() const {
- static ConstString g_parent("__parent_");
if (!m_entry_sp)
return m_entry_sp;
return m_entry_sp->GetSyntheticChildAtOffset(
>From da62f5f8dfe4d4196191b40dc41e1ef2de1bf5cb Mon Sep 17 00:00:00 2001
From: Michael Buch <michaelbuch12 at gmail.com>
Date: Wed, 3 Jul 2024 09:51:26 +0200
Subject: [PATCH 086/246] [lldb][DataFormatter][NFC] std::map: Add comments and
other minor cleanups
---
.../Plugins/Language/CPlusPlus/LibCxxMap.cpp | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
index 96d9bcc3f2cd7..2a241e3764b19 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
@@ -80,17 +80,10 @@ class MapEntry {
class MapIterator {
public:
- MapIterator() = default;
- MapIterator(MapEntry entry, size_t depth = 0)
- : m_entry(std::move(entry)), m_max_depth(depth), m_error(false) {}
- MapIterator(ValueObjectSP entry, size_t depth = 0)
- : m_entry(std::move(entry)), m_max_depth(depth), m_error(false) {}
- MapIterator(const MapIterator &rhs)
- : m_entry(rhs.m_entry), m_max_depth(rhs.m_max_depth), m_error(false) {}
MapIterator(ValueObject *entry, size_t depth = 0)
: m_entry(entry), m_max_depth(depth), m_error(false) {}
- MapIterator &operator=(const MapIterator &) = default;
+ MapIterator() = default;
ValueObjectSP value() { return m_entry.GetEntry(); }
@@ -108,7 +101,9 @@ class MapIterator {
return m_entry.GetEntry();
}
-protected:
+private:
+ /// Mimics libc++'s __tree_next algorithm, which libc++ uses
+ /// in its __tree_iterator::operator++.
void next() {
if (m_entry.null())
return;
@@ -133,7 +128,7 @@ class MapIterator {
m_entry = MapEntry(m_entry.parent());
}
-private:
+ /// Mimics libc++'s __tree_min algorithm.
MapEntry tree_min(MapEntry x) {
if (x.null())
return MapEntry();
>From e89890e8e510f2b76c8c4a2b2a6fc323b1e837ad Mon Sep 17 00:00:00 2001
From: Michael Buch <michaelbuch12 at gmail.com>
Date: Wed, 3 Jul 2024 10:31:34 +0200
Subject: [PATCH 087/246] [lldb][DataFormatter][NFC] std::map: minor
restructuring in GetChildAtIndex to use early-return
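As a quick illustration of the shape of this change, here is a self-contained sketch (names are made up, not from LLDB) of converting a nested success path into guard clauses:

```cpp
#include <cassert>
#include <optional>

// Nested form: the happy path sits two levels deep and the failure case is
// buried at the bottom of the function.
std::optional<int> childNested(bool haveType, bool needSkip, int value) {
  if (haveType) {
    if (!needSkip)
      return value;
    return value + 1;
  }
  return std::nullopt;
}

// Early-return form: fail fast, then keep the main path at one level.
std::optional<int> childEarlyReturn(bool haveType, bool needSkip, int value) {
  if (!haveType)
    return std::nullopt;
  if (!needSkip)
    return value;
  return value + 1;
}

int main() {
  // Both forms compute the same result for every input combination.
  for (bool t : {false, true})
    for (bool s : {false, true})
      assert(childNested(t, s, 7) == childEarlyReturn(t, s, 7));
  return 0;
}
```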
---
.../Plugins/Language/CPlusPlus/LibCxxMap.cpp | 71 ++++++++++---------
1 file changed, 37 insertions(+), 34 deletions(-)
diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
index 2a241e3764b19..44fe294ced722 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
@@ -267,6 +267,7 @@ void lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetValueOffset(
uint64_t bit_offset;
if (node_type.GetIndexOfFieldWithName("__value_", nullptr, &bit_offset) !=
UINT32_MAX) {
+ // Old layout (pre 089a7cc5dea)
m_skip_size = bit_offset / 8u;
} else {
auto ast_ctx = node_type.GetTypeSystem().dyn_cast_or_null<TypeSystemClang>();
@@ -328,45 +329,47 @@ lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetChildAtIndex(
nullptr; // this will stop all future searches until an Update() happens
return iterated_sp;
}
- if (GetDataType()) {
- if (!need_to_skip) {
- Status error;
- iterated_sp = iterated_sp->Dereference(error);
- if (!iterated_sp || error.Fail()) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
- }
- GetValueOffset(iterated_sp);
- auto child_sp = iterated_sp->GetChildMemberWithName("__value_");
- if (child_sp)
- iterated_sp = child_sp;
- else
- iterated_sp = iterated_sp->GetSyntheticChildAtOffset(
- m_skip_size, m_element_type, true);
- if (!iterated_sp) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
- }
- } else {
- // because of the way our debug info is made, we need to read item 0
- // first so that we can cache information used to generate other elements
- if (m_skip_size == UINT32_MAX)
- GetChildAtIndex(0);
- if (m_skip_size == UINT32_MAX) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
- }
+
+ if (!GetDataType()) {
+ m_tree = nullptr;
+ return lldb::ValueObjectSP();
+ }
+
+ if (!need_to_skip) {
+ Status error;
+ iterated_sp = iterated_sp->Dereference(error);
+ if (!iterated_sp || error.Fail()) {
+ m_tree = nullptr;
+ return lldb::ValueObjectSP();
+ }
+ GetValueOffset(iterated_sp);
+ auto child_sp = iterated_sp->GetChildMemberWithName("__value_");
+ if (child_sp)
+ iterated_sp = child_sp;
+ else
iterated_sp = iterated_sp->GetSyntheticChildAtOffset(
m_skip_size, m_element_type, true);
- if (!iterated_sp) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
- }
+ if (!iterated_sp) {
+ m_tree = nullptr;
+ return lldb::ValueObjectSP();
}
} else {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
+ // Because of the way our debug info is made, we need to read item 0
+ // first so that we can cache information used to generate other elements.
+ if (m_skip_size == UINT32_MAX)
+ GetChildAtIndex(0);
+ if (m_skip_size == UINT32_MAX) {
+ m_tree = nullptr;
+ return lldb::ValueObjectSP();
+ }
+ iterated_sp = iterated_sp->GetSyntheticChildAtOffset(m_skip_size,
+ m_element_type, true);
+ if (!iterated_sp) {
+ m_tree = nullptr;
+ return lldb::ValueObjectSP();
+ }
}
+
+ // At this point we have a valid element.
+ // We need to copy iterated_sp into a new object, otherwise we will end up
+ // with all items named __value_.
>From 40278bb1193720c42911f7297d3bcb4c5af5bc9c Mon Sep 17 00:00:00 2001
From: Slava Zakharin <szakharin at nvidia.com>
Date: Wed, 3 Jul 2024 02:03:46 -0700
Subject: [PATCH 088/246] [mlir][acc] Added async to data clause operations.
(#97307)
As long as the data clause operations are not tightly
"associated" with the compute/data operations (e.g.
they can be optimized as SSA producers and made block
arguments), the information about the original async()
clause should be attached to the data clause operations
to make it easier to generate proper runtime actions
for them. This change propagates the async() information
from the OpenACC data/compute constructs to the data clause
operations. This change also adds the CurrentDeviceIdResource
to guarantee proper ordering of the operations that read
and write the current device identifier.
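Since much of the diff below revolves around the operand-segment bookkeeping that createDataEntryOp now performs, here is a simplified, self-contained analogue of the hoisted addOperand/addOperands helpers (plain ints stand in for mlir::Value, and the names only loosely mirror the patch):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Append a variadic group (e.g. bounds or async operands) and record its
// length in the parallel segment vector that backs MLIR's
// operandSegmentSizes attribute.
static void addOperands(std::vector<int> &operands,
                        std::vector<int32_t> &segments,
                        const std::vector<int> &group) {
  operands.insert(operands.end(), group.begin(), group.end());
  segments.push_back(static_cast<int32_t>(group.size()));
}

// Append an optional single operand; an absent operand still contributes an
// empty segment so later groups keep their positions.
static void addOperand(std::vector<int> &operands,
                       std::vector<int32_t> &segments, const int *operand) {
  if (operand) {
    operands.push_back(*operand);
    segments.push_back(1);
  } else {
    segments.push_back(0);
  }
}

int main() {
  std::vector<int> operands;
  std::vector<int32_t> segments;
  int baseAddr = 100;
  addOperand(operands, segments, &baseAddr); // varPtr
  addOperand(operands, segments, nullptr);   // varPtrPtr absent
  addOperands(operands, segments, {1, 2});   // bounds
  addOperands(operands, segments, {42});     // async
  // segments is now {1, 0, 2, 1}: four groups over four flat operands.
  std::printf("%zu operands in %zu segments\n", operands.size(),
              segments.size());
  return 0;
}
```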
---
flang/lib/Lower/OpenACC.cpp | 411 ++++++++++++------
flang/test/Lower/OpenACC/acc-data.f90 | 6 +-
flang/test/Lower/OpenACC/acc-enter-data.f90 | 12 +-
flang/test/Lower/OpenACC/acc-exit-data.f90 | 16 +-
flang/test/Lower/OpenACC/acc-parallel.f90 | 14 +-
flang/test/Lower/OpenACC/acc-serial.f90 | 4 +-
flang/test/Lower/OpenACC/acc-update.f90 | 24 +-
mlir/include/mlir/Dialect/OpenACC/OpenACC.h | 20 +
.../mlir/Dialect/OpenACC/OpenACCOps.td | 235 +++++++++-
mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp | 30 ++
10 files changed, 580 insertions(+), 192 deletions(-)
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 166fa686cd883..6266a5056ace8 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -58,13 +58,34 @@ genOperandLocation(Fortran::lower::AbstractConverter &converter,
return loc;
}
+static void addOperands(llvm::SmallVectorImpl<mlir::Value> &operands,
+ llvm::SmallVectorImpl<int32_t> &operandSegments,
+ llvm::ArrayRef<mlir::Value> clauseOperands) {
+ operands.append(clauseOperands.begin(), clauseOperands.end());
+ operandSegments.push_back(clauseOperands.size());
+}
+
+static void addOperand(llvm::SmallVectorImpl<mlir::Value> &operands,
+ llvm::SmallVectorImpl<int32_t> &operandSegments,
+ const mlir::Value &clauseOperand) {
+ if (clauseOperand) {
+ operands.push_back(clauseOperand);
+ operandSegments.push_back(1);
+ } else {
+ operandSegments.push_back(0);
+ }
+}
+
template <typename Op>
-static Op createDataEntryOp(fir::FirOpBuilder &builder, mlir::Location loc,
- mlir::Value baseAddr, std::stringstream &name,
- mlir::SmallVector<mlir::Value> bounds,
- bool structured, bool implicit,
- mlir::acc::DataClause dataClause, mlir::Type retTy,
- mlir::Value isPresent = {}) {
+static Op
+createDataEntryOp(fir::FirOpBuilder &builder, mlir::Location loc,
+ mlir::Value baseAddr, std::stringstream &name,
+ mlir::SmallVector<mlir::Value> bounds, bool structured,
+ bool implicit, mlir::acc::DataClause dataClause,
+ mlir::Type retTy, llvm::ArrayRef<mlir::Value> async,
+ llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
+ llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
+ mlir::Value isPresent = {}) {
mlir::Value varPtrPtr;
if (auto boxTy = mlir::dyn_cast<fir::BaseBoxType>(baseAddr.getType())) {
if (isPresent) {
@@ -92,20 +113,25 @@ static Op createDataEntryOp(fir::FirOpBuilder &builder, mlir::Location loc,
retTy = baseAddr.getType();
}
- Op op = builder.create<Op>(loc, retTy, baseAddr);
+ llvm::SmallVector<mlir::Value, 8> operands;
+ llvm::SmallVector<int32_t, 8> operandSegments;
+
+ addOperand(operands, operandSegments, baseAddr);
+ addOperand(operands, operandSegments, varPtrPtr);
+ addOperands(operands, operandSegments, bounds);
+ addOperands(operands, operandSegments, async);
+
+ Op op = builder.create<Op>(loc, retTy, operands);
op.setNameAttr(builder.getStringAttr(name.str()));
op.setStructured(structured);
op.setImplicit(implicit);
op.setDataClause(dataClause);
-
- unsigned insPos = 1;
- if (varPtrPtr)
- op->insertOperands(insPos++, varPtrPtr);
- if (bounds.size() > 0)
- op->insertOperands(insPos, bounds);
op->setAttr(Op::getOperandSegmentSizeAttr(),
- builder.getDenseI32ArrayAttr(
- {1, varPtrPtr ? 1 : 0, static_cast<int32_t>(bounds.size())}));
+ builder.getDenseI32ArrayAttr(operandSegments));
+ if (!asyncDeviceTypes.empty())
+ op.setAsyncOperandsDeviceTypeAttr(builder.getArrayAttr(asyncDeviceTypes));
+ if (!asyncOnlyDeviceTypes.empty())
+ op.setAsyncOnlyAttr(builder.getArrayAttr(asyncOnlyDeviceTypes));
return op;
}
@@ -174,7 +200,8 @@ static void createDeclareAllocFuncWithArg(mlir::OpBuilder &modBuilder,
createDataEntryOp<mlir::acc::UpdateDeviceOp>(
builder, loc, registerFuncOp.getArgument(0), asFortranDesc, bounds,
/*structured=*/false, /*implicit=*/true,
- mlir::acc::DataClause::acc_update_device, descTy);
+ mlir::acc::DataClause::acc_update_device, descTy,
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
llvm::SmallVector<int32_t> operandSegments{0, 0, 0, 1};
llvm::SmallVector<mlir::Value> operands{updateDeviceOp.getResult()};
createSimpleOp<mlir::acc::UpdateOp>(builder, loc, operands, operandSegments);
@@ -185,7 +212,8 @@ static void createDeclareAllocFuncWithArg(mlir::OpBuilder &modBuilder,
addDeclareAttr(builder, boxAddrOp.getOperation(), clause);
EntryOp entryOp = createDataEntryOp<EntryOp>(
builder, loc, boxAddrOp.getResult(), asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType());
+ /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
builder.create<mlir::acc::DeclareEnterOp>(
loc, mlir::acc::DeclareTokenType::get(entryOp.getContext()),
mlir::ValueRange(entryOp.getAccPtr()));
@@ -217,8 +245,8 @@ static void createDeclareDeallocFuncWithArg(
mlir::acc::GetDevicePtrOp entryOp =
createDataEntryOp<mlir::acc::GetDevicePtrOp>(
builder, loc, boxAddrOp.getResult(), asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause,
- boxAddrOp.getType());
+ /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
builder.create<mlir::acc::DeclareExitOp>(
loc, mlir::Value{}, mlir::ValueRange(entryOp.getAccPtr()));
@@ -226,12 +254,16 @@ static void createDeclareDeallocFuncWithArg(
std::is_same_v<ExitOp, mlir::acc::UpdateHostOp>)
builder.create<ExitOp>(entryOp.getLoc(), entryOp.getAccPtr(),
entryOp.getVarPtr(), entryOp.getBounds(),
- entryOp.getDataClause(),
+ entryOp.getAsyncOperands(),
+ entryOp.getAsyncOperandsDeviceTypeAttr(),
+ entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
/*structured=*/false, /*implicit=*/false,
builder.getStringAttr(*entryOp.getName()));
else
builder.create<ExitOp>(entryOp.getLoc(), entryOp.getAccPtr(),
- entryOp.getBounds(), entryOp.getDataClause(),
+ entryOp.getBounds(), entryOp.getAsyncOperands(),
+ entryOp.getAsyncOperandsDeviceTypeAttr(),
+ entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
/*structured=*/false, /*implicit=*/false,
builder.getStringAttr(*entryOp.getName()));
@@ -248,7 +280,8 @@ static void createDeclareDeallocFuncWithArg(
createDataEntryOp<mlir::acc::UpdateDeviceOp>(
builder, loc, loadOp, asFortran, bounds,
/*structured=*/false, /*implicit=*/true,
- mlir::acc::DataClause::acc_update_device, loadOp.getType());
+ mlir::acc::DataClause::acc_update_device, loadOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
llvm::SmallVector<int32_t> operandSegments{0, 0, 0, 1};
llvm::SmallVector<mlir::Value> operands{updateDeviceOp.getResult()};
createSimpleOp<mlir::acc::UpdateOp>(builder, loc, operands, operandSegments);
@@ -290,7 +323,10 @@ genDataOperandOperations(const Fortran::parser::AccObjectList &objectList,
Fortran::lower::StatementContext &stmtCtx,
llvm::SmallVectorImpl<mlir::Value> &dataOperands,
mlir::acc::DataClause dataClause, bool structured,
- bool implicit, bool setDeclareAttr = false) {
+ bool implicit, llvm::ArrayRef<mlir::Value> async,
+ llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
+ llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
+ bool setDeclareAttr = false) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
Fortran::evaluate::ExpressionAnalyzer ea{semanticsContext};
for (const auto &accObject : objectList.v) {
@@ -316,7 +352,8 @@ genDataOperandOperations(const Fortran::parser::AccObjectList &objectList,
: info.addr;
Op op = createDataEntryOp<Op>(builder, operandLocation, baseAddr, asFortran,
bounds, structured, implicit, dataClause,
- baseAddr.getType(), info.isPresent);
+ baseAddr.getType(), async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes, info.isPresent);
dataOperands.push_back(op.getAccPtr());
}
}
@@ -345,7 +382,8 @@ static void genDeclareDataOperandOperations(
operandLocation, asFortran, bounds);
EntryOp op = createDataEntryOp<EntryOp>(
builder, operandLocation, info.addr, asFortran, bounds, structured,
- implicit, dataClause, info.addr.getType());
+ implicit, dataClause, info.addr.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
dataOperands.push_back(op.getAccPtr());
addDeclareAttr(builder, op.getVarPtr().getDefiningOp(), dataClause);
if (mlir::isa<fir::BaseBoxType>(fir::unwrapRefType(info.addr.getType()))) {
@@ -397,13 +435,16 @@ static void genDataExitOperations(fir::FirOpBuilder &builder,
std::is_same_v<ExitOp, mlir::acc::UpdateHostOp>)
builder.create<ExitOp>(
entryOp.getLoc(), entryOp.getAccPtr(), entryOp.getVarPtr(),
- entryOp.getBounds(), entryOp.getDataClause(), structured,
- entryOp.getImplicit(), builder.getStringAttr(*entryOp.getName()));
+ entryOp.getBounds(), entryOp.getAsyncOperands(),
+ entryOp.getAsyncOperandsDeviceTypeAttr(), entryOp.getAsyncOnlyAttr(),
+ entryOp.getDataClause(), structured, entryOp.getImplicit(),
+ builder.getStringAttr(*entryOp.getName()));
else
- builder.create<ExitOp>(entryOp.getLoc(), entryOp.getAccPtr(),
- entryOp.getBounds(), entryOp.getDataClause(),
- structured, entryOp.getImplicit(),
- builder.getStringAttr(*entryOp.getName()));
+ builder.create<ExitOp>(
+ entryOp.getLoc(), entryOp.getAccPtr(), entryOp.getBounds(),
+ entryOp.getAsyncOperands(), entryOp.getAsyncOperandsDeviceTypeAttr(),
+ entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), structured,
+ entryOp.getImplicit(), builder.getStringAttr(*entryOp.getName()));
}
}
@@ -783,7 +824,10 @@ genPrivatizations(const Fortran::parser::AccObjectList &objectList,
Fortran::semantics::SemanticsContext &semanticsContext,
Fortran::lower::StatementContext &stmtCtx,
llvm::SmallVectorImpl<mlir::Value> &dataOperands,
- llvm::SmallVector<mlir::Attribute> &privatizations) {
+ llvm::SmallVector<mlir::Attribute> &privatizations,
+ llvm::ArrayRef<mlir::Value> async,
+ llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
+ llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
Fortran::evaluate::ExpressionAnalyzer ea{semanticsContext};
for (const auto &accObject : objectList.v) {
@@ -808,7 +852,8 @@ genPrivatizations(const Fortran::parser::AccObjectList &objectList,
operandLocation, retTy);
auto op = createDataEntryOp<mlir::acc::PrivateOp>(
builder, operandLocation, info.addr, asFortran, bounds, true,
- /*implicit=*/false, mlir::acc::DataClause::acc_private, retTy);
+ /*implicit=*/false, mlir::acc::DataClause::acc_private, retTy, async,
+ asyncDeviceTypes, asyncOnlyDeviceTypes);
dataOperands.push_back(op.getAccPtr());
} else {
std::string suffix =
@@ -819,7 +864,8 @@ genPrivatizations(const Fortran::parser::AccObjectList &objectList,
builder, recipeName, operandLocation, retTy, bounds);
auto op = createDataEntryOp<mlir::acc::FirstprivateOp>(
builder, operandLocation, info.addr, asFortran, bounds, true,
- /*implicit=*/false, mlir::acc::DataClause::acc_firstprivate, retTy);
+ /*implicit=*/false, mlir::acc::DataClause::acc_firstprivate, retTy,
+ async, asyncDeviceTypes, asyncOnlyDeviceTypes);
dataOperands.push_back(op.getAccPtr());
}
privatizations.push_back(mlir::SymbolRefAttr::get(
@@ -1354,7 +1400,10 @@ genReductions(const Fortran::parser::AccObjectListWithReduction &objectList,
Fortran::semantics::SemanticsContext &semanticsContext,
Fortran::lower::StatementContext &stmtCtx,
llvm::SmallVectorImpl<mlir::Value> &reductionOperands,
- llvm::SmallVector<mlir::Attribute> &reductionRecipes) {
+ llvm::SmallVector<mlir::Attribute> &reductionRecipes,
+ llvm::ArrayRef<mlir::Value> async,
+ llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
+ llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
const auto &objects = std::get<Fortran::parser::AccObjectList>(objectList.t);
const auto &op = std::get<Fortran::parser::ReductionOperator>(objectList.t);
@@ -1383,7 +1432,8 @@ genReductions(const Fortran::parser::AccObjectListWithReduction &objectList,
auto op = createDataEntryOp<mlir::acc::ReductionOp>(
builder, operandLocation, info.addr, asFortran, bounds,
/*structured=*/true, /*implicit=*/false,
- mlir::acc::DataClause::acc_reduction, info.addr.getType());
+ mlir::acc::DataClause::acc_reduction, info.addr.getType(), async,
+ asyncDeviceTypes, asyncOnlyDeviceTypes);
mlir::Type ty = op.getAccPtr().getType();
if (!areAllBoundConstant(bounds) ||
fir::isAssumedShape(info.addr.getType()) ||
@@ -1404,25 +1454,6 @@ genReductions(const Fortran::parser::AccObjectListWithReduction &objectList,
}
}
-static void
-addOperands(llvm::SmallVectorImpl<mlir::Value> &operands,
- llvm::SmallVectorImpl<int32_t> &operandSegments,
- const llvm::SmallVectorImpl<mlir::Value> &clauseOperands) {
- operands.append(clauseOperands.begin(), clauseOperands.end());
- operandSegments.push_back(clauseOperands.size());
-}
-
-static void addOperand(llvm::SmallVectorImpl<mlir::Value> &operands,
- llvm::SmallVectorImpl<int32_t> &operandSegments,
- const mlir::Value &clauseOperand) {
- if (clauseOperand) {
- operands.push_back(clauseOperand);
- operandSegments.push_back(1);
- } else {
- operandSegments.push_back(0);
- }
-}
-
template <typename Op, typename Terminator>
static Op
createRegionOp(fir::FirOpBuilder &builder, mlir::Location loc,
@@ -1656,7 +1687,8 @@ static void privatizeIv(Fortran::lower::AbstractConverter &converter,
std::stringstream asFortran;
auto op = createDataEntryOp<mlir::acc::PrivateOp>(
builder, loc, ivValue, asFortran, {}, true, /*implicit=*/true,
- mlir::acc::DataClause::acc_private, ivValue.getType());
+ mlir::acc::DataClause::acc_private, ivValue.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
privateOperands.push_back(op.getAccPtr());
privatizations.push_back(mlir::SymbolRefAttr::get(builder.getContext(),
@@ -1897,12 +1929,14 @@ static mlir::acc::LoopOp createLoopOp(
&clause.u)) {
genPrivatizations<mlir::acc::PrivateRecipeOp>(
privateClause->v, converter, semanticsContext, stmtCtx,
- privateOperands, privatizations);
+ privateOperands, privatizations, /*async=*/{},
+ /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
} else if (const auto *reductionClause =
std::get_if<Fortran::parser::AccClause::Reduction>(
&clause.u)) {
genReductions(reductionClause->v, converter, semanticsContext, stmtCtx,
- reductionOperands, reductionRecipes);
+ reductionOperands, reductionRecipes, /*async=*/{},
+ /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
} else if (std::get_if<Fortran::parser::AccClause::Seq>(&clause.u)) {
for (auto crtDeviceTypeAttr : crtDeviceTypes)
seqDeviceTypes.push_back(crtDeviceTypeAttr);
@@ -2088,6 +2122,9 @@ static void genDataOperandOperationsWithModifier(
llvm::SmallVectorImpl<mlir::Value> &dataClauseOperands,
const mlir::acc::DataClause clause,
const mlir::acc::DataClause clauseWithModifier,
+ llvm::ArrayRef<mlir::Value> async,
+ llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
+ llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
bool setDeclareAttr = false) {
const Fortran::parser::AccObjectListWithModifier &listWithModifier = x->v;
const auto &accObjectList =
@@ -2099,7 +2136,8 @@ static void genDataOperandOperationsWithModifier(
(modifier && (*modifier).v == mod) ? clauseWithModifier : clause;
genDataOperandOperations<Op>(accObjectList, converter, semanticsContext,
stmtCtx, dataClauseOperands, dataClause,
- /*structured=*/true, /*implicit=*/false,
+ /*structured=*/true, /*implicit=*/false, async,
+ asyncDeviceTypes, asyncOnlyDeviceTypes,
setDeclareAttr);
}
@@ -2150,8 +2188,9 @@ static Op createComputeOp(
// Lower clauses values mapped to operands and array attributes.
// Keep track of each group of operands separately as clauses can appear
// more than once.
+
+ // Process the clauses that may have a specified device_type first.
for (const Fortran::parser::AccClause &clause : accClauseList.v) {
- mlir::Location clauseLocation = converter.genLocation(clause.source);
if (const auto *asyncClause =
std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
genAsyncClause(converter, asyncClause, async, asyncDeviceTypes,
@@ -2193,8 +2232,19 @@ static Op createComputeOp(
vectorLength.push_back(vectorLengthValue);
vectorLengthDeviceTypes.push_back(crtDeviceTypeAttr);
}
- } else if (const auto *ifClause =
- std::get_if<Fortran::parser::AccClause::If>(&clause.u)) {
+ } else if (const auto *deviceTypeClause =
+ std::get_if<Fortran::parser::AccClause::DeviceType>(
+ &clause.u)) {
+ crtDeviceTypes.clear();
+ gatherDeviceTypeAttrs(builder, deviceTypeClause, crtDeviceTypes);
+ }
+ }
+
+ // Process the clauses independent of device_type.
+ for (const Fortran::parser::AccClause &clause : accClauseList.v) {
+ mlir::Location clauseLocation = converter.genLocation(clause.source);
+ if (const auto *ifClause =
+ std::get_if<Fortran::parser::AccClause::If>(&clause.u)) {
genIfClause(converter, clauseLocation, ifClause, ifCond, stmtCtx);
} else if (const auto *selfClause =
std::get_if<Fortran::parser::AccClause::Self>(&clause.u)) {
@@ -2237,7 +2287,8 @@ static Op createComputeOp(
genDataOperandOperations<mlir::acc::CopyinOp>(
copyClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_copy,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
copyEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *copyinClause =
@@ -2247,7 +2298,8 @@ static Op createComputeOp(
copyinClause, converter, semanticsContext, stmtCtx,
Fortran::parser::AccDataModifier::Modifier::ReadOnly,
dataClauseOperands, mlir::acc::DataClause::acc_copyin,
- mlir::acc::DataClause::acc_copyin_readonly);
+ mlir::acc::DataClause::acc_copyin_readonly, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *copyoutClause =
std::get_if<Fortran::parser::AccClause::Copyout>(
&clause.u)) {
@@ -2257,7 +2309,8 @@ static Op createComputeOp(
copyoutClause, converter, semanticsContext, stmtCtx,
Fortran::parser::AccDataModifier::Modifier::ReadOnly,
dataClauseOperands, mlir::acc::DataClause::acc_copyout,
- mlir::acc::DataClause::acc_copyout_zero);
+ mlir::acc::DataClause::acc_copyout_zero, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
copyoutEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *createClause =
@@ -2268,7 +2321,8 @@ static Op createComputeOp(
createClause, converter, semanticsContext, stmtCtx,
Fortran::parser::AccDataModifier::Modifier::Zero, dataClauseOperands,
mlir::acc::DataClause::acc_create,
- mlir::acc::DataClause::acc_create_zero);
+ mlir::acc::DataClause::acc_create_zero, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
createEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *noCreateClause =
@@ -2277,28 +2331,32 @@ static Op createComputeOp(
genDataOperandOperations<mlir::acc::NoCreateOp>(
noCreateClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_no_create,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *presentClause =
std::get_if<Fortran::parser::AccClause::Present>(
&clause.u)) {
genDataOperandOperations<mlir::acc::PresentOp>(
presentClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_present,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *devicePtrClause =
std::get_if<Fortran::parser::AccClause::Deviceptr>(
&clause.u)) {
genDataOperandOperations<mlir::acc::DevicePtrOp>(
devicePtrClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_deviceptr,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *attachClause =
std::get_if<Fortran::parser::AccClause::Attach>(&clause.u)) {
auto crtDataStart = dataClauseOperands.size();
genDataOperandOperations<mlir::acc::AttachOp>(
attachClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_attach,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
attachEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *privateClause =
@@ -2307,13 +2365,15 @@ static Op createComputeOp(
if (!combinedConstructs)
genPrivatizations<mlir::acc::PrivateRecipeOp>(
privateClause->v, converter, semanticsContext, stmtCtx,
- privateOperands, privatizations);
+ privateOperands, privatizations, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *firstprivateClause =
std::get_if<Fortran::parser::AccClause::Firstprivate>(
&clause.u)) {
genPrivatizations<mlir::acc::FirstprivateRecipeOp>(
firstprivateClause->v, converter, semanticsContext, stmtCtx,
- firstprivateOperands, firstPrivatizations);
+ firstprivateOperands, firstPrivatizations, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *reductionClause =
std::get_if<Fortran::parser::AccClause::Reduction>(
&clause.u)) {
@@ -2324,14 +2384,16 @@ static Op createComputeOp(
// instead.
if (!combinedConstructs) {
genReductions(reductionClause->v, converter, semanticsContext, stmtCtx,
- reductionOperands, reductionRecipes);
+ reductionOperands, reductionRecipes, async,
+ asyncDeviceTypes, asyncOnlyDeviceTypes);
} else {
auto crtDataStart = dataClauseOperands.size();
genDataOperandOperations<mlir::acc::CopyinOp>(
std::get<Fortran::parser::AccObjectList>(reductionClause->v.t),
converter, semanticsContext, stmtCtx, dataClauseOperands,
mlir::acc::DataClause::acc_reduction,
- /*structured=*/true, /*implicit=*/true);
+ /*structured=*/true, /*implicit=*/true, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
copyEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
}
@@ -2343,11 +2405,6 @@ static Op createComputeOp(
else if ((defaultClause->v).v ==
llvm::acc::DefaultValue::ACC_Default_present)
hasDefaultPresent = true;
- } else if (const auto *deviceTypeClause =
- std::get_if<Fortran::parser::AccClause::DeviceType>(
- &clause.u)) {
- crtDeviceTypes.clear();
- gatherDeviceTypeAttrs(builder, deviceTypeClause, crtDeviceTypes);
}
}
@@ -2480,6 +2537,28 @@ static void genACCDataOp(Fortran::lower::AbstractConverter &converter,
// Lower clauses values mapped to operands and array attributes.
// Keep track of each group of operands separately as clauses can appear
// more than once.
+
+ // Process the clauses that may have a specified device_type first.
+ for (const Fortran::parser::AccClause &clause : accClauseList.v) {
+ if (const auto *asyncClause =
+ std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
+ genAsyncClause(converter, asyncClause, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes, crtDeviceTypes, stmtCtx);
+ } else if (const auto *waitClause =
+ std::get_if<Fortran::parser::AccClause::Wait>(&clause.u)) {
+ genWaitClauseWithDeviceType(converter, waitClause, waitOperands,
+ waitOperandsDeviceTypes, waitOnlyDeviceTypes,
+ hasWaitDevnums, waitOperandsSegments,
+ crtDeviceTypes, stmtCtx);
+ } else if (const auto *deviceTypeClause =
+ std::get_if<Fortran::parser::AccClause::DeviceType>(
+ &clause.u)) {
+ crtDeviceTypes.clear();
+ gatherDeviceTypeAttrs(builder, deviceTypeClause, crtDeviceTypes);
+ }
+ }
+
+ // Process the clauses independent of device_type.
for (const Fortran::parser::AccClause &clause : accClauseList.v) {
mlir::Location clauseLocation = converter.genLocation(clause.source);
if (const auto *ifClause =
@@ -2491,7 +2570,8 @@ static void genACCDataOp(Fortran::lower::AbstractConverter &converter,
genDataOperandOperations<mlir::acc::CopyinOp>(
copyClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_copy,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
copyEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *copyinClause =
@@ -2501,7 +2581,8 @@ static void genACCDataOp(Fortran::lower::AbstractConverter &converter,
copyinClause, converter, semanticsContext, stmtCtx,
Fortran::parser::AccDataModifier::Modifier::ReadOnly,
dataClauseOperands, mlir::acc::DataClause::acc_copyin,
- mlir::acc::DataClause::acc_copyin_readonly);
+ mlir::acc::DataClause::acc_copyin_readonly, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *copyoutClause =
std::get_if<Fortran::parser::AccClause::Copyout>(
&clause.u)) {
@@ -2511,7 +2592,8 @@ static void genACCDataOp(Fortran::lower::AbstractConverter &converter,
copyoutClause, converter, semanticsContext, stmtCtx,
Fortran::parser::AccDataModifier::Modifier::Zero, dataClauseOperands,
mlir::acc::DataClause::acc_copyout,
- mlir::acc::DataClause::acc_copyout_zero);
+ mlir::acc::DataClause::acc_copyout_zero, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
copyoutEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *createClause =
@@ -2522,7 +2604,8 @@ static void genACCDataOp(Fortran::lower::AbstractConverter &converter,
createClause, converter, semanticsContext, stmtCtx,
Fortran::parser::AccDataModifier::Modifier::Zero, dataClauseOperands,
mlir::acc::DataClause::acc_create,
- mlir::acc::DataClause::acc_create_zero);
+ mlir::acc::DataClause::acc_create_zero, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
createEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *noCreateClause =
@@ -2531,51 +2614,40 @@ static void genACCDataOp(Fortran::lower::AbstractConverter &converter,
genDataOperandOperations<mlir::acc::NoCreateOp>(
noCreateClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_no_create,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *presentClause =
std::get_if<Fortran::parser::AccClause::Present>(
&clause.u)) {
genDataOperandOperations<mlir::acc::PresentOp>(
presentClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_present,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *deviceptrClause =
std::get_if<Fortran::parser::AccClause::Deviceptr>(
&clause.u)) {
genDataOperandOperations<mlir::acc::DevicePtrOp>(
deviceptrClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_deviceptr,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *attachClause =
std::get_if<Fortran::parser::AccClause::Attach>(&clause.u)) {
auto crtDataStart = dataClauseOperands.size();
genDataOperandOperations<mlir::acc::AttachOp>(
attachClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_attach,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
attachEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
- } else if (const auto *asyncClause =
- std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
- genAsyncClause(converter, asyncClause, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes, crtDeviceTypes, stmtCtx);
- } else if (const auto *waitClause =
- std::get_if<Fortran::parser::AccClause::Wait>(&clause.u)) {
- genWaitClauseWithDeviceType(converter, waitClause, waitOperands,
- waitOperandsDeviceTypes, waitOnlyDeviceTypes,
- hasWaitDevnums, waitOperandsSegments,
- crtDeviceTypes, stmtCtx);
} else if(const auto *defaultClause =
std::get_if<Fortran::parser::AccClause::Default>(&clause.u)) {
if ((defaultClause->v).v == llvm::acc::DefaultValue::ACC_Default_none)
hasDefaultNone = true;
else if ((defaultClause->v).v == llvm::acc::DefaultValue::ACC_Default_present)
hasDefaultPresent = true;
- } else if (const auto *deviceTypeClause =
- std::get_if<Fortran::parser::AccClause::DeviceType>(
- &clause.u)) {
- crtDeviceTypes.clear();
- gatherDeviceTypeAttrs(builder, deviceTypeClause, crtDeviceTypes);
}
}
@@ -2655,7 +2727,8 @@ genACCHostDataOp(Fortran::lower::AbstractConverter &converter,
genDataOperandOperations<mlir::acc::UseDeviceOp>(
useDevice->v, converter, semanticsContext, stmtCtx, dataOperands,
mlir::acc::DataClause::acc_use_device,
- /*structured=*/true, /*implicit=*/false);
+ /*structured=*/true, /*implicit=*/false, /*async=*/{},
+ /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
} else if (std::get_if<Fortran::parser::AccClause::IfPresent>(&clause.u)) {
addIfPresentAttr = true;
}
@@ -2792,14 +2865,34 @@ genACCEnterDataOp(Fortran::lower::AbstractConverter &converter,
// Lower clauses values mapped to operands.
// Keep track of each group of operands separately as clauses can appear
// more than once.
+
+ // Process the async clause first.
+ for (const Fortran::parser::AccClause &clause : accClauseList.v) {
+ if (const auto *asyncClause =
+ std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
+ genAsyncClause(converter, asyncClause, async, addAsyncAttr, stmtCtx);
+ }
+ }
+
+ // The async clause of 'enter data' applies to all device types,
+ // so propagate it to the copyin/create/attach ops as if it were
+ // an async clause without a preceding device_type clause.
+ llvm::SmallVector<mlir::Attribute> asyncDeviceTypes, asyncOnlyDeviceTypes;
+ llvm::SmallVector<mlir::Value> asyncValues;
+ auto noneDeviceTypeAttr = mlir::acc::DeviceTypeAttr::get(
+ firOpBuilder.getContext(), mlir::acc::DeviceType::None);
+ if (addAsyncAttr) {
+ asyncOnlyDeviceTypes.push_back(noneDeviceTypeAttr);
+ } else if (async) {
+ asyncValues.push_back(async);
+ asyncDeviceTypes.push_back(noneDeviceTypeAttr);
+ }
+
for (const Fortran::parser::AccClause &clause : accClauseList.v) {
mlir::Location clauseLocation = converter.genLocation(clause.source);
if (const auto *ifClause =
std::get_if<Fortran::parser::AccClause::If>(&clause.u)) {
genIfClause(converter, clauseLocation, ifClause, ifCond, stmtCtx);
- } else if (const auto *asyncClause =
- std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
- genAsyncClause(converter, asyncClause, async, addAsyncAttr, stmtCtx);
} else if (const auto *waitClause =
std::get_if<Fortran::parser::AccClause::Wait>(&clause.u)) {
genWaitClause(converter, waitClause, waitOperands, waitDevnum,
@@ -2813,7 +2906,8 @@ genACCEnterDataOp(Fortran::lower::AbstractConverter &converter,
genDataOperandOperations<mlir::acc::CopyinOp>(
accObjectList, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_copyin, false,
- /*implicit=*/false);
+ /*implicit=*/false, asyncValues, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *createClause =
std::get_if<Fortran::parser::AccClause::Create>(&clause.u)) {
const Fortran::parser::AccObjectListWithModifier &listWithModifier =
@@ -2829,14 +2923,16 @@ genACCEnterDataOp(Fortran::lower::AbstractConverter &converter,
clause = mlir::acc::DataClause::acc_create_zero;
genDataOperandOperations<mlir::acc::CreateOp>(
accObjectList, converter, semanticsContext, stmtCtx,
- dataClauseOperands, clause, false, /*implicit=*/false);
+ dataClauseOperands, clause, false, /*implicit=*/false, asyncValues,
+ asyncDeviceTypes, asyncOnlyDeviceTypes);
} else if (const auto *attachClause =
std::get_if<Fortran::parser::AccClause::Attach>(&clause.u)) {
genDataOperandOperations<mlir::acc::AttachOp>(
attachClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_attach, false,
- /*implicit=*/false);
- } else {
+ /*implicit=*/false, asyncValues, asyncDeviceTypes,
+ asyncOnlyDeviceTypes);
+ } else if (!std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
llvm::report_fatal_error(
"Unknown clause in ENTER DATA directive lowering");
}
@@ -2882,14 +2978,34 @@ genACCExitDataOp(Fortran::lower::AbstractConverter &converter,
// Lower clauses values mapped to operands.
// Keep track of each group of operands separately as clauses can appear
// more than once.
+
+ // Process the async clause first.
+ for (const Fortran::parser::AccClause &clause : accClauseList.v) {
+ if (const auto *asyncClause =
+ std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
+ genAsyncClause(converter, asyncClause, async, addAsyncAttr, stmtCtx);
+ }
+ }
+
+ // The async clause of 'exit data' applies to all device types,
+ // so propagate it to the copyout/delete/detach ops as if it were
+ // an async clause without a preceding device_type clause.
+ llvm::SmallVector<mlir::Attribute> asyncDeviceTypes, asyncOnlyDeviceTypes;
+ llvm::SmallVector<mlir::Value> asyncValues;
+ auto noneDeviceTypeAttr = mlir::acc::DeviceTypeAttr::get(
+ builder.getContext(), mlir::acc::DeviceType::None);
+ if (addAsyncAttr) {
+ asyncOnlyDeviceTypes.push_back(noneDeviceTypeAttr);
+ } else if (async) {
+ asyncValues.push_back(async);
+ asyncDeviceTypes.push_back(noneDeviceTypeAttr);
+ }
+
for (const Fortran::parser::AccClause &clause : accClauseList.v) {
mlir::Location clauseLocation = converter.genLocation(clause.source);
if (const auto *ifClause =
std::get_if<Fortran::parser::AccClause::If>(&clause.u)) {
genIfClause(converter, clauseLocation, ifClause, ifCond, stmtCtx);
- } else if (const auto *asyncClause =
- std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
- genAsyncClause(converter, asyncClause, async, addAsyncAttr, stmtCtx);
} else if (const auto *waitClause =
std::get_if<Fortran::parser::AccClause::Wait>(&clause.u)) {
genWaitClause(converter, waitClause, waitOperands, waitDevnum,
@@ -2903,17 +3019,20 @@ genACCExitDataOp(Fortran::lower::AbstractConverter &converter,
std::get<Fortran::parser::AccObjectList>(listWithModifier.t);
genDataOperandOperations<mlir::acc::GetDevicePtrOp>(
accObjectList, converter, semanticsContext, stmtCtx, copyoutOperands,
- mlir::acc::DataClause::acc_copyout, false, /*implicit=*/false);
+ mlir::acc::DataClause::acc_copyout, false, /*implicit=*/false,
+ asyncValues, asyncDeviceTypes, asyncOnlyDeviceTypes);
} else if (const auto *deleteClause =
std::get_if<Fortran::parser::AccClause::Delete>(&clause.u)) {
genDataOperandOperations<mlir::acc::GetDevicePtrOp>(
deleteClause->v, converter, semanticsContext, stmtCtx, deleteOperands,
- mlir::acc::DataClause::acc_delete, false, /*implicit=*/false);
+ mlir::acc::DataClause::acc_delete, false, /*implicit=*/false,
+ asyncValues, asyncDeviceTypes, asyncOnlyDeviceTypes);
} else if (const auto *detachClause =
std::get_if<Fortran::parser::AccClause::Detach>(&clause.u)) {
genDataOperandOperations<mlir::acc::GetDevicePtrOp>(
detachClause->v, converter, semanticsContext, stmtCtx, detachOperands,
- mlir::acc::DataClause::acc_detach, false, /*implicit=*/false);
+ mlir::acc::DataClause::acc_detach, false, /*implicit=*/false,
+ asyncValues, asyncDeviceTypes, asyncOnlyDeviceTypes);
} else if (std::get_if<Fortran::parser::AccClause::Finalize>(&clause.u)) {
addFinalizeAttr = true;
}
@@ -3089,13 +3208,11 @@ genACCUpdateOp(Fortran::lower::AbstractConverter &converter,
// Lower clauses values mapped to operands and array attributes.
// Keep track of each group of operands separately as clauses can appear
// more than once.
+
+ // Process the clauses that may have a specified device_type first.
for (const Fortran::parser::AccClause &clause : accClauseList.v) {
- mlir::Location clauseLocation = converter.genLocation(clause.source);
- if (const auto *ifClause =
- std::get_if<Fortran::parser::AccClause::If>(&clause.u)) {
- genIfClause(converter, clauseLocation, ifClause, ifCond, stmtCtx);
- } else if (const auto *asyncClause =
- std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
+ if (const auto *asyncClause =
+ std::get_if<Fortran::parser::AccClause::Async>(&clause.u)) {
genAsyncClause(converter, asyncClause, asyncOperands,
asyncOperandsDeviceTypes, asyncOnlyDeviceTypes,
crtDeviceTypes, stmtCtx);
@@ -3110,18 +3227,29 @@ genACCUpdateOp(Fortran::lower::AbstractConverter &converter,
&clause.u)) {
crtDeviceTypes.clear();
gatherDeviceTypeAttrs(builder, deviceTypeClause, crtDeviceTypes);
+ }
+ }
+
+ // Process the clauses independent of device_type.
+ for (const Fortran::parser::AccClause &clause : accClauseList.v) {
+ mlir::Location clauseLocation = converter.genLocation(clause.source);
+ if (const auto *ifClause =
+ std::get_if<Fortran::parser::AccClause::If>(&clause.u)) {
+ genIfClause(converter, clauseLocation, ifClause, ifCond, stmtCtx);
} else if (const auto *hostClause =
std::get_if<Fortran::parser::AccClause::Host>(&clause.u)) {
genDataOperandOperations<mlir::acc::GetDevicePtrOp>(
hostClause->v, converter, semanticsContext, stmtCtx,
updateHostOperands, mlir::acc::DataClause::acc_update_host, false,
- /*implicit=*/false);
+ /*implicit=*/false, asyncOperands, asyncOperandsDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (const auto *deviceClause =
std::get_if<Fortran::parser::AccClause::Device>(&clause.u)) {
genDataOperandOperations<mlir::acc::UpdateDeviceOp>(
deviceClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_update_device, false,
- /*implicit=*/false);
+ /*implicit=*/false, asyncOperands, asyncOperandsDeviceTypes,
+ asyncOnlyDeviceTypes);
} else if (std::get_if<Fortran::parser::AccClause::IfPresent>(&clause.u)) {
ifPresent = true;
} else if (const auto *selfClause =
@@ -3134,7 +3262,8 @@ genACCUpdateOp(Fortran::lower::AbstractConverter &converter,
genDataOperandOperations<mlir::acc::GetDevicePtrOp>(
*accObjectList, converter, semanticsContext, stmtCtx,
updateHostOperands, mlir::acc::DataClause::acc_update_self, false,
- /*implicit=*/false);
+ /*implicit=*/false, asyncOperands, asyncOperandsDeviceTypes,
+ asyncOnlyDeviceTypes);
}
}
@@ -3275,7 +3404,8 @@ static void createDeclareGlobalOp(mlir::OpBuilder &modBuilder,
llvm::SmallVector<mlir::Value> bounds;
EntryOp entryOp = createDataEntryOp<EntryOp>(
builder, loc, addrOp.getResTy(), asFortran, bounds,
- /*structured=*/false, implicit, clause, addrOp.getResTy().getType());
+ /*structured=*/false, implicit, clause, addrOp.getResTy().getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
if constexpr (std::is_same_v<DeclareOp, mlir::acc::DeclareEnterOp>)
builder.create<DeclareOp>(
loc, mlir::acc::DeclareTokenType::get(entryOp.getContext()),
@@ -3285,7 +3415,9 @@ static void createDeclareGlobalOp(mlir::OpBuilder &modBuilder,
mlir::ValueRange(entryOp.getAccPtr()));
if constexpr (std::is_same_v<GlobalOp, mlir::acc::GlobalDestructorOp>) {
builder.create<ExitOp>(entryOp.getLoc(), entryOp.getAccPtr(),
- entryOp.getBounds(), entryOp.getDataClause(),
+ entryOp.getBounds(), entryOp.getAsyncOperands(),
+ entryOp.getAsyncOperandsDeviceTypeAttr(),
+ entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
/*structured=*/false, /*implicit=*/false,
builder.getStringAttr(*entryOp.getName()));
}
@@ -3319,7 +3451,8 @@ static void createDeclareAllocFunc(mlir::OpBuilder &modBuilder,
createDataEntryOp<mlir::acc::UpdateDeviceOp>(
builder, loc, addrOp, asFortranDesc, bounds,
/*structured=*/false, /*implicit=*/true,
- mlir::acc::DataClause::acc_update_device, addrOp.getType());
+ mlir::acc::DataClause::acc_update_device, addrOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
llvm::SmallVector<int32_t> operandSegments{0, 0, 0, 1};
llvm::SmallVector<mlir::Value> operands{updateDeviceOp.getResult()};
createSimpleOp<mlir::acc::UpdateOp>(builder, loc, operands, operandSegments);
@@ -3329,7 +3462,8 @@ static void createDeclareAllocFunc(mlir::OpBuilder &modBuilder,
addDeclareAttr(builder, boxAddrOp.getOperation(), clause);
EntryOp entryOp = createDataEntryOp<EntryOp>(
builder, loc, boxAddrOp.getResult(), asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType());
+ /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
builder.create<mlir::acc::DeclareEnterOp>(
loc, mlir::acc::DeclareTokenType::get(entryOp.getContext()),
mlir::ValueRange(entryOp.getAccPtr()));
@@ -3366,8 +3500,8 @@ static void createDeclareDeallocFunc(mlir::OpBuilder &modBuilder,
mlir::acc::GetDevicePtrOp entryOp =
createDataEntryOp<mlir::acc::GetDevicePtrOp>(
builder, loc, boxAddrOp.getResult(), asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause,
- boxAddrOp.getType());
+ /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
builder.create<mlir::acc::DeclareExitOp>(
loc, mlir::Value{}, mlir::ValueRange(entryOp.getAccPtr()));
@@ -3376,12 +3510,16 @@ static void createDeclareDeallocFunc(mlir::OpBuilder &modBuilder,
std::is_same_v<ExitOp, mlir::acc::UpdateHostOp>)
builder.create<ExitOp>(entryOp.getLoc(), entryOp.getAccPtr(),
entryOp.getVarPtr(), entryOp.getBounds(),
- entryOp.getDataClause(),
+ entryOp.getAsyncOperands(),
+ entryOp.getAsyncOperandsDeviceTypeAttr(),
+ entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
/*structured=*/false, /*implicit=*/false,
builder.getStringAttr(*entryOp.getName()));
else
builder.create<ExitOp>(entryOp.getLoc(), entryOp.getAccPtr(),
- entryOp.getBounds(), entryOp.getDataClause(),
+ entryOp.getBounds(), entryOp.getAsyncOperands(),
+ entryOp.getAsyncOperandsDeviceTypeAttr(),
+ entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
/*structured=*/false, /*implicit=*/false,
builder.getStringAttr(*entryOp.getName()));
@@ -3400,7 +3538,8 @@ static void createDeclareDeallocFunc(mlir::OpBuilder &modBuilder,
createDataEntryOp<mlir::acc::UpdateDeviceOp>(
builder, loc, addrOp, asFortran, bounds,
/*structured=*/false, /*implicit=*/true,
- mlir::acc::DataClause::acc_update_device, addrOp.getType());
+ mlir::acc::DataClause::acc_update_device, addrOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
llvm::SmallVector<int32_t> operandSegments{0, 0, 0, 1};
llvm::SmallVector<mlir::Value> operands{updateDeviceOp.getResult()};
createSimpleOp<mlir::acc::UpdateOp>(builder, loc, operands, operandSegments);
@@ -4046,7 +4185,9 @@ genACC(Fortran::lower::AbstractConverter &converter,
genDataOperandOperations<mlir::acc::CacheOp>(
accObjectList, converter, semanticsContext, stmtCtx, cacheOperands,
dataClause,
- /*structured=*/true, /*implicit=*/false, /*setDeclareAttr*/ false);
+ /*structured=*/true, /*implicit=*/false,
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{},
+ /*setDeclareAttr*/ false);
loopOp.getCacheOperandsMutable().append(cacheOperands);
} else {
llvm::report_fatal_error(
diff --git a/flang/test/Lower/OpenACC/acc-data.f90 b/flang/test/Lower/OpenACC/acc-data.f90
index f120be272991a..6e0ecb9129061 100644
--- a/flang/test/Lower/OpenACC/acc-data.f90
+++ b/flang/test/Lower/OpenACC/acc-data.f90
@@ -155,11 +155,13 @@ subroutine acc_data
! CHECK: acc.data dataOperands(%{{.*}}) {
! CHECK: } attributes {asyncOnly = [#acc.device_type<none>]}
- !$acc data present(a) async(1)
+ !$acc data copy(a) async(1)
!$acc end data
-! CHECK: acc.data async(%{{.*}} : i32) dataOperands(%{{.*}}) {
+! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC:.*]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
+! CHECK: acc.data async(%[[ASYNC]] : i32) dataOperands(%[[COPYIN]] : !fir.ref<!fir.array<10x10xf32>>) {
! CHECK: }{{$}}
+! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC]] : i32) to varPtr(%{{.*}} : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "a"}
!$acc data present(a) wait
!$acc end data
diff --git a/flang/test/Lower/OpenACC/acc-enter-data.f90 b/flang/test/Lower/OpenACC/acc-enter-data.f90
index 251edbf9c2dd0..80326a1012376 100644
--- a/flang/test/Lower/OpenACC/acc-enter-data.f90
+++ b/flang/test/Lower/OpenACC/acc-enter-data.f90
@@ -94,7 +94,7 @@ subroutine acc_enter_data
!$acc enter data create(a) async
!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
+!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {asyncOnly = [#acc.device_type<none>], name = "a", structured = false}
!CHECK: acc.enter_data dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>) attributes {async}
!$acc enter data create(a) wait
@@ -106,22 +106,22 @@ subroutine acc_enter_data
!$acc enter data create(a) async wait
!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
+!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {asyncOnly = [#acc.device_type<none>], name = "a", structured = false}
!CHECK: acc.enter_data dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>) attributes {async, wait}
!$acc enter data create(a) async(1)
+!CHECK: %[[ASYNC1:.*]] = arith.constant 1 : i32
!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[ASYNC1:.*]] = arith.constant 1 : i32
+!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) async(%[[ASYNC1]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
!CHECK: acc.enter_data async(%[[ASYNC1]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
!$acc enter data create(a) async(async)
+!CHECK: %[[ASYNC2:.*]] = fir.load %{{.*}} : !fir.ref<i32>
!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[ASYNC2:.*]] = fir.load %{{.*}} : !fir.ref<i32>
+!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) async(%[[ASYNC2]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
!CHECK: acc.enter_data async(%[[ASYNC2]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
!$acc enter data create(a) wait(1)
diff --git a/flang/test/Lower/OpenACC/acc-exit-data.f90 b/flang/test/Lower/OpenACC/acc-exit-data.f90
index 6600f08d5bcfe..017f1f38f8397 100644
--- a/flang/test/Lower/OpenACC/acc-exit-data.f90
+++ b/flang/test/Lower/OpenACC/acc-exit-data.f90
@@ -56,9 +56,9 @@ subroutine acc_exit_data
!CHECK: acc.detach accPtr(%[[DEVPTR_D]] : !fir.ptr<f32>) {name = "d", structured = false}
!$acc exit data delete(a) async
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
+!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {asyncOnly = [#acc.device_type<none>], dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
!CHECK: acc.exit_data dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) attributes {async}
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
+!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {asyncOnly = [#acc.device_type<none>], name = "a", structured = false}
!$acc exit data delete(a) wait
!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
@@ -66,22 +66,22 @@ subroutine acc_exit_data
!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
!$acc exit data delete(a) async wait
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
+!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {asyncOnly = [#acc.device_type<none>], dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
!CHECK: acc.exit_data dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) attributes {async, wait}
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
+!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {asyncOnly = [#acc.device_type<none>], name = "a", structured = false}
!$acc exit data delete(a) async(1)
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
!CHECK: %[[ASYNC1:.*]] = arith.constant 1 : i32
+!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC1]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
!CHECK: acc.exit_data async(%[[ASYNC1]] : i32) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
+!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC1]] : i32) {name = "a", structured = false}
!$acc exit data delete(a) async(async)
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
!CHECK: %[[ASYNC2:.*]] = fir.load %{{.*}} : !fir.ref<i32>
+!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC2]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
!CHECK: acc.exit_data async(%[[ASYNC2]] : i32) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
+!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC2]] : i32) {name = "a", structured = false}
!$acc exit data delete(a) wait(1)
!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
diff --git a/flang/test/Lower/OpenACC/acc-parallel.f90 b/flang/test/Lower/OpenACC/acc-parallel.f90
index 4b18a8d037f22..5197e2b0bee09 100644
--- a/flang/test/Lower/OpenACC/acc-parallel.f90
+++ b/flang/test/Lower/OpenACC/acc-parallel.f90
@@ -60,7 +60,7 @@ subroutine acc_parallel
!$acc parallel async
!$acc end parallel
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {asyncOnly = [#acc.device_type<none>]}
@@ -76,7 +76,7 @@ subroutine acc_parallel
!$acc end parallel
! CHECK: [[ASYNC2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.parallel async([[ASYNC2]] : i32) {
+! CHECK-NEXT: acc.parallel async([[ASYNC2]] : i32) {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -324,13 +324,13 @@ subroutine acc_parallel
! CHECK: acc.detach accPtr(%[[ATTACH_D]] : !fir.ptr<f32>) {dataClause = #acc<data_clause acc_attach>, name = "d"}
! CHECK: acc.detach accPtr(%[[ATTACH_E]] : !fir.ptr<f32>) {dataClause = #acc<data_clause acc_attach>, name = "e"}
-!$acc parallel private(a) firstprivate(b) private(c)
+!$acc parallel private(a) firstprivate(b) private(c) async(1)
!$acc end parallel
-! CHECK: %[[ACC_PRIVATE_A:.*]] = acc.private varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "a"}
-! CHECK: %[[ACC_FPRIVATE_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "b"}
-! CHECK: %[[ACC_PRIVATE_C:.*]] = acc.private varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "c"}
-! CHECK: acc.parallel firstprivate(@firstprivatization_section_ext10xext10_ref_10x10xf32 -> %[[ACC_FPRIVATE_B]] : !fir.ref<!fir.array<10x10xf32>>) private(@privatization_ref_10x10xf32 -> %[[ACC_PRIVATE_A]] : !fir.ref<!fir.array<10x10xf32>>, @privatization_ref_10x10xf32 -> %[[ACC_PRIVATE_C]] : !fir.ref<!fir.array<10x10xf32>>) {
+! CHECK: %[[ACC_PRIVATE_A:.*]] = acc.private varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async([[ASYNC3:%.*]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a"}
+! CHECK: %[[ACC_FPRIVATE_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async([[ASYNC3]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "b"}
+! CHECK: %[[ACC_PRIVATE_C:.*]] = acc.private varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async([[ASYNC3]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "c"}
+! CHECK: acc.parallel async([[ASYNC3]]) firstprivate(@firstprivatization_section_ext10xext10_ref_10x10xf32 -> %[[ACC_FPRIVATE_B]] : !fir.ref<!fir.array<10x10xf32>>) private(@privatization_ref_10x10xf32 -> %[[ACC_PRIVATE_A]] : !fir.ref<!fir.array<10x10xf32>>, @privatization_ref_10x10xf32 -> %[[ACC_PRIVATE_C]] : !fir.ref<!fir.array<10x10xf32>>) {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
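The parallel case shows the same propagation: once the construct carries `async(1)`, the private and firstprivate data clause ops inherit the async value. A minimal sketch of the IR matched above (hand-written; SSA names are illustrative and bounds are elided):

    %prv = acc.private varPtr(%a : !fir.ref<!fir.array<10x10xf32>>) async(%c1 : i32) -> !fir.ref<!fir.array<10x10xf32>> {name = "a"}
    %fpv = acc.firstprivate varPtr(%b : !fir.ref<!fir.array<10x10xf32>>) async(%c1 : i32) -> !fir.ref<!fir.array<10x10xf32>> {name = "b"}
    acc.parallel async(%c1 : i32) firstprivate(@firstprivatization_section_ext10xext10_ref_10x10xf32 -> %fpv : !fir.ref<!fir.array<10x10xf32>>) private(@privatization_ref_10x10xf32 -> %prv : !fir.ref<!fir.array<10x10xf32>>) {
      acc.yield
    }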
diff --git a/flang/test/Lower/OpenACC/acc-serial.f90 b/flang/test/Lower/OpenACC/acc-serial.f90
index c6fe6c3d58fe5..284f61976a46d 100644
--- a/flang/test/Lower/OpenACC/acc-serial.f90
+++ b/flang/test/Lower/OpenACC/acc-serial.f90
@@ -60,7 +60,7 @@ subroutine acc_serial
!$acc serial async
!$acc end serial
-! CHECK: acc.serial {
+! CHECK: acc.serial {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {asyncOnly = [#acc.device_type<none>]}
@@ -76,7 +76,7 @@ subroutine acc_serial
!$acc end serial
! CHECK: [[ASYNC2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.serial async([[ASYNC2]] : i32) {
+! CHECK-NEXT: acc.serial async([[ASYNC2]] : i32) {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
diff --git a/flang/test/Lower/OpenACC/acc-update.f90 b/flang/test/Lower/OpenACC/acc-update.f90
index bab21f82152b2..0964fd91457f9 100644
--- a/flang/test/Lower/OpenACC/acc-update.f90
+++ b/flang/test/Lower/OpenACC/acc-update.f90
@@ -60,9 +60,9 @@ subroutine acc_update
! CHECK: acc.update_host accPtr(%[[DEVPTR_B]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "b", structured = false}
!$acc update host(a) async
-! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
+! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {asyncOnly = [#acc.device_type<none>], dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: acc.update async dataOperands(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>)
-! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
+! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {asyncOnly = [#acc.device_type<none>], name = "a", structured = false}
!$acc update host(a) wait
! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
@@ -70,32 +70,32 @@ subroutine acc_update
! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
!$acc update host(a) async wait
-! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
+! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {asyncOnly = [#acc.device_type<none>], dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: acc.update async wait dataOperands(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>)
-! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
+! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {asyncOnly = [#acc.device_type<none>], name = "a", structured = false}
!$acc update host(a) async(1)
-! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: [[ASYNC1:%.*]] = arith.constant 1 : i32
+! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async([[ASYNC1]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: acc.update async([[ASYNC1]] : i32) dataOperands(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>)
-! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
+! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async([[ASYNC1]] : i32) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
!$acc update host(a) async(async)
-! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: [[ASYNC2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
+! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async([[ASYNC2]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: acc.update async([[ASYNC2]] : i32) dataOperands(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>)
-! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
+! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async([[ASYNC2]] : i32) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
!$acc update host(a) wait(1)
-! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: [[WAIT1:%.*]] = arith.constant 1 : i32
+! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: acc.update wait({[[WAIT1]] : i32}) dataOperands(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>)
! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
!$acc update host(a) wait(queues: 1, 2)
-! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: [[WAIT2:%.*]] = arith.constant 1 : i32
! CHECK: [[WAIT3:%.*]] = arith.constant 2 : i32
+! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: acc.update wait({[[WAIT2]] : i32, [[WAIT3]] : i32}) dataOperands(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>)
! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
@@ -105,8 +105,8 @@ subroutine acc_update
! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
!$acc update host(a) device_type(host, nvidia) async
-! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
+! CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {asyncOnly = [#acc.device_type<host>, #acc.device_type<nvidia>], dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
! CHECK: acc.update async([#acc.device_type<host>, #acc.device_type<nvidia>]) dataOperands(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>)
-! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
+! CHECK: acc.update_host accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {asyncOnly = [#acc.device_type<host>, #acc.device_type<nvidia>], name = "a", structured = false}
end subroutine acc_update
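Taken together, the update tests check a three-op decomposition in which the async value (or the asyncOnly marker) now appears on every op of the sequence. A sketch of the `async(1)` case, hand-written with illustrative names and elided bounds:

    %c1 = arith.constant 1 : i32
    %dp = acc.getdeviceptr varPtr(%a : !fir.ref<!fir.array<10x10xf32>>) async(%c1 : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_update_host>, name = "a", structured = false}
    acc.update async(%c1 : i32) dataOperands(%dp : !fir.ref<!fir.array<10x10xf32>>)
    acc.update_host accPtr(%dp : !fir.ref<!fir.array<10x10xf32>>) async(%c1 : i32) to varPtr(%a : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}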
diff --git a/mlir/include/mlir/Dialect/OpenACC/OpenACC.h b/mlir/include/mlir/Dialect/OpenACC/OpenACC.h
index c53a437ac092b..8239367fdd3e7 100644
--- a/mlir/include/mlir/Dialect/OpenACC/OpenACC.h
+++ b/mlir/include/mlir/Dialect/OpenACC/OpenACC.h
@@ -101,6 +101,21 @@ mlir::Value getVarPtrPtr(mlir::Operation *accDataClauseOp);
/// Returns an empty vector if there are no bounds.
mlir::SmallVector<mlir::Value> getBounds(mlir::Operation *accDataClauseOp);
+/// Used to obtain `async` operands from an acc data clause operation.
+/// Returns an empty vector if there are no such operands.
+mlir::SmallVector<mlir::Value>
+getAsyncOperands(mlir::Operation *accDataClauseOp);
+
+/// Returns an array of acc::DeviceTypeAttr attributes attached to
+/// an acc data clause operation that correspond to the device types
+/// for which the async clause carries an async-value.
+mlir::ArrayAttr getAsyncOperandsDeviceType(mlir::Operation *accDataClauseOp);
+
+/// Returns an array of acc::DeviceTypeAttr attributes attached to
+/// an acc data clause operation that correspond to the device types
+/// for which the async clause does not carry an async-value.
+mlir::ArrayAttr getAsyncOnly(mlir::Operation *accDataClauseOp);
+
/// Used to obtain the `name` from an acc operation.
std::optional<llvm::StringRef> getVarName(mlir::Operation *accOp);
@@ -147,6 +162,11 @@ struct ConstructResource
mlir::StringRef getName() final { return "AccConstructResource"; }
};
+struct CurrentDeviceIdResource
+ : public mlir::SideEffects::Resource::Base<CurrentDeviceIdResource> {
+ mlir::StringRef getName() final { return "AccCurrentDeviceIdResource"; }
+};
+
} // namespace acc
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td b/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
index 361ede110ed13..dc255e772841c 100644
--- a/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
+++ b/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
@@ -236,6 +236,43 @@ def OpenACC_CombinedConstructsAttr : EnumAttr<OpenACC_Dialect,
  let assemblyFormat = [{ `<` $value `>` }];
}
+def OpenACC_ParallelConstruct : I64EnumAttrCase<"acc_construct_parallel", 0>;
+def OpenACC_KernelsConstruct : I64EnumAttrCase<"acc_construct_kernels", 1>;
+def OpenACC_LoopConstruct : I64EnumAttrCase<"acc_construct_loop", 2>;
+def OpenACC_DataConstruct : I64EnumAttrCase<"acc_construct_data", 3>;
+def OpenACC_EnterDataConstruct : I64EnumAttrCase<"acc_construct_enter_data", 4>;
+def OpenACC_ExitDataConstruct : I64EnumAttrCase<"acc_construct_exit_data", 5>;
+def OpenACC_HostDataConstruct : I64EnumAttrCase<"acc_construct_host_data", 6>;
+def OpenACC_AtomicConstruct : I64EnumAttrCase<"acc_construct_atomic", 7>;
+def OpenACC_DeclareConstruct : I64EnumAttrCase<"acc_construct_declare", 8>;
+def OpenACC_InitConstruct : I64EnumAttrCase<"acc_construct_init", 9>;
+def OpenACC_ShutdownConstruct : I64EnumAttrCase<"acc_construct_shutdown", 10>;
+def OpenACC_SetConstruct : I64EnumAttrCase<"acc_construct_set", 11>;
+def OpenACC_UpdateConstruct : I64EnumAttrCase<"acc_construct_update", 12>;
+def OpenACC_RoutineConstruct : I64EnumAttrCase<"acc_construct_routine", 13>;
+def OpenACC_WaitConstruct : I64EnumAttrCase<"acc_construct_wait", 14>;
+def OpenACC_RuntimeAPIConstruct : I64EnumAttrCase<"acc_construct_runtime_api", 15>;
+def OpenACC_SerialConstruct : I64EnumAttrCase<"acc_construct_serial", 16>;
+
+def OpenACC_ConstructEnum : I64EnumAttr<"Construct",
+ "constructs supported by OpenACC",
+ [OpenACC_ParallelConstruct, OpenACC_KernelsConstruct,
+ OpenACC_LoopConstruct, OpenACC_DataConstruct,
+ OpenACC_EnterDataConstruct, OpenACC_ExitDataConstruct,
+ OpenACC_HostDataConstruct, OpenACC_AtomicConstruct,
+ OpenACC_DeclareConstruct, OpenACC_InitConstruct,
+ OpenACC_ShutdownConstruct, OpenACC_SetConstruct,
+ OpenACC_UpdateConstruct, OpenACC_RoutineConstruct,
+ OpenACC_WaitConstruct, OpenACC_RuntimeAPIConstruct,
+ OpenACC_SerialConstruct
+ ]> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::mlir::acc";
+}
+
+def OpenACC_ConstructAttr : EnumAttr<OpenACC_Dialect, OpenACC_ConstructEnum,
+ "construct">;
+
// Define a resource for the OpenACC runtime counters.
def OpenACC_RuntimeCounters : Resource<"::mlir::acc::RuntimeCounters">;
@@ -245,6 +282,9 @@ def OpenACC_RuntimeCounters : Resource<"::mlir::acc::RuntimeCounters">;
// `dataOperands` list).
def OpenACC_ConstructResource : Resource<"::mlir::acc::ConstructResource">;
+// Define a resource for the OpenACC current device setting.
+def OpenACC_CurrentDeviceIdResource : Resource<"::mlir::acc::CurrentDeviceIdResource">;
+
// Used for data specification in data clauses (2.7.1).
// Either (or both) extent and upperbound must be specified.
def OpenACC_DataBoundsOp : OpenACC_Op<"bounds",
@@ -320,10 +360,15 @@ def OpenACC_DataBoundsOp : OpenACC_Op<"bounds",
class OpenACC_DataEntryOp<string mnemonic, string clause, string extraDescription,
list<Trait> traits = [], dag additionalArgs = (ins)> :
OpenACC_Op<mnemonic, !listconcat(traits,
- [AttrSizedOperandSegments])> {
+ [AttrSizedOperandSegments,
+ MemoryEffects<[MemRead<OpenACC_CurrentDeviceIdResource>]>])> {
let arguments = !con(additionalArgs,
- (ins Optional<OpenACC_PointerLikeTypeInterface>:$varPtrPtr,
+ (ins
+ Optional<OpenACC_PointerLikeTypeInterface>:$varPtrPtr,
Variadic<OpenACC_DataBoundsType>:$bounds, /* rank-0 to rank-{n-1} */
+ Variadic<IntOrIndex>:$asyncOperands,
+ OptionalAttr<DeviceTypeArrayAttr>:$asyncOperandsDeviceType,
+ OptionalAttr<DeviceTypeArrayAttr>:$asyncOnly,
DefaultValuedAttr<OpenACC_DataClauseAttr,clause>:$dataClause,
DefaultValuedAttr<BoolAttr, "true">:$structured,
DefaultValuedAttr<BoolAttr, "false">:$implicit,
@@ -337,6 +382,10 @@ class OpenACC_DataEntryOp<string mnemonic, string clause, string extraDescriptio
attach semantics on data clauses (2.6.4).
- `bounds`: Used when copying just slice of array or array's bounds are not
encoded in type. They are in rank order where rank 0 is inner-most dimension.
+    - `asyncOperands` and `asyncOperandsDeviceType`:
+      pair-wise lists of async clause values and the device_types they apply to.
+    - `asyncOnly`: a list of device_types for which the async clause
+      does not specify a value (the default is acc_async_noval, OpenACC 3.3 2.16.1).
- `dataClause`: Keeps track of the data clause the user used. This is because
the acc operations are decomposed. So a 'copy' clause is decomposed to both
`acc.copyin` and `acc.copyout` operations, but both have dataClause that
@@ -348,13 +397,54 @@ class OpenACC_DataEntryOp<string mnemonic, string clause, string extraDescriptio
- `implicit`: Whether this is an implicitly generated operation, such as copies
done to satisfy "Variables with Implicitly Determined Data Attributes" in 2.6.2.
- `name`: Holds the name of variable as specified in user clause (including bounds).
+
+    The async values attached to the data entry operation imply that the data
+    action applies to all device types specified by the device_type clauses,
+    using the activity queues on these devices as defined by the async values.
}]);
+ code extraClassDeclarationBase = [{
+ /// Return true if the op has the async attribute for the
+ /// mlir::acc::DeviceType::None device_type.
+ bool hasAsyncOnly() {
+ return hasAsyncOnly(mlir::acc::DeviceType::None);
+ }
+ /// Return true if the op has the async attribute for the given device_type.
+ bool hasAsyncOnly(mlir::acc::DeviceType deviceType) {
+ for (auto attr : getAsyncOnlyAttr()) {
+ auto deviceTypeAttr = mlir::dyn_cast<mlir::acc::DeviceTypeAttr>(attr);
+ if (deviceTypeAttr.getValue() == deviceType)
+ return true;
+ }
+ return false;
+ }
+ /// Return the value of the async clause if present.
+ mlir::Value getAsyncValue() {
+ return getAsyncValue(mlir::acc::DeviceType::None);
+ }
+ /// Return the value of the async clause for the given device_type if
+ /// present.
+ mlir::Value getAsyncValue(mlir::acc::DeviceType deviceType) {
+ mlir::ArrayAttr deviceTypes = getAsyncOperandsDeviceTypeAttr();
+ if (!deviceTypes)
+ return nullptr;
+ for (auto [attr, asyncValue] :
+ llvm::zip(deviceTypes, getAsyncOperands())) {
+ auto deviceTypeAttr = mlir::dyn_cast<mlir::acc::DeviceTypeAttr>(attr);
+ if (deviceTypeAttr.getValue() == deviceType)
+ return asyncValue;
+ }
+ return nullptr;
+ }
+ }];
+
let assemblyFormat = [{
`varPtr` `(` $varPtr `:` type($varPtr) `)`
oilist(
`varPtrPtr` `(` $varPtrPtr `:` type($varPtrPtr) `)`
| `bounds` `(` $bounds `)`
+ | `async` `(` custom<DeviceTypeOperands>($asyncOperands,
+ type($asyncOperands), $asyncOperandsDeviceType) `)`
) `->` type($accPtr) attr-dict
}];
@@ -370,6 +460,7 @@ def OpenACC_PrivateOp : OpenACC_DataEntryOp<"private",
let summary = "Represents private semantics for acc private clause.";
let results = (outs Arg<OpenACC_PointerLikeTypeInterface,
"Address of device variable",[MemWrite]>:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -382,6 +473,7 @@ def OpenACC_FirstprivateOp : OpenACC_DataEntryOp<"firstprivate",
"clause.";
let results = (outs Arg<OpenACC_PointerLikeTypeInterface,
"Address of device variable",[MemWrite]>:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -393,6 +485,7 @@ def OpenACC_ReductionOp : OpenACC_DataEntryOp<"reduction",
let summary = "Represents reduction semantics for acc reduction clause.";
let results = (outs Arg<OpenACC_PointerLikeTypeInterface,
"Address of device variable",[MemWrite]>:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -404,6 +497,7 @@ def OpenACC_DevicePtrOp : OpenACC_DataEntryOp<"deviceptr",
(ins OpenACC_PointerLikeTypeInterface:$varPtr)> {
let summary = "Specifies that the variable pointer is a device pointer.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -416,6 +510,7 @@ def OpenACC_PresentOp : OpenACC_DataEntryOp<"present",
(ins OpenACC_PointerLikeTypeInterface:$varPtr)> {
let summary = "Specifies that the variable is already present on device.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -431,7 +526,7 @@ def OpenACC_CopyinOp : OpenACC_DataEntryOp<"copyin",
let results = (outs Arg<OpenACC_PointerLikeTypeInterface,
"Address of device variable",[MemWrite]>:$accPtr);
- let extraClassDeclaration = [{
+ let extraClassDeclaration = extraClassDeclarationBase # [{
/// Check if this is a copyin with readonly modifier.
bool isCopyinReadonly();
}];
@@ -450,7 +545,7 @@ def OpenACC_CreateOp : OpenACC_DataEntryOp<"create",
let results = (outs Arg<OpenACC_PointerLikeTypeInterface,
"Address of device variable",[MemWrite]>:$accPtr);
- let extraClassDeclaration = [{
+ let extraClassDeclaration = extraClassDeclarationBase # [{
/// Check if this is a create with zero modifier.
bool isCreateZero();
}];
@@ -466,6 +561,7 @@ def OpenACC_NoCreateOp : OpenACC_DataEntryOp<"nocreate",
(ins OpenACC_PointerLikeTypeInterface:$varPtr)> {
let summary = "Represents acc no_create semantics.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -480,6 +576,7 @@ def OpenACC_AttachOp : OpenACC_DataEntryOp<"attach",
"device memory with the corresponding device address of the "
"pointee.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -502,6 +599,7 @@ def OpenACC_GetDevicePtrOp : OpenACC_DataEntryOp<"getdeviceptr",
let summary = "Gets device address if variable exists on device.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
let hasVerifier = 0;
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -513,6 +611,7 @@ def OpenACC_UpdateDeviceOp : OpenACC_DataEntryOp<"update_device",
let summary = "Represents acc update device semantics.";
let results = (outs Arg<OpenACC_PointerLikeTypeInterface,
"Address of device variable",[MemWrite]>:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -524,6 +623,7 @@ def OpenACC_UseDeviceOp : OpenACC_DataEntryOp<"use_device",
(ins OpenACC_PointerLikeTypeInterface:$varPtr)> {
let summary = "Represents acc use_device semantics.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -535,6 +635,7 @@ def OpenACC_DeclareDeviceResidentOp : OpenACC_DataEntryOp<"declare_device_reside
(ins Arg<OpenACC_PointerLikeTypeInterface,"Address of variable",[MemRead]>:$varPtr)> {
let summary = "Represents acc declare device_resident semantics.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -546,6 +647,7 @@ def OpenACC_DeclareLinkOp : OpenACC_DataEntryOp<"declare_link",
(ins Arg<OpenACC_PointerLikeTypeInterface,"Address of variable",[MemRead]>:$varPtr)> {
let summary = "Represents acc declare link semantics.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
+ let extraClassDeclaration = extraClassDeclarationBase;
}
//===----------------------------------------------------------------------===//
@@ -558,7 +660,7 @@ def OpenACC_CacheOp : OpenACC_DataEntryOp<"cache",
"loop.";
let results = (outs OpenACC_PointerLikeTypeInterface:$accPtr);
- let extraClassDeclaration = [{
+ let extraClassDeclaration = extraClassDeclarationBase # [{
/// Check if this is a cache with readonly modifier.
bool isCacheReadonly() {
return getDataClause() == acc::DataClause::acc_cache_readonly;
@@ -572,9 +674,14 @@ def OpenACC_CacheOp : OpenACC_DataEntryOp<"cache",
// operations for the following OpenACC data clauses: copyout, detach, delete.
class OpenACC_DataExitOp<string mnemonic, string clause, string extraDescription,
list<Trait> traits = [], dag additionalArgs = (ins)> :
- OpenACC_Op<mnemonic, !listconcat(traits, [])> {
+ OpenACC_Op<mnemonic, !listconcat(traits,
+ [AttrSizedOperandSegments,
+ MemoryEffects<[MemRead<OpenACC_CurrentDeviceIdResource>]>])> {
let arguments = !con(additionalArgs,
(ins Variadic<OpenACC_DataBoundsType>:$bounds,
+ Variadic<IntOrIndex>:$asyncOperands,
+ OptionalAttr<DeviceTypeArrayAttr>:$asyncOperandsDeviceType,
+ OptionalAttr<DeviceTypeArrayAttr>:$asyncOnly,
DefaultValuedAttr<OpenACC_DataClauseAttr,clause>:$dataClause,
DefaultValuedAttr<BoolAttr, "true">:$structured,
DefaultValuedAttr<BoolAttr, "false">:$implicit,
@@ -585,6 +692,10 @@ class OpenACC_DataExitOp<string mnemonic, string clause, string extraDescription
operation used.
- `bounds`: Used when copying just slice of array or array's bounds are not
encoded in type. They are in rank order where rank 0 is inner-most dimension.
+    - `asyncOperands` and `asyncOperandsDeviceType`:
+      pair-wise lists of async clause values and the device_types they apply to.
+    - `asyncOnly`: a list of device_types for which the async clause
+      does not specify a value (the default is acc_async_noval, OpenACC 3.3 2.16.1).
- `dataClause`: Keeps track of the data clause the user used. This is because
the acc operations are decomposed. So a 'copy' clause is decomposed to both
`acc.copyin` and `acc.copyout` operations, but both have dataClause that
@@ -596,13 +707,54 @@ class OpenACC_DataExitOp<string mnemonic, string clause, string extraDescription
- `implicit`: Whether this is an implicitly generated operation, such as copies
done to satisfy "Variables with Implicitly Determined Data Attributes" in 2.6.2.
- `name`: Holds the name of variable as specified in user clause (including bounds).
+
+    The async values attached to the data exit operation imply that the data
+    action applies to all device types specified by the device_type clauses,
+    using the activity queues on these devices as defined by the async values.
}]);
+ code extraClassDeclarationBase = [{
+ /// Return true if the op has the async attribute for the
+ /// mlir::acc::DeviceType::None device_type.
+ bool hasAsyncOnly() {
+ return hasAsyncOnly(mlir::acc::DeviceType::None);
+ }
+ /// Return true if the op has the async attribute for the given device_type.
+ bool hasAsyncOnly(mlir::acc::DeviceType deviceType) {
+ for (auto attr : getAsyncOnlyAttr()) {
+ auto deviceTypeAttr = mlir::dyn_cast<mlir::acc::DeviceTypeAttr>(attr);
+ if (deviceTypeAttr.getValue() == deviceType)
+ return true;
+ }
+ return false;
+ }
+ /// Return the value of the async clause if present.
+ mlir::Value getAsyncValue() {
+ return getAsyncValue(mlir::acc::DeviceType::None);
+ }
+ /// Return the value of the async clause for the given device_type if
+ /// present.
+ mlir::Value getAsyncValue(mlir::acc::DeviceType deviceType) {
+ mlir::ArrayAttr deviceTypes = getAsyncOperandsDeviceTypeAttr();
+ if (!deviceTypes)
+ return nullptr;
+ for (auto [attr, asyncValue] :
+ llvm::zip(deviceTypes, getAsyncOperands())) {
+ auto deviceTypeAttr = mlir::dyn_cast<mlir::acc::DeviceTypeAttr>(attr);
+ if (deviceTypeAttr.getValue() == deviceType)
+ return asyncValue;
+ }
+ return nullptr;
+ }
+ }];
+
let assemblyFormat = [{
`accPtr` `(` $accPtr `:` type($accPtr) `)`
oilist(
`bounds` `(` $bounds `)`
| `to` `varPtr` `(` $varPtr `:` type($varPtr) `)`
+ | `async` `(` custom<DeviceTypeOperands>($asyncOperands,
+ type($asyncOperands), $asyncOperandsDeviceType) `)`
) attr-dict
}];
@@ -621,7 +773,7 @@ def OpenACC_CopyoutOp : OpenACC_DataExitOp<"copyout",
Arg<OpenACC_PointerLikeTypeInterface,"Address of variable",[MemWrite]>:$varPtr)> {
let summary = "Represents acc copyout semantics - reverse of copyin.";
- let extraClassDeclaration = [{
+ let extraClassDeclaration = extraClassDeclarationBase # [{
/// Check if this is a copyout with zero modifier.
bool isCopyoutZero();
}];
@@ -629,6 +781,8 @@ def OpenACC_CopyoutOp : OpenACC_DataExitOp<"copyout",
let assemblyFormat = [{
`accPtr` `(` $accPtr `:` type($accPtr) `)`
(`bounds` `(` $bounds^ `)` )?
+ (`async` `(` custom<DeviceTypeOperands>($asyncOperands,
+ type($asyncOperands), $asyncOperandsDeviceType)^ `)`)?
`to` `varPtr` `(` $varPtr `:` type($varPtr) `)`
attr-dict
}];
@@ -644,9 +798,13 @@ def OpenACC_DeleteOp : OpenACC_DataExitOp<"delete",
(ins Arg<OpenACC_PointerLikeTypeInterface,"Address of device variable",[MemRead]>:$accPtr)> {
let summary = "Represents acc delete semantics - reverse of create.";
+ let extraClassDeclaration = extraClassDeclarationBase;
+
let assemblyFormat = [{
`accPtr` `(` $accPtr `:` type($accPtr) `)`
(`bounds` `(` $bounds^ `)` )?
+ (`async` `(` custom<DeviceTypeOperands>($asyncOperands,
+ type($asyncOperands), $asyncOperandsDeviceType)^ `)`)?
attr-dict
}];
}
@@ -661,9 +819,13 @@ def OpenACC_DetachOp : OpenACC_DataExitOp<"detach",
(ins Arg<OpenACC_PointerLikeTypeInterface,"Address of device variable",[MemRead]>:$accPtr)> {
let summary = "Represents acc detach semantics - reverse of attach.";
+ let extraClassDeclaration = extraClassDeclarationBase;
+
let assemblyFormat = [{
`accPtr` `(` $accPtr `:` type($accPtr) `)`
(`bounds` `(` $bounds^ `)` )?
+ (`async` `(` custom<DeviceTypeOperands>($asyncOperands,
+ type($asyncOperands), $asyncOperandsDeviceType)^ `)`)?
attr-dict
}];
}
@@ -679,7 +841,7 @@ def OpenACC_UpdateHostOp : OpenACC_DataExitOp<"update_host",
(ins Arg<OpenACC_PointerLikeTypeInterface,"Address of device variable",[MemRead]>:$accPtr,
Arg<OpenACC_PointerLikeTypeInterface,"Address of variable",[MemWrite]>:$varPtr)> {
let summary = "Represents acc update host semantics.";
- let extraClassDeclaration = [{
+ let extraClassDeclaration = extraClassDeclarationBase # [{
/// Check if this is an acc update self.
bool isSelf() {
return getDataClause() == acc::DataClause::acc_update_self;
@@ -689,6 +851,8 @@ def OpenACC_UpdateHostOp : OpenACC_DataExitOp<"update_host",
let assemblyFormat = [{
`accPtr` `(` $accPtr `:` type($accPtr) `)`
(`bounds` `(` $bounds^ `)` )?
+ (`async` `(` custom<DeviceTypeOperands>($asyncOperands,
+ type($asyncOperands), $asyncOperandsDeviceType)^ `)`)?
`to` `varPtr` `(` $varPtr `:` type($varPtr) `)`
attr-dict
}];
@@ -904,7 +1068,8 @@ def OpenACC_ReductionRecipeOp : OpenACC_Op<"reduction.recipe",
def OpenACC_ParallelOp : OpenACC_Op<"parallel",
[AttrSizedOperandSegments, RecursiveMemoryEffects,
- MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "parallel construct";
let description = [{
The "acc.parallel" operation represents a parallel construct block. It has
@@ -1009,6 +1174,9 @@ def OpenACC_ParallelOp : OpenACC_Op<"parallel",
/// Return the wait devnum value clause for the given device_type if
/// present.
mlir::Value getWaitDevnum(mlir::acc::DeviceType deviceType);
+ static mlir::acc::Construct getConstructId() {
+ return mlir::acc::Construct::acc_construct_parallel;
+ }
}];
let assemblyFormat = [{
@@ -1050,7 +1218,8 @@ def OpenACC_ParallelOp : OpenACC_Op<"parallel",
def OpenACC_SerialOp : OpenACC_Op<"serial",
[AttrSizedOperandSegments, RecursiveMemoryEffects,
- MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "serial construct";
let description = [{
The "acc.serial" operation represents a serial construct block. It has
@@ -1127,6 +1296,9 @@ def OpenACC_SerialOp : OpenACC_Op<"serial",
/// Return the wait devnum value clause for the given device_type if
/// present.
mlir::Value getWaitDevnum(mlir::acc::DeviceType deviceType);
+ static mlir::acc::Construct getConstructId() {
+ return mlir::acc::Construct::acc_construct_serial;
+ }
}];
let assemblyFormat = [{
@@ -1162,7 +1334,8 @@ def OpenACC_SerialOp : OpenACC_Op<"serial",
def OpenACC_KernelsOp : OpenACC_Op<"kernels",
[AttrSizedOperandSegments, RecursiveMemoryEffects,
- MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "kernels construct";
let description = [{
The "acc.kernels" operation represents a kernels construct block. It has
@@ -1262,6 +1435,9 @@ def OpenACC_KernelsOp : OpenACC_Op<"kernels",
/// Return the wait devnum value clause for the given device_type if
/// present.
mlir::Value getWaitDevnum(mlir::acc::DeviceType deviceType);
+ static mlir::acc::Construct getConstructId() {
+ return mlir::acc::Construct::acc_construct_kernels;
+ }
}];
let assemblyFormat = [{
@@ -1294,7 +1470,8 @@ def OpenACC_KernelsOp : OpenACC_Op<"kernels",
def OpenACC_DataOp : OpenACC_Op<"data",
[AttrSizedOperandSegments, RecursiveMemoryEffects,
- MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "data construct";
let description = [{
@@ -1402,7 +1579,9 @@ def OpenACC_TerminatorOp : OpenACC_Op<"terminator", [Pure, Terminator]> {
//===----------------------------------------------------------------------===//
def OpenACC_EnterDataOp : OpenACC_Op<"enter_data",
- [AttrSizedOperandSegments, MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ [AttrSizedOperandSegments,
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "enter data operation";
let description = [{
@@ -1451,7 +1630,9 @@ def OpenACC_EnterDataOp : OpenACC_Op<"enter_data",
//===----------------------------------------------------------------------===//
def OpenACC_ExitDataOp : OpenACC_Op<"exit_data",
- [AttrSizedOperandSegments, MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ [AttrSizedOperandSegments,
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "exit data operation";
let description = [{
@@ -1501,7 +1682,9 @@ def OpenACC_ExitDataOp : OpenACC_Op<"exit_data",
//===----------------------------------------------------------------------===//
def OpenACC_HostDataOp : OpenACC_Op<"host_data",
- [AttrSizedOperandSegments, MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ [AttrSizedOperandSegments,
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "host_data construct";
let description = [{
@@ -1890,7 +2073,8 @@ def AtomicCaptureOp : OpenACC_Op<"atomic.capture",
//===----------------------------------------------------------------------===//
def OpenACC_DeclareEnterOp : OpenACC_Op<"declare_enter",
- [MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ [MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "declare directive - entry to implicit data region";
let description = [{
@@ -1920,7 +2104,9 @@ def OpenACC_DeclareEnterOp : OpenACC_Op<"declare_enter",
}
def OpenACC_DeclareExitOp : OpenACC_Op<"declare_exit",
- [AttrSizedOperandSegments, MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ [AttrSizedOperandSegments,
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "declare directive - exit from implicit data region";
let description = [{
@@ -2022,7 +2208,8 @@ def OpenACC_GlobalDestructorOp : OpenACC_Op<"global_dtor",
}
def OpenACC_DeclareOp : OpenACC_Op<"declare",
- [RecursiveMemoryEffects, MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ [RecursiveMemoryEffects,
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
let summary = "declare implicit region";
let description = [{
@@ -2227,7 +2414,8 @@ def OpenACC_ShutdownOp : OpenACC_Op<"shutdown", [AttrSizedOperandSegments]> {
// 2.14.3. Set
//===----------------------------------------------------------------------===//
-def OpenACC_SetOp : OpenACC_Op<"set", [AttrSizedOperandSegments]> {
+def OpenACC_SetOp : OpenACC_Op<"set", [AttrSizedOperandSegments,
+ MemoryEffects<[MemWrite<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "set operation";
let description = [{
@@ -2259,7 +2447,9 @@ def OpenACC_SetOp : OpenACC_Op<"set", [AttrSizedOperandSegments]> {
//===----------------------------------------------------------------------===//
def OpenACC_UpdateOp : OpenACC_Op<"update",
- [AttrSizedOperandSegments, MemoryEffects<[MemWrite<OpenACC_ConstructResource>]>]> {
+ [AttrSizedOperandSegments,
+ MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+ MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
let summary = "update operation";
let description = [{
@@ -2362,6 +2552,11 @@ def OpenACC_WaitOp : OpenACC_Op<"wait", [AttrSizedOperandSegments]> {
acc.wait(%value1: index)
acc.wait() async(%async1: i32)
```
+
+    acc.wait does not implement the MemoryEffects interface, so it is
+    treated as affecting all resources. This is conservatively correct;
+    more precise modeling of its memory effects seems impossible without
+    whole-program analysis.
}];
let arguments = (ins Variadic<IntOrIndex>:$waitOperands,
diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
index 1c8ce1ca3bce3..01305898f252d 100644
--- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
+++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
@@ -2880,6 +2880,36 @@ mlir::acc::getBounds(mlir::Operation *accDataClauseOp) {
return bounds;
}
+mlir::SmallVector<mlir::Value>
+mlir::acc::getAsyncOperands(mlir::Operation *accDataClauseOp) {
+ return llvm::TypeSwitch<mlir::Operation *, mlir::SmallVector<mlir::Value>>(
+ accDataClauseOp)
+ .Case<ACC_DATA_ENTRY_OPS, ACC_DATA_EXIT_OPS>([&](auto dataClause) {
+ return mlir::SmallVector<mlir::Value>(
+ dataClause.getAsyncOperands().begin(),
+ dataClause.getAsyncOperands().end());
+ })
+ .Default([&](mlir::Operation *) {
+ return mlir::SmallVector<mlir::Value, 0>();
+ });
+}
+
+mlir::ArrayAttr
+mlir::acc::getAsyncOperandsDeviceType(mlir::Operation *accDataClauseOp) {
+ return llvm::TypeSwitch<mlir::Operation *, mlir::ArrayAttr>(accDataClauseOp)
+ .Case<ACC_DATA_ENTRY_OPS, ACC_DATA_EXIT_OPS>([&](auto dataClause) {
+ return dataClause.getAsyncOperandsDeviceTypeAttr();
+ })
+ .Default([&](mlir::Operation *) { return mlir::ArrayAttr{}; });
+}
+
+mlir::ArrayAttr mlir::acc::getAsyncOnly(mlir::Operation *accDataClauseOp) {
+ return llvm::TypeSwitch<mlir::Operation *, mlir::ArrayAttr>(accDataClauseOp)
+ .Case<ACC_DATA_ENTRY_OPS, ACC_DATA_EXIT_OPS>(
+ [&](auto dataClause) { return dataClause.getAsyncOnlyAttr(); })
+ .Default([&](mlir::Operation *) { return mlir::ArrayAttr{}; });
+}
+
std::optional<llvm::StringRef> mlir::acc::getVarName(mlir::Operation *accOp) {
auto name{
llvm::TypeSwitch<mlir::Operation *, std::optional<llvm::StringRef>>(accOp)
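To illustrate what the new accessors observe, consider the two IR forms exercised by the tests above (a hand-written sketch; names are illustrative). On the first op, getAsyncOperands returns the single value %q and getAsyncOnly is empty; on the second, getAsyncOperands is empty and getAsyncOnly holds the device_type<none> entry:

    // async clause with a value: recorded as an operand.
    %dp = acc.getdeviceptr varPtr(%a : !fir.ref<f32>) async(%q : i32) -> !fir.ref<f32> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
    // async clause without a value: recorded in the asyncOnly attribute.
    acc.delete accPtr(%dp : !fir.ref<f32>) {asyncOnly = [#acc.device_type<none>], name = "a", structured = false}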
>From 68a1944070caa33d73390e1a6d956c6abdbea872 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Wed, 3 Jul 2024 10:04:51 +0100
Subject: [PATCH 089/246] [mlir][vector] Project out anonymous bounds in
ScalableValueBoundsConstraintSet (#96499)
If we don't eliminate these columns, then in some cases we fail to
compute a scalable bound. Test case reduced from a real-world example.
---
.../IR/ScalableValueBoundsConstraintSet.cpp | 2 ++
.../Dialect/Vector/test-scalable-bounds.mlir | 28 +++++++++++++++++++
2 files changed, 30 insertions(+)
diff --git a/mlir/lib/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.cpp b/mlir/lib/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.cpp
index 9c365376c84c9..4a826f04e1f1d 100644
--- a/mlir/lib/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.cpp
+++ b/mlir/lib/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.cpp
@@ -8,6 +8,7 @@
#include "mlir/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
+
namespace mlir::vector {
FailureOr<ConstantOrScalableBound::BoundSize>
@@ -74,6 +75,7 @@ ScalableValueBoundsConstraintSet::computeScalableBound(
return p.first != scalableCstr.getVscaleValue() && !isStartingPoint;
};
scalableCstr.projectOut(projectOutFn);
+ scalableCstr.projectOutAnonymous(/*except=*/pos);
// Also project out local variables (these are not tracked by the
// ValueBoundsConstraintSet).
for (unsigned i = 0, e = scalableCstr.cstr.getNumLocalVars(); i < e; ++i) {
diff --git a/mlir/test/Dialect/Vector/test-scalable-bounds.mlir b/mlir/test/Dialect/Vector/test-scalable-bounds.mlir
index 673e03f05c1b8..6af904beb660b 100644
--- a/mlir/test/Dialect/Vector/test-scalable-bounds.mlir
+++ b/mlir/test/Dialect/Vector/test-scalable-bounds.mlir
@@ -215,3 +215,31 @@ func.func @unsupported_negative_mod() {
"test.some_use"(%bound) : (index) -> ()
return
}
+
+// -----
+
+// CHECK: #[[$SCALABLE_BOUND_MAP_5:.*]] = affine_map<()[s0] -> (s0 * 4)>
+
+// CHECK-LABEL: @extract_slice_loop
+// CHECK: %[[VSCALE:.*]] = vector.vscale
+// CHECK: %[[SCALABLE_BOUND:.*]] = affine.apply #[[$SCALABLE_BOUND_MAP_5]]()[%[[VSCALE]]]
+// CHECK: "test.some_use"(%[[SCALABLE_BOUND]]) : (index) -> ()
+
+func.func @extract_slice_loop(%tensor: tensor<1x1x3x?xf32>) {
+ %vscale = vector.vscale
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c2 = arith.constant 2 : index
+ %c3 = arith.constant 3 : index
+ %c4 = arith.constant 4 : index
+ %cst = arith.constant 0.0 : f32
+ %c4_vscale = arith.muli %c4, %vscale : index
+ %slice = tensor.extract_slice %tensor[0, 0, 0, 0] [1, 1, 3, %c4_vscale] [1, 1, 1, 1] : tensor<1x1x3x?xf32> to tensor<1x3x?xf32>
+ %15 = scf.for %arg6 = %c0 to %c3 step %c1 iter_args(%arg = %slice) -> (tensor<1x3x?xf32>) {
+ %dim = tensor.dim %arg, %c2 : tensor<1x3x?xf32>
+ %bound = "test.reify_bound"(%dim) {type = "LB", vscale_min = 1, vscale_max = 16, scalable} : (index) -> index
+ "test.some_use"(%bound) : (index) -> ()
+ scf.yield %arg : tensor<1x3x?xf32>
+ }
+ return
+}
>From a3571376ad9555de07e4d8f74f92de1eaa4c486e Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 11:00:42 +0200
Subject: [PATCH 090/246] [InstCombine] Add test for #97053 (NFC)
---
.../Transforms/InstCombine/scalarization.ll | 33 +++++++++++++++++--
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/llvm/test/Transforms/InstCombine/scalarization.ll b/llvm/test/Transforms/InstCombine/scalarization.ll
index 7e645ef7e883e..781b5bfb7d6ff 100644
--- a/llvm/test/Transforms/InstCombine/scalarization.ll
+++ b/llvm/test/Transforms/InstCombine/scalarization.ll
@@ -156,6 +156,35 @@ define i8 @extract_element_binop_splat_variable_index(<4 x i8> %x, i32 %y) {
ret i8 %r
}
+; We cannot move the extractelement before the sdiv here, because %z may be
+; out of range, making the divisor poison and resulting in immediate UB.
+; FIXME: This is a miscompile.
+define i8 @extract_element_binop_splat_variable_index_may_trap(<4 x i8> %x, <4 x i8> %y, i32 %z) {
+;
+; CHECK-LABEL: @extract_element_binop_splat_variable_index_may_trap(
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sdiv i8 42, [[TMP1]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %b = sdiv <4 x i8> splat (i8 42), %y
+ %r = extractelement <4 x i8> %b, i32 %z
+ ret i8 %r
+}
+
+; Moving the extractelement first is fine here, because the index is known to
+; be valid, so we can't introduce additional poison.
+define i8 @extract_element_binop_constant_index_may_trap(<4 x i8> %x, <4 x i8> %y, i32 %z) {
+;
+; CHECK-LABEL: @extract_element_binop_constant_index_may_trap(
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i64 3
+; CHECK-NEXT: [[R:%.*]] = sdiv i8 42, [[TMP1]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %b = sdiv <4 x i8> splat (i8 42), %y
+ %r = extractelement <4 x i8> %b, i32 3
+ ret i8 %r
+}
+
define i8 @extract_element_binop_splat_with_undef_variable_index(<4 x i8> %x, i32 %y) {
;
; CHECK-LABEL: @extract_element_binop_splat_with_undef_variable_index(
@@ -344,8 +373,8 @@ define i1 @extractelt_vector_fcmp_constrhs_dynidx(<2 x float> %arg, i32 %idx) {
define i1 @extractelt_vector_fcmp_copy_flags(<4 x float> %x) {
; CHECK-LABEL: @extractelt_vector_fcmp_copy_flags(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
-; CHECK-NEXT: [[TMP2:%.*]] = fcmp nsz arcp oeq float [[TMP1]], 0.000000e+00
-; CHECK-NEXT: ret i1 [[TMP2]]
+; CHECK-NEXT: [[R:%.*]] = fcmp nsz arcp oeq float [[TMP1]], 0.000000e+00
+; CHECK-NEXT: ret i1 [[R]]
;
%cmp = fcmp nsz arcp oeq <4 x float> %x, zeroinitializer
%r = extractelement <4 x i1> %cmp, i32 2
>From 4d2ae88d1617a910ec3a1436ce53579523ac2f97 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 10:58:38 +0200
Subject: [PATCH 091/246] [InstCombine] Fix invalid scalarization of div
If the binop is not speculatable, and the extract index is out of
range, then scalarizing will perform the operation on a poison
operand, resulting in immediate UB, instead of the previous
poison result.
Fixes https://github.com/llvm/llvm-project/issues/97053.
---
llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 7 ++++++-
llvm/test/Transforms/InstCombine/scalarization.ll | 5 ++---
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 3de56a4038039..753ed55523c84 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -419,6 +419,7 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
// If extracting a specified index from the vector, see if we can recursively
// find a previously computed scalar that was inserted into the vector.
auto *IndexC = dyn_cast<ConstantInt>(Index);
+ bool HasKnownValidIndex = false;
if (IndexC) {
// Canonicalize type of constant indices to i64 to simplify CSE
if (auto *NewIdx = getPreferredVectorIndex(IndexC))
@@ -426,6 +427,7 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
ElementCount EC = EI.getVectorOperandType()->getElementCount();
unsigned NumElts = EC.getKnownMinValue();
+ HasKnownValidIndex = IndexC->getValue().ult(NumElts);
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(SrcVec)) {
Intrinsic::ID IID = II->getIntrinsicID();
@@ -471,8 +473,11 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
return UnaryOperator::CreateWithCopiedFlags(UO->getOpcode(), E, UO);
}
+ // If the binop is not speculatable, we cannot hoist the extractelement if
+ // it may make the operand poison.
BinaryOperator *BO;
- if (match(SrcVec, m_BinOp(BO)) && cheapToScalarize(SrcVec, Index)) {
+ if (match(SrcVec, m_BinOp(BO)) && cheapToScalarize(SrcVec, Index) &&
+ (HasKnownValidIndex || isSafeToSpeculativelyExecute(BO))) {
// extelt (binop X, Y), Index --> binop (extelt X, Index), (extelt Y, Index)
Value *X = BO->getOperand(0), *Y = BO->getOperand(1);
Value *E0 = Builder.CreateExtractElement(X, Index);
diff --git a/llvm/test/Transforms/InstCombine/scalarization.ll b/llvm/test/Transforms/InstCombine/scalarization.ll
index 781b5bfb7d6ff..2f539ece88320 100644
--- a/llvm/test/Transforms/InstCombine/scalarization.ll
+++ b/llvm/test/Transforms/InstCombine/scalarization.ll
@@ -158,12 +158,11 @@ define i8 @extract_element_binop_splat_variable_index(<4 x i8> %x, i32 %y) {
; We cannot move the extractelement before the sdiv here, because %z may be
; out of range, making the divisor poison and resulting in immediate UB.
-; FIXME: This is a miscompile.
define i8 @extract_element_binop_splat_variable_index_may_trap(<4 x i8> %x, <4 x i8> %y, i32 %z) {
;
; CHECK-LABEL: @extract_element_binop_splat_variable_index_may_trap(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sdiv i8 42, [[TMP1]]
+; CHECK-NEXT: [[B:%.*]] = sdiv <4 x i8> <i8 42, i8 42, i8 42, i8 42>, [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = extractelement <4 x i8> [[B]], i32 [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%b = sdiv <4 x i8> splat (i8 42), %y
>From d8c07342c021f31f08dee7d15e17f7eca0d1aae0 Mon Sep 17 00:00:00 2001
From: Alexis Engelke <engelke at in.tum.de>
Date: Wed, 3 Jul 2024 11:15:02 +0200
Subject: [PATCH 092/246] [Support] Move raw_ostream::tie to raw_fd_ostream
(#97396)
Originally, tie was introduced by D81156 to flush stdout before writing
to stderr. 030897523 reverted this due to race conditions. Nonetheless,
it does cost performance, causing an extra check in the "cold" path,
which is actually the hot path for raw_svector_ostream. Given that this
feature is only used for errs(), move it to raw_fd_ostream so that it no
longer affects performance of other stream classes.
---
.../clangd/index/remote/server/Server.cpp | 2 -
clang-tools-extra/clangd/tool/ClangdMain.cpp | 2 -
llvm/include/llvm/Support/raw_ostream.h | 22 ++---
llvm/lib/Support/raw_ostream.cpp | 17 ++--
llvm/unittests/Support/raw_ostream_test.cpp | 82 ++++++++++++-------
5 files changed, 71 insertions(+), 54 deletions(-)
diff --git a/clang-tools-extra/clangd/index/remote/server/Server.cpp b/clang-tools-extra/clangd/index/remote/server/Server.cpp
index 4ef3ab6f9af9c..52fca53260a16 100644
--- a/clang-tools-extra/clangd/index/remote/server/Server.cpp
+++ b/clang-tools-extra/clangd/index/remote/server/Server.cpp
@@ -499,8 +499,6 @@ int main(int argc, char *argv[]) {
}
llvm::errs().SetBuffered();
- // Don't flush stdout when logging for thread safety.
- llvm::errs().tie(nullptr);
auto Logger = makeLogger(LogPrefix.getValue(), llvm::errs());
clang::clangd::LoggingSession LoggingSession(*Logger);
diff --git a/clang-tools-extra/clangd/tool/ClangdMain.cpp b/clang-tools-extra/clangd/tool/ClangdMain.cpp
index c3ba655ee2dc6..73000d96c6ca8 100644
--- a/clang-tools-extra/clangd/tool/ClangdMain.cpp
+++ b/clang-tools-extra/clangd/tool/ClangdMain.cpp
@@ -840,8 +840,6 @@ clangd accepts flags on the commandline, and in the CLANGD_FLAGS environment var
// Use buffered stream to stderr (we still flush each log message). Unbuffered
// stream can cause significant (non-deterministic) latency for the logger.
llvm::errs().SetBuffered();
- // Don't flush stdout when logging, this would be both slow and racy!
- llvm::errs().tie(nullptr);
StreamLogger Logger(llvm::errs(), LogLevel);
LoggingSession LoggingSession(Logger);
// Write some initial logs before we start doing any real work.
diff --git a/llvm/include/llvm/Support/raw_ostream.h b/llvm/include/llvm/Support/raw_ostream.h
index 0951ffb19ffa1..df9ee2e5a7858 100644
--- a/llvm/include/llvm/Support/raw_ostream.h
+++ b/llvm/include/llvm/Support/raw_ostream.h
@@ -82,10 +82,6 @@ class raw_ostream {
char *OutBufStart, *OutBufEnd, *OutBufCur;
bool ColorEnabled = false;
- /// Optional stream this stream is tied to. If this stream is written to, the
- /// tied-to stream will be flushed first.
- raw_ostream *TiedStream = nullptr;
-
enum class BufferKind {
Unbuffered = 0,
InternalBuffer,
@@ -360,10 +356,6 @@ class raw_ostream {
bool colors_enabled() const { return ColorEnabled; }
- /// Tie this stream to the specified stream. Replaces any existing tied-to
- /// stream. Specifying a nullptr unties the stream.
- void tie(raw_ostream *TieTo) { TiedStream = TieTo; }
-
//===--------------------------------------------------------------------===//
// Subclass Interface
//===--------------------------------------------------------------------===//
@@ -422,9 +414,6 @@ class raw_ostream {
/// flushing. The result is affected by calls to enable_color().
bool prepare_colors();
- /// Flush the tied-to stream (if present) and then write the required data.
- void flush_tied_then_write(const char *Ptr, size_t Size);
-
virtual void anchor();
};
@@ -475,6 +464,10 @@ class raw_fd_ostream : public raw_pwrite_stream {
bool IsRegularFile = false;
mutable std::optional<bool> HasColors;
+ /// Optional stream this stream is tied to. If this stream is written to, the
+ /// tied-to stream will be flushed first.
+ raw_ostream *TiedStream = nullptr;
+
#ifdef _WIN32
/// True if this fd refers to a Windows console device. Mintty and other
/// terminal emulators are TTYs, but they are not consoles.
@@ -553,6 +546,13 @@ class raw_fd_ostream : public raw_pwrite_stream {
bool has_colors() const override;
+ /// Tie this stream to the specified stream. Replaces any existing tied-to
+ /// stream. Specifying a nullptr unties the stream. This is intended to
+ /// tie errs() to outs(), so that outs() is flushed whenever something is
+ /// written to errs(), preventing weird and hard-to-test output when stderr
+ /// is redirected to stdout.
+ void tie(raw_ostream *TieTo) { TiedStream = TieTo; }
+
std::error_code error() const { return EC; }
/// Return the value of the flag in this raw_fd_ostream indicating whether an
diff --git a/llvm/lib/Support/raw_ostream.cpp b/llvm/lib/Support/raw_ostream.cpp
index 0acb54f76c0bf..2ce54faa9857e 100644
--- a/llvm/lib/Support/raw_ostream.cpp
+++ b/llvm/lib/Support/raw_ostream.cpp
@@ -221,7 +221,7 @@ void raw_ostream::flush_nonempty() {
assert(OutBufCur > OutBufStart && "Invalid call to flush_nonempty.");
size_t Length = OutBufCur - OutBufStart;
OutBufCur = OutBufStart;
- flush_tied_then_write(OutBufStart, Length);
+ write_impl(OutBufStart, Length);
}
raw_ostream &raw_ostream::write(unsigned char C) {
@@ -229,7 +229,7 @@ raw_ostream &raw_ostream::write(unsigned char C) {
if (LLVM_UNLIKELY(OutBufCur >= OutBufEnd)) {
if (LLVM_UNLIKELY(!OutBufStart)) {
if (BufferMode == BufferKind::Unbuffered) {
- flush_tied_then_write(reinterpret_cast<char *>(&C), 1);
+ write_impl(reinterpret_cast<char *>(&C), 1);
return *this;
}
// Set up a buffer and start over.
@@ -249,7 +249,7 @@ raw_ostream &raw_ostream::write(const char *Ptr, size_t Size) {
if (LLVM_UNLIKELY(size_t(OutBufEnd - OutBufCur) < Size)) {
if (LLVM_UNLIKELY(!OutBufStart)) {
if (BufferMode == BufferKind::Unbuffered) {
- flush_tied_then_write(Ptr, Size);
+ write_impl(Ptr, Size);
return *this;
}
// Set up a buffer and start over.
@@ -265,7 +265,7 @@ raw_ostream &raw_ostream::write(const char *Ptr, size_t Size) {
if (LLVM_UNLIKELY(OutBufCur == OutBufStart)) {
assert(NumBytes != 0 && "undefined behavior");
size_t BytesToWrite = Size - (Size % NumBytes);
- flush_tied_then_write(Ptr, BytesToWrite);
+ write_impl(Ptr, BytesToWrite);
size_t BytesRemaining = Size - BytesToWrite;
if (BytesRemaining > size_t(OutBufEnd - OutBufCur)) {
// Too much left over to copy into our buffer.
@@ -306,12 +306,6 @@ void raw_ostream::copy_to_buffer(const char *Ptr, size_t Size) {
OutBufCur += Size;
}
-void raw_ostream::flush_tied_then_write(const char *Ptr, size_t Size) {
- if (TiedStream)
- TiedStream->flush();
- write_impl(Ptr, Size);
-}
-
// Formatted output.
raw_ostream &raw_ostream::operator<<(const format_object_base &Fmt) {
// If we have more than a few bytes left in our output buffer, try
@@ -742,6 +736,9 @@ static bool write_console_impl(int FD, StringRef Data) {
#endif
void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) {
+ if (TiedStream)
+ TiedStream->flush();
+
assert(FD >= 0 && "File already closed.");
pos += Size;
diff --git a/llvm/unittests/Support/raw_ostream_test.cpp b/llvm/unittests/Support/raw_ostream_test.cpp
index 451eda8af51b6..4c4b6cc317be5 100644
--- a/llvm/unittests/Support/raw_ostream_test.cpp
+++ b/llvm/unittests/Support/raw_ostream_test.cpp
@@ -388,9 +388,14 @@ TEST(raw_ostreamTest, flush_tied_to_stream_on_write) {
TiedTo.SetBuffered();
TiedTo << "a";
- std::string Buffer;
- raw_string_ostream TiedStream(Buffer);
+ SmallString<64> Path;
+ int FD;
+ ASSERT_FALSE(sys::fs::createTemporaryFile("tietest", "", FD, Path));
+ FileRemover Cleanup(Path);
+ raw_fd_ostream TiedStream(FD, /*ShouldClose=*/false);
+ TiedStream.SetUnbuffered();
TiedStream.tie(&TiedTo);
+
// Sanity check that the stream hasn't already been flushed.
EXPECT_EQ("", TiedToBuffer);
@@ -435,30 +440,60 @@ TEST(raw_ostreamTest, flush_tied_to_stream_on_write) {
TiedStream << "pq";
EXPECT_EQ("acego", TiedToBuffer);
- // Streams can be tied to each other safely.
+ // Calling tie with nullptr unties the stream.
+ TiedStream.SetUnbuffered();
+ TiedStream.tie(nullptr);
+ TiedTo << "y";
+ TiedStream << "0";
+ EXPECT_EQ("acego", TiedToBuffer);
+
+ TiedTo.flush();
TiedStream.flush();
- Buffer = "";
+}
+
+static void checkFileData(StringRef FileName, StringRef GoldenData) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
+ MemoryBuffer::getFileOrSTDIN(FileName);
+ EXPECT_FALSE(BufOrErr.getError());
+
+ EXPECT_EQ((*BufOrErr)->getBufferSize(), GoldenData.size());
+ EXPECT_EQ(memcmp((*BufOrErr)->getBufferStart(), GoldenData.data(),
+ GoldenData.size()),
+ 0);
+}
+
+TEST(raw_ostreamTest, raw_fd_ostream_mutual_ties) {
+ SmallString<64> PathTiedTo;
+ int FDTiedTo;
+ ASSERT_FALSE(
+ sys::fs::createTemporaryFile("tietest1", "", FDTiedTo, PathTiedTo));
+ FileRemover CleanupTiedTo(PathTiedTo);
+ raw_fd_ostream TiedTo(FDTiedTo, /*ShouldClose=*/false);
+
+ SmallString<64> PathTiedStream;
+ int FDTiedStream;
+ ASSERT_FALSE(sys::fs::createTemporaryFile("tietest2", "", FDTiedStream,
+ PathTiedStream));
+ FileRemover CleanupTiedStream(PathTiedStream);
+ raw_fd_ostream TiedStream(FDTiedStream, /*ShouldClose=*/false);
+
+ // Streams can be tied to each other safely.
+ TiedStream.tie(&TiedTo);
+ TiedStream.SetBuffered();
+ TiedStream.SetBufferSize(2);
TiedTo.tie(&TiedStream);
TiedTo.SetBufferSize(2);
TiedStream << "r";
TiedTo << "s";
- EXPECT_EQ("", Buffer);
- EXPECT_EQ("acego", TiedToBuffer);
+ checkFileData(PathTiedStream.str(), "");
+ checkFileData(PathTiedTo.str(), "");
TiedTo << "tuv";
- EXPECT_EQ("r", Buffer);
+ checkFileData(PathTiedStream.str(), "r");
TiedStream << "wxy";
- EXPECT_EQ("acegostuv", TiedToBuffer);
- // The x remains in the buffer, since it was written after the flush of
+ checkFileData(PathTiedTo.str(), "stuv");
+ // The y remains in the buffer, since it was written after the flush of
// TiedTo.
- EXPECT_EQ("rwx", Buffer);
- TiedTo.tie(nullptr);
-
- // Calling tie with nullptr unties stream.
- TiedStream.SetUnbuffered();
- TiedStream.tie(nullptr);
- TiedTo << "y";
- TiedStream << "0";
- EXPECT_EQ("acegostuv", TiedToBuffer);
+ checkFileData(PathTiedStream.str(), "rwx");
TiedTo.flush();
TiedStream.flush();
@@ -478,17 +513,6 @@ TEST(raw_ostreamTest, reserve_stream) {
EXPECT_EQ("11111111111111111111hello1world", Str);
}
-static void checkFileData(StringRef FileName, StringRef GoldenData) {
- ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
- MemoryBuffer::getFileOrSTDIN(FileName);
- EXPECT_FALSE(BufOrErr.getError());
-
- EXPECT_EQ((*BufOrErr)->getBufferSize(), GoldenData.size());
- EXPECT_EQ(memcmp((*BufOrErr)->getBufferStart(), GoldenData.data(),
- GoldenData.size()),
- 0);
-}
-
TEST(raw_ostreamTest, writeToOutputFile) {
SmallString<64> Path;
int FD;
From bb260eb87d9bebd93e64051b574fbce0eebbad30 Mon Sep 17 00:00:00 2001
From: Alexis Engelke <engelke at in.tum.de>
Date: Wed, 3 Jul 2024 11:19:05 +0200
Subject: [PATCH 093/246] [CodeGen] Only deduplicate PHIs on critical edges
(#97064)
PHIElim deduplicates identical PHI nodes to reduce the number of copies
inserted. There are two cases:
1. Identical PHI nodes are in different blocks. That's the reason for
this optimization; it can't be avoided at the SSA level. A necessary
prerequisite for this is that the predecessors of all basic blocks
(where such a PHI node could occur) are the same. This implies that
all (>= 2) predecessors must have multiple successors, i.e. all edges
into the block are critical edges.
2. Identical PHI nodes are in the same block. CSE can remove these.
There are a few cases, however, where they still occur regardless:
- expand-large-div-rem creates PHI nodes with large integers, which
get lowered into one PHI per MVT. Later, some identical values
(zeroes) get folded, resulting in identical PHI nodes.
- peephole-opt occasionally inserts PHIs for the same value.
- Some pseudo instruction emitters create redundant PHI nodes (e.g.,
AVR's insertShift), merging the same values more than once.
In any case, this happens rarely and MachineCSE handles most cases
anyway, so PHIElim only gets to see very few such cases (see the
changed test files).
Currently, all PHI nodes are inserted into a DenseMap that checks
equality not by pointer but by operands. This hash map is pretty
expensive (both the hashing itself and the hash-map operations), but
only really useful in the first case.
Avoid this expensive hashing most of the time by restricting it to basic
blocks with only critical input edges. This improves performance for
code with many PHI nodes, especially at -O0. (Note that Clang often
doesn't generate PHI nodes and -O0 runs no mem2reg, while other
compilers always generate PHI nodes.)
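A minimal sketch of the gating predicate this patch adds (the helper name
is illustrative; the actual change inlines this loop, as the diff below
shows):

    #include "llvm/CodeGen/MachineBasicBlock.h"

    // Deduplication can only pay off when every incoming edge is critical:
    // with fewer than two predecessors there are no PHIs to merge, and a
    // predecessor with a single successor pins an identical PHI to one block.
    static bool allEdgesCritical(const llvm::MachineBasicBlock &MBB) {
      if (MBB.pred_size() < 2)
        return false;
      for (const llvm::MachineBasicBlock *Pred : MBB.predecessors())
        if (Pred->succ_size() < 2)
          return false; // the edge from Pred is non-critical
      return true;
    }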
---
llvm/lib/CodeGen/PHIElimination.cpp | 57 ++-
.../branch-folding-implicit-def-subreg.ll | 4 +-
llvm/test/CodeGen/X86/bfloat.ll | 340 +++++++++---------
.../X86/div-rem-pair-recomposition-signed.ll | 294 +++++++--------
4 files changed, 361 insertions(+), 334 deletions(-)
diff --git a/llvm/lib/CodeGen/PHIElimination.cpp b/llvm/lib/CodeGen/PHIElimination.cpp
index 592972f5c83b2..4fde4ec78ea28 100644
--- a/llvm/lib/CodeGen/PHIElimination.cpp
+++ b/llvm/lib/CodeGen/PHIElimination.cpp
@@ -83,7 +83,8 @@ namespace {
bool EliminatePHINodes(MachineFunction &MF, MachineBasicBlock &MBB);
void LowerPHINode(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator LastPHIIt);
+ MachineBasicBlock::iterator LastPHIIt,
+ bool AllEdgesCritical);
/// analyzePHINodes - Gather information about the PHI nodes in
/// here. In particular, we want to map the number of uses of a virtual
@@ -191,7 +192,8 @@ bool PHIElimination::runOnMachineFunction(MachineFunction &MF) {
MRI->leaveSSA();
// Populate VRegPHIUseCount
- analyzePHINodes(MF);
+ if (LV || LIS)
+ analyzePHINodes(MF);
// Eliminate PHI instructions by inserting copies into predecessor blocks.
for (auto &MBB : MF)
@@ -239,8 +241,20 @@ bool PHIElimination::EliminatePHINodes(MachineFunction &MF,
MachineBasicBlock::iterator LastPHIIt =
std::prev(MBB.SkipPHIsAndLabels(MBB.begin()));
+ // If all incoming edges are critical, we try to deduplicate identical PHIs so
+ // that we generate fewer copies. If any edge is non-critical, we either
+ // have fewer than two predecessors (=> no PHIs) or a predecessor has only us
+ // as a successor (=> an identical PHI node can't occur in a different block).
+ bool AllEdgesCritical = MBB.pred_size() >= 2;
+ for (MachineBasicBlock *Pred : MBB.predecessors()) {
+ if (Pred->succ_size() < 2) {
+ AllEdgesCritical = false;
+ break;
+ }
+ }
+
while (MBB.front().isPHI())
- LowerPHINode(MBB, LastPHIIt);
+ LowerPHINode(MBB, LastPHIIt, AllEdgesCritical);
return true;
}
@@ -267,7 +281,8 @@ static bool allPhiOperandsUndefined(const MachineInstr &MPhi,
}
/// LowerPHINode - Lower the PHI node at the top of the specified block.
void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator LastPHIIt) {
+ MachineBasicBlock::iterator LastPHIIt,
+ bool AllEdgesCritical) {
++NumLowered;
MachineBasicBlock::iterator AfterPHIsIt = std::next(LastPHIIt);
@@ -283,6 +298,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
// Create a new register for the incoming PHI arguments.
MachineFunction &MF = *MBB.getParent();
unsigned IncomingReg = 0;
+ bool EliminateNow = true; // delay elimination of nodes in LoweredPHIs
bool reusedIncoming = false; // Is IncomingReg reused from an earlier PHI?
// Insert a register to register copy at the top of the current block (but
@@ -297,19 +313,28 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
else {
// Can we reuse an earlier PHI node? This only happens for critical edges,
- // typically those created by tail duplication.
- unsigned &entry = LoweredPHIs[MPhi];
- if (entry) {
+ // typically those created by tail duplication. Otherwise, an identical PHI
+ // node typically can't occur, so avoid hashing/storing such PHIs, which is
+ // somewhat expensive.
+ unsigned *Entry = nullptr;
+ if (AllEdgesCritical)
+ Entry = &LoweredPHIs[MPhi];
+ if (Entry && *Entry) {
// An identical PHI node was already lowered. Reuse the incoming register.
- IncomingReg = entry;
+ IncomingReg = *Entry;
reusedIncoming = true;
++NumReused;
LLVM_DEBUG(dbgs() << "Reusing " << printReg(IncomingReg) << " for "
<< *MPhi);
} else {
const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
- entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
+ IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
+ if (Entry) {
+ EliminateNow = false;
+ *Entry = IncomingReg;
+ }
}
+
// Give the target the possibility to handle special cases; fall through otherwise
PHICopy = TII->createPHIDestinationCopy(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
IncomingReg, DestReg);
@@ -445,11 +470,13 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
}
// Adjust the VRegPHIUseCount map to account for the removal of this PHI node.
- for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) {
- if (!MPhi->getOperand(i).isUndef()) {
- --VRegPHIUseCount[BBVRegPair(
- MPhi->getOperand(i + 1).getMBB()->getNumber(),
- MPhi->getOperand(i).getReg())];
+ if (LV || LIS) {
+ for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) {
+ if (!MPhi->getOperand(i).isUndef()) {
+ --VRegPHIUseCount[BBVRegPair(
+ MPhi->getOperand(i + 1).getMBB()->getNumber(),
+ MPhi->getOperand(i).getReg())];
+ }
}
}
@@ -646,7 +673,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
}
// Really delete the PHI instruction now, if it is not in the LoweredPHIs map.
- if (reusedIncoming || !IncomingReg) {
+ if (EliminateNow) {
if (LIS)
LIS->RemoveMachineInstrFromMaps(*MPhi);
MF.deleteMachineInstr(MPhi);
diff --git a/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll b/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll
index 384715a849c1e..0bd030f1a3750 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll
@@ -804,6 +804,7 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr15 = V_ALIGNBIT_B32_e64 $vgpr15, $vgpr14, 1, implicit $exec
; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_XOR_B64 $exec, -1, implicit-def dead $scc
; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_OR_B64 renamable $sgpr28_sgpr29, $exec, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $vgpr10 = COPY renamable $vgpr14, implicit $exec
; GFX90A-NEXT: S_BRANCH %bb.61
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.58:
@@ -886,11 +887,10 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.61.Flow31:
; GFX90A-NEXT: successors: %bb.62(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr15, $vgpr17, $vgpr18, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7:0x000000000000000F, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr16_sgpr17, $sgpr24_sgpr25, $sgpr26_sgpr27, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr16_sgpr17_sgpr18_sgpr19:0x00000000000000F0, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000C, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr15, $vgpr17, $vgpr18, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7:0x000000000000000F, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr16_sgpr17, $sgpr24_sgpr25, $sgpr26_sgpr27, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr16_sgpr17_sgpr18_sgpr19:0x00000000000000F0, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr50_sgpr51, implicit-def $scc
; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 0
- ; GFX90A-NEXT: renamable $vgpr10 = COPY renamable $vgpr14, implicit $exec
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.62.Flow30:
; GFX90A-NEXT: successors: %bb.56(0x80000000)
diff --git a/llvm/test/CodeGen/X86/bfloat.ll b/llvm/test/CodeGen/X86/bfloat.ll
index 8b5ca57df27ed..ec76e8b05678b 100644
--- a/llvm/test/CodeGen/X86/bfloat.ll
+++ b/llvm/test/CodeGen/X86/bfloat.ll
@@ -770,12 +770,70 @@ define <32 x bfloat> @pr63017_2() nounwind {
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movdqa %xmm0, %xmm15
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movdqa %xmm0, %xmm13
+; SSE2-NEXT: movdqa %xmm0, %xmm14
+; SSE2-NEXT: movdqa %xmm0, %xmm11
+; SSE2-NEXT: movdqa %xmm0, %xmm12
+; SSE2-NEXT: movdqa %xmm0, %xmm9
+; SSE2-NEXT: movdqa %xmm0, %xmm10
+; SSE2-NEXT: movdqa %xmm0, %xmm7
+; SSE2-NEXT: movdqa %xmm0, %xmm8
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: jmp .LBB12_3
; SSE2-NEXT: .LBB12_1:
-; SSE2-NEXT: movd {{.*#+}} xmm1 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd {{.*#+}} xmm2 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movdqa %xmm2, %xmm15
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movdqa %xmm2, %xmm13
+; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: movdqa %xmm2, %xmm12
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: movdqa %xmm2, %xmm10
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: movdqa %xmm2, %xmm8
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: .LBB12_3: # %else
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
@@ -787,299 +845,240 @@ define <32 x bfloat> @pr63017_2() nounwind {
; SSE2-NEXT: .LBB12_5: # %else2
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_6
-; SSE2-NEXT: # %bb.7: # %cond.load4
+; SSE2-NEXT: jne .LBB12_7
+; SSE2-NEXT: # %bb.6: # %cond.load4
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm14
-; SSE2-NEXT: movdqa %xmm1, %xmm15
-; SSE2-NEXT: movdqa %xmm1, %xmm12
-; SSE2-NEXT: movdqa %xmm1, %xmm13
-; SSE2-NEXT: movdqa %xmm1, %xmm10
-; SSE2-NEXT: movdqa %xmm1, %xmm11
-; SSE2-NEXT: movdqa %xmm1, %xmm8
-; SSE2-NEXT: movdqa %xmm1, %xmm9
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: jmp .LBB12_8
-; SSE2-NEXT: .LBB12_6:
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm14
-; SSE2-NEXT: movdqa %xmm1, %xmm15
-; SSE2-NEXT: movdqa %xmm1, %xmm12
-; SSE2-NEXT: movdqa %xmm1, %xmm13
-; SSE2-NEXT: movdqa %xmm1, %xmm10
-; SSE2-NEXT: movdqa %xmm1, %xmm11
-; SSE2-NEXT: movdqa %xmm1, %xmm8
-; SSE2-NEXT: movdqa %xmm1, %xmm9
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: .LBB12_8: # %else5
+; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: .LBB12_7: # %else5
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_10
-; SSE2-NEXT: # %bb.9: # %cond.load7
+; SSE2-NEXT: jne .LBB12_9
+; SSE2-NEXT: # %bb.8: # %cond.load7
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_10: # %else8
+; SSE2-NEXT: .LBB12_9: # %else8
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_12
-; SSE2-NEXT: # %bb.11: # %cond.load10
+; SSE2-NEXT: jne .LBB12_11
+; SSE2-NEXT: # %bb.10: # %cond.load10
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_12: # %else11
+; SSE2-NEXT: .LBB12_11: # %else11
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_14
-; SSE2-NEXT: # %bb.13: # %cond.load13
+; SSE2-NEXT: jne .LBB12_13
+; SSE2-NEXT: # %bb.12: # %cond.load13
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_14: # %else14
+; SSE2-NEXT: .LBB12_13: # %else14
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_16
-; SSE2-NEXT: # %bb.15: # %cond.load16
+; SSE2-NEXT: jne .LBB12_15
+; SSE2-NEXT: # %bb.14: # %cond.load16
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_16: # %else17
+; SSE2-NEXT: .LBB12_15: # %else17
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_18
-; SSE2-NEXT: # %bb.17: # %cond.load19
+; SSE2-NEXT: jne .LBB12_17
+; SSE2-NEXT: # %bb.16: # %cond.load19
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_18: # %else20
+; SSE2-NEXT: .LBB12_17: # %else20
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_20
-; SSE2-NEXT: # %bb.19: # %cond.load22
+; SSE2-NEXT: jne .LBB12_19
+; SSE2-NEXT: # %bb.18: # %cond.load22
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_20: # %else23
+; SSE2-NEXT: .LBB12_19: # %else23
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_22
-; SSE2-NEXT: # %bb.21: # %cond.load25
+; SSE2-NEXT: jne .LBB12_21
+; SSE2-NEXT: # %bb.20: # %cond.load25
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_22: # %else26
+; SSE2-NEXT: .LBB12_21: # %else26
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_24
-; SSE2-NEXT: # %bb.23: # %cond.load28
+; SSE2-NEXT: jne .LBB12_23
+; SSE2-NEXT: # %bb.22: # %cond.load28
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_24: # %else29
+; SSE2-NEXT: .LBB12_23: # %else29
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_26
-; SSE2-NEXT: # %bb.25: # %cond.load31
+; SSE2-NEXT: jne .LBB12_25
+; SSE2-NEXT: # %bb.24: # %cond.load31
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_26: # %else32
+; SSE2-NEXT: .LBB12_25: # %else32
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_28
-; SSE2-NEXT: # %bb.27: # %cond.load34
+; SSE2-NEXT: jne .LBB12_27
+; SSE2-NEXT: # %bb.26: # %cond.load34
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_28: # %else35
+; SSE2-NEXT: .LBB12_27: # %else35
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_30
-; SSE2-NEXT: # %bb.29: # %cond.load37
+; SSE2-NEXT: jne .LBB12_29
+; SSE2-NEXT: # %bb.28: # %cond.load37
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_30: # %else38
+; SSE2-NEXT: .LBB12_29: # %else38
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_32
-; SSE2-NEXT: # %bb.31: # %cond.load40
+; SSE2-NEXT: jne .LBB12_31
+; SSE2-NEXT: # %bb.30: # %cond.load40
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_32: # %else41
+; SSE2-NEXT: .LBB12_31: # %else41
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_34
-; SSE2-NEXT: # %bb.33: # %cond.load43
+; SSE2-NEXT: jne .LBB12_33
+; SSE2-NEXT: # %bb.32: # %cond.load43
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_34: # %else44
+; SSE2-NEXT: .LBB12_33: # %else44
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_36
-; SSE2-NEXT: # %bb.35: # %cond.load46
+; SSE2-NEXT: jne .LBB12_35
+; SSE2-NEXT: # %bb.34: # %cond.load46
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_36: # %else47
+; SSE2-NEXT: movd %eax, %xmm15
+; SSE2-NEXT: .LBB12_35: # %else47
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_38
-; SSE2-NEXT: # %bb.37: # %cond.load49
+; SSE2-NEXT: jne .LBB12_37
+; SSE2-NEXT: # %bb.36: # %cond.load49
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: .LBB12_38: # %else50
+; SSE2-NEXT: .LBB12_37: # %else50
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: testb %al, %al
+; SSE2-NEXT: jne .LBB12_39
+; SSE2-NEXT: # %bb.38: # %cond.load52
+; SSE2-NEXT: movzwl (%rax), %eax
+; SSE2-NEXT: shll $16, %eax
+; SSE2-NEXT: movd %eax, %xmm13
+; SSE2-NEXT: .LBB12_39: # %else53
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_40
-; SSE2-NEXT: # %bb.39: # %cond.load52
+; SSE2-NEXT: jne .LBB12_41
+; SSE2-NEXT: # %bb.40: # %cond.load55
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm14
-; SSE2-NEXT: .LBB12_40: # %else53
+; SSE2-NEXT: .LBB12_41: # %else56
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_42
-; SSE2-NEXT: # %bb.41: # %cond.load55
+; SSE2-NEXT: jne .LBB12_43
+; SSE2-NEXT: # %bb.42: # %cond.load58
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm15
-; SSE2-NEXT: .LBB12_42: # %else56
+; SSE2-NEXT: movd %eax, %xmm11
+; SSE2-NEXT: .LBB12_43: # %else59
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_44
-; SSE2-NEXT: # %bb.43: # %cond.load58
+; SSE2-NEXT: jne .LBB12_45
+; SSE2-NEXT: # %bb.44: # %cond.load61
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm12
-; SSE2-NEXT: .LBB12_44: # %else59
+; SSE2-NEXT: .LBB12_45: # %else62
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_46
-; SSE2-NEXT: # %bb.45: # %cond.load61
+; SSE2-NEXT: jne .LBB12_47
+; SSE2-NEXT: # %bb.46: # %cond.load64
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm13
-; SSE2-NEXT: .LBB12_46: # %else62
+; SSE2-NEXT: movd %eax, %xmm9
+; SSE2-NEXT: .LBB12_47: # %else65
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_48
-; SSE2-NEXT: # %bb.47: # %cond.load64
+; SSE2-NEXT: jne .LBB12_49
+; SSE2-NEXT: # %bb.48: # %cond.load67
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm10
-; SSE2-NEXT: .LBB12_48: # %else65
+; SSE2-NEXT: .LBB12_49: # %else68
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_50
-; SSE2-NEXT: # %bb.49: # %cond.load67
+; SSE2-NEXT: jne .LBB12_51
+; SSE2-NEXT: # %bb.50: # %cond.load70
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm11
-; SSE2-NEXT: .LBB12_50: # %else68
+; SSE2-NEXT: movd %eax, %xmm7
+; SSE2-NEXT: .LBB12_51: # %else71
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_52
-; SSE2-NEXT: # %bb.51: # %cond.load70
+; SSE2-NEXT: jne .LBB12_53
+; SSE2-NEXT: # %bb.52: # %cond.load73
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm8
-; SSE2-NEXT: .LBB12_52: # %else71
+; SSE2-NEXT: .LBB12_53: # %else74
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_54
-; SSE2-NEXT: # %bb.53: # %cond.load73
+; SSE2-NEXT: jne .LBB12_55
+; SSE2-NEXT: # %bb.54: # %cond.load76
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm9
-; SSE2-NEXT: .LBB12_54: # %else74
+; SSE2-NEXT: movd %eax, %xmm5
+; SSE2-NEXT: .LBB12_55: # %else77
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_56
-; SSE2-NEXT: # %bb.55: # %cond.load76
+; SSE2-NEXT: jne .LBB12_57
+; SSE2-NEXT: # %bb.56: # %cond.load79
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm6
-; SSE2-NEXT: .LBB12_56: # %else77
+; SSE2-NEXT: .LBB12_57: # %else80
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_58
-; SSE2-NEXT: # %bb.57: # %cond.load79
+; SSE2-NEXT: jne .LBB12_59
+; SSE2-NEXT: # %bb.58: # %cond.load82
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm7
-; SSE2-NEXT: .LBB12_58: # %else80
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: .LBB12_59: # %else83
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_60
-; SSE2-NEXT: # %bb.59: # %cond.load82
+; SSE2-NEXT: jne .LBB12_61
+; SSE2-NEXT: # %bb.60: # %cond.load85
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm4
-; SSE2-NEXT: .LBB12_60: # %else83
-; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_62
-; SSE2-NEXT: # %bb.61: # %cond.load85
-; SSE2-NEXT: movzwl (%rax), %eax
-; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm5
-; SSE2-NEXT: .LBB12_62: # %else86
+; SSE2-NEXT: .LBB12_61: # %else86
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_64
-; SSE2-NEXT: # %bb.63: # %cond.load88
+; SSE2-NEXT: jne .LBB12_63
+; SSE2-NEXT: # %bb.62: # %cond.load88
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: .LBB12_64: # %else89
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: .LBB12_63: # %else89
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
@@ -1087,21 +1086,20 @@ define <32 x bfloat> @pr63017_2() nounwind {
; SSE2-NEXT: movd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: movd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: jne .LBB12_65
-; SSE2-NEXT: # %bb.66: # %cond.load91
+; SSE2-NEXT: jne .LBB12_64
+; SSE2-NEXT: # %bb.65: # %cond.load91
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: jmp .LBB12_67
-; SSE2-NEXT: .LBB12_65:
-; SSE2-NEXT: movd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; SSE2-NEXT: .LBB12_67: # %else92
+; SSE2-NEXT: jmp .LBB12_66
+; SSE2-NEXT: .LBB12_64:
+; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: .LBB12_66: # %else92
; SSE2-NEXT: callq __truncsfbf2 at PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
index 1c303de55c95d..aa7b77f01d5ba 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
@@ -177,7 +177,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: subl $156, %esp
+; X86-NEXT: subl $152, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -273,44 +273,42 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %ebp, %esi
; X86-NEXT: orl %ebx, %esi
; X86-NEXT: cmovnel %ecx, %edx
-; X86-NEXT: xorl %ebx, %ebx
+; X86-NEXT: xorl %esi, %esi
; X86-NEXT: subl %edx, %edi
+; X86-NEXT: movl $0, %ebx
+; X86-NEXT: sbbl %ebx, %ebx
; X86-NEXT: movl $0, %edx
; X86-NEXT: sbbl %edx, %edx
; X86-NEXT: movl $0, %eax
; X86-NEXT: sbbl %eax, %eax
-; X86-NEXT: movl $0, %esi
-; X86-NEXT: sbbl %esi, %esi
; X86-NEXT: movl $127, %ecx
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpl %edi, %ecx
+; X86-NEXT: movl %ebx, %edi
+; X86-NEXT: movl $0, %ecx
+; X86-NEXT: sbbl %ebx, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ecx
-; X86-NEXT: movl $0, %ecx
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: setb %cl
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
; X86-NEXT: movl (%esp), %edx # 4-byte Reload
-; X86-NEXT: cmovnel %ebx, %edx
-; X86-NEXT: cmovnel %ebx, %ebp
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: cmovnel %ebx, %eax
-; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: movl %ebx, %esi
+; X86-NEXT: cmovnel %esi, %edx
+; X86-NEXT: cmovnel %esi, %ebp
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: cmovnel %esi, %ebx
+; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: jne .LBB4_8
; X86-NEXT: # %bb.1: # %_udiv-special-cases
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: xorl $127, %edi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl %ebx, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: xorl $127, %eax
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT: movl %edi, %ecx
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %edi, %ecx
+; X86-NEXT: orl %eax, %ecx
; X86-NEXT: je .LBB4_8
; X86-NEXT: # %bb.2: # %udiv-bb1
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -333,34 +331,34 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: shrb $3, %al
; X86-NEXT: andb $15, %al
; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %edi
-; X86-NEXT: movl 148(%esp,%edi), %edx
-; X86-NEXT: movl 152(%esp,%edi), %esi
+; X86-NEXT: movsbl %al, %eax
+; X86-NEXT: movl 144(%esp,%eax), %edx
+; X86-NEXT: movl 148(%esp,%eax), %esi
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shldl %cl, %edx, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shll %cl, %edx
; X86-NEXT: notb %cl
-; X86-NEXT: movl 144(%esp,%edi), %eax
-; X86-NEXT: movl %eax, %ebp
+; X86-NEXT: movl 140(%esp,%eax), %ebx
+; X86-NEXT: movl %ebx, %ebp
; X86-NEXT: shrl %ebp
; X86-NEXT: shrl %cl, %ebp
; X86-NEXT: orl %edx, %ebp
-; X86-NEXT: movl 140(%esp,%edi), %edx
+; X86-NEXT: movl 136(%esp,%eax), %eax
; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl %cl, %eax, %ebx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl $1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: adcl $0, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: adcl $0, %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: adcl $0, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: adcl $0, %edx
; X86-NEXT: jae .LBB4_3
; X86-NEXT: # %bb.6:
-; X86-NEXT: xorl %edi, %edi
; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: xorl %eax, %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: jmp .LBB4_7
; X86-NEXT: .LBB4_3: # %udiv-preheader
@@ -376,176 +374,180 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movb %dl, %ch
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movb %al, %ch
; X86-NEXT: andb $7, %ch
-; X86-NEXT: movb %dl, %cl
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %edx
-; X86-NEXT: movl 104(%esp,%edx), %ebx
-; X86-NEXT: movl 100(%esp,%edx), %edi
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edi, %ebp
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: shrb $3, %al
+; X86-NEXT: andb $15, %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: movl 100(%esp,%eax), %esi
+; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 96(%esp,%eax), %edx
+; X86-NEXT: movl %ebp, %edi
+; X86-NEXT: movl %edx, %ebp
; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrdl %cl, %ebx, %ebp
-; X86-NEXT: movl 92(%esp,%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 96(%esp,%edx), %esi
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %ebp
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 88(%esp,%eax), %ebx
+; X86-NEXT: movl 92(%esp,%eax), %esi
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: shrl %cl, %eax
; X86-NEXT: notb %cl
-; X86-NEXT: addl %edi, %edi
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: addl %edx, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrl %cl, %ebx
+; X86-NEXT: shrl %cl, (%esp) # 4-byte Folded Spill
+; X86-NEXT: shrdl %cl, %esi, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shrdl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: addl $-1, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: adcl $-1, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: adcl $-1, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: adcl $-1, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: addl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: adcl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: adcl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: adcl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB4_4: # %udiv-do-while
; X86-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NEXT: movl %ebp, (%esp) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl $1, %ebp, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %ebp, %edx
; X86-NEXT: shldl $1, %ebp, (%esp) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl $1, %edx, %ebp
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl $1, %edi, %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-NEXT: shldl $1, %ebp, %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: shldl $1, %ebx, %ebp
+; X86-NEXT: shldl $1, %ecx, %ebx
+; X86-NEXT: shldl $1, %edi, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: orl %eax, %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shldl $1, %ecx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: orl %esi, %edi
+; X86-NEXT: orl %eax, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl $1, %eax, %ecx
-; X86-NEXT: orl %esi, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: shldl $1, %ecx, %eax
-; X86-NEXT: orl %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: addl %ecx, %ecx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT: movl %esi, %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: shldl $1, %esi, %ecx
+; X86-NEXT: orl %eax, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: cmpl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; X86-NEXT: addl %esi, %esi
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: cmpl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ebp, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload
+; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl %ebx, %ecx
+; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload
; X86-NEXT: sarl $31, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl $1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT: andl %edi, %esi
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: subl %ecx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: subl %ecx, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ebp
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
-; X86-NEXT: sbbl %edi, %ebp
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: sbbl %esi, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: sbbl %edi, %edx
+; X86-NEXT: movl %edx, %ebp
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: sbbl %esi, (%esp) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: addl $-1, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: adcl $-1, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: adcl $-1, %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: adcl $-1, %esi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %esi, %edi
+; X86-NEXT: adcl $-1, %edx
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %edx, %eax
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %ebx, %ecx
-; X86-NEXT: orl %edi, %ecx
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %esi, %ecx
+; X86-NEXT: orl %eax, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: jne .LBB4_4
; X86-NEXT: # %bb.5:
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl %edi, %ebp
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: .LBB4_7: # %udiv-loop-exit
; X86-NEXT: shldl $1, %ebp, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: shldl $1, %eax, %ebp
-; X86-NEXT: orl %ecx, %ebp
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: shldl $1, %ebx, %ebp
+; X86-NEXT: orl %eax, %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl $1, %esi, %eax
-; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: shldl $1, %esi, %ebx
+; X86-NEXT: orl %eax, %ebx
; X86-NEXT: addl %esi, %esi
-; X86-NEXT: orl %edi, %esi
+; X86-NEXT: orl %ecx, %esi
; X86-NEXT: .LBB4_8: # %udiv-end
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, %edx
-; X86-NEXT: xorl %ecx, %ebp
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: xorl %ecx, %esi
-; X86-NEXT: subl %ecx, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: xorl %eax, %ebp
+; X86-NEXT: xorl %eax, %ebx
+; X86-NEXT: xorl %eax, %esi
+; X86-NEXT: subl %eax, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %ecx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %ecx, %ebp
-; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: sbbl %eax, %ebx
+; X86-NEXT: sbbl %eax, %ebp
+; X86-NEXT: sbbl %eax, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %esi, (%ecx)
-; X86-NEXT: movl %eax, 4(%ecx)
-; X86-NEXT: movl %ebp, 8(%ecx)
-; X86-NEXT: movl %edx, 12(%ecx)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %esi, (%eax)
+; X86-NEXT: movl %ebx, 4(%eax)
+; X86-NEXT: movl %ebp, 8(%eax)
+; X86-NEXT: movl %edx, 12(%eax)
+; X86-NEXT: movl %ebx, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ebp, %edi
; X86-NEXT: mull %ecx
-; X86-NEXT: movl %edx, %ebx
-; X86-NEXT: movl %eax, %ebp
+; X86-NEXT: movl %edx, %ebp
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, %eax
; X86-NEXT: mull %ecx
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: addl %ebp, %ecx
-; X86-NEXT: adcl $0, %ebx
+; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT: adcl $0, %ebp
; X86-NEXT: movl %esi, %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: mull %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: mull %esi
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: adcl %ebx, %edx
-; X86-NEXT: movl %edx, %ebx
+; X86-NEXT: adcl %ebp, %edx
+; X86-NEXT: movl %edx, %ebp
; X86-NEXT: setb %cl
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: mull %ebp
-; X86-NEXT: addl %ebx, %eax
+; X86-NEXT: movl %ebx, %eax
+; X86-NEXT: mull %esi
+; X86-NEXT: addl %ebp, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movzbl %cl, %eax
; X86-NEXT: adcl %eax, %edx
@@ -555,12 +557,12 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: imull %eax, %ecx
; X86-NEXT: mull %edi
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: imull %ebp, %edi
+; X86-NEXT: imull %esi, %edi
; X86-NEXT: addl %edx, %edi
; X86-NEXT: addl %ecx, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: imull %esi, %ecx
+; X86-NEXT: imull %ebx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: imull %edx, %esi
@@ -584,7 +586,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %edx, 4(%eax)
; X86-NEXT: movl %ebx, 8(%eax)
; X86-NEXT: movl %edi, 12(%eax)
-; X86-NEXT: addl $156, %esp
+; X86-NEXT: addl $152, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
From 3b73cb3bf138d2ad7f354c9d26037d769cfe9d53 Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Wed, 3 Jul 2024 10:22:15 +0100
Subject: [PATCH 094/246] [AArch64][GlobalISel] Create copy rather than
single-element concat
The verifier does not accept single-element G_CONCAT_VECTORS, so if there is
only a single Op, generate a COPY instead.
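Schematically, the change amounts to the following guard — a minimal sketch of
the pattern rather than the full combine (the helper name is illustrative):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Sketch: concatenating a single source is invalid MIR, so degrade to a copy.
// `Ops` holds the surviving source registers after the shuffle/concat combine.
static void buildConcatOrCopy(MachineIRBuilder &Builder, Register Dst,
                              ArrayRef<Register> Ops) {
  if (Ops.size() > 1)
    Builder.buildConcatVectors(Dst, Ops); // G_CONCAT_VECTORS needs >= 2 sources
  else
    Builder.buildCopy(Dst, Ops[0]);       // single source: plain COPY
}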
---
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 5 +++-
.../GlobalISel/combine-shufflevector.mir | 28 +++++++++++++++++++
2 files changed, 32 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index ef2ef1e0ffb04..c27b882f17003 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -446,7 +446,10 @@ void CombinerHelper::applyCombineShuffleConcat(MachineInstr &MI,
}
}
- Builder.buildConcatVectors(MI.getOperand(0).getReg(), Ops);
+ if (Ops.size() > 1)
+ Builder.buildConcatVectors(MI.getOperand(0).getReg(), Ops);
+ else
+ Builder.buildCopy(MI.getOperand(0).getReg(), Ops[0]);
MI.eraseFromParent();
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shufflevector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shufflevector.mir
index 0de989f8be75d..fbcc87fc7012a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shufflevector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shufflevector.mir
@@ -200,3 +200,31 @@ body: |
$q0 = COPY %z(<16 x s8>)
RET_ReallyLR implicit $q0
...
+
+---
+name: single_vector_to_copy
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $q0, $q1
+
+ ; CHECK-LABEL: name: single_vector_to_copy
+ ; CHECK: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %p1:_(<4 x s32>) = COPY $q0
+ ; CHECK-NEXT: %p2:_(<4 x s32>) = COPY $q1
+ ; CHECK-NEXT: $q0 = COPY %p1(<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY %p2(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+ %p1:_(<4 x s32>) = COPY $q0
+ %p2:_(<4 x s32>) = COPY $q1
+
+ %a:_(<8 x s32>) = G_CONCAT_VECTORS %p1:_(<4 x s32>), %p2:_(<4 x s32>)
+
+ %x:_(<4 x s32>) = G_SHUFFLE_VECTOR %a:_(<8 x s32>), %a:_, shufflemask(0, 1, 2, 3)
+ %y:_(<4 x s32>) = G_SHUFFLE_VECTOR %a:_(<8 x s32>), %a:_, shufflemask(4, 5, 6, 7)
+
+ $q0 = COPY %x(<4 x s32>)
+ $q1 = COPY %y(<4 x s32>)
+ RET_ReallyLR implicit $q0
+...
From 5a1a46722948b79803826f1b11877ffcf102c094 Mon Sep 17 00:00:00 2001
From: Dominik Adamski <dominik.adamski at amd.com>
Date: Wed, 3 Jul 2024 11:42:32 +0200
Subject: [PATCH 095/246] Revert "[AMDGPU][OpenMP] Do not attach
-fcuda-is-device flag for AMDGPU OpenMP" (#97531)
Reverts llvm/llvm-project#96909 (commit ID: 8bb00cb160830ec8f6029c2aae79d3e46b04b99c)
It breaks OpenMP CI:
https://gitlab.e4s.io/uo-public/llvm-openmp-offloading/-/jobs/283716
---
clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp | 2 ++
clang/test/Driver/amdgpu-openmp-toolchain.c | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
index b75d400e6ce91..1c0fb4babe3a5 100644
--- a/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
+++ b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
@@ -47,6 +47,8 @@ void AMDGPUOpenMPToolChain::addClangTargetOptions(
assert(DeviceOffloadingKind == Action::OFK_OpenMP &&
"Only OpenMP offloading kinds are supported.");
+ CC1Args.push_back("-fcuda-is-device");
+
if (DriverArgs.hasArg(options::OPT_nogpulib))
return;
diff --git a/clang/test/Driver/amdgpu-openmp-toolchain.c b/clang/test/Driver/amdgpu-openmp-toolchain.c
index a153c4afb0ce8..49af04acc4639 100644
--- a/clang/test/Driver/amdgpu-openmp-toolchain.c
+++ b/clang/test/Driver/amdgpu-openmp-toolchain.c
@@ -7,7 +7,7 @@
// verify the tools invocations
// CHECK: "-cc1" "-triple" "x86_64-unknown-linux-gnu"{{.*}}"-emit-llvm-bc"{{.*}}"-x" "c"
-// CHECK: "-cc1" "-triple" "amdgcn-amd-amdhsa" "-aux-triple" "x86_64-unknown-linux-gnu"{{.*}}"-target-cpu" "gfx906"
+// CHECK: "-cc1" "-triple" "amdgcn-amd-amdhsa" "-aux-triple" "x86_64-unknown-linux-gnu"{{.*}}"-fcuda-is-device"{{.*}}"-target-cpu" "gfx906"
// CHECK: "-cc1" "-triple" "x86_64-unknown-linux-gnu"{{.*}}"-emit-obj"
// CHECK: clang-linker-wrapper{{.*}} "-o" "a.out"
From edbbc832a5308e4f6943583965e74254799f13ae Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Wed, 3 Jul 2024 10:50:22 +0100
Subject: [PATCH 096/246] ConstantRange: add query for isAllPositive (#97420)
ConstantRange has queries for isAllNegative and isAllNonNegative, but lacks a
corresponding query for isAllPositive. Add this function.
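For illustration, a minimal usage sketch of the new query next to the existing
ones (the concrete ranges are arbitrary examples):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

static bool demo() {
  ConstantRange Pos(APInt(8, 1), APInt(8, 10));    // [1, 10): all values > 0
  ConstantRange NonNeg(APInt(8, 0), APInt(8, 10)); // [0, 10): contains zero
  // Both ranges are all non-negative, but only the first is all positive.
  return Pos.isAllPositive() && NonNeg.isAllNonNegative() &&
         !NonNeg.isAllPositive();
}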
---
llvm/include/llvm/IR/ConstantRange.h | 3 +++
llvm/lib/IR/ConstantRange.cpp | 10 ++++++++++
llvm/unittests/IR/ConstantRangeTest.cpp | 12 ++++++++++--
3 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/llvm/include/llvm/IR/ConstantRange.h b/llvm/include/llvm/IR/ConstantRange.h
index 7b94b9c6c6d11..86d0a6b35d748 100644
--- a/llvm/include/llvm/IR/ConstantRange.h
+++ b/llvm/include/llvm/IR/ConstantRange.h
@@ -277,6 +277,9 @@ class [[nodiscard]] ConstantRange {
/// Return true if all values in this range are non-negative.
bool isAllNonNegative() const;
+ /// Return true if all values in this range are positive.
+ bool isAllPositive() const;
+
/// Return the largest unsigned value contained in the ConstantRange.
APInt getUnsignedMax() const;
diff --git a/llvm/lib/IR/ConstantRange.cpp b/llvm/lib/IR/ConstantRange.cpp
index 19041704a40be..50de975d83c0a 100644
--- a/llvm/lib/IR/ConstantRange.cpp
+++ b/llvm/lib/IR/ConstantRange.cpp
@@ -440,6 +440,16 @@ bool ConstantRange::isAllNonNegative() const {
return !isSignWrappedSet() && Lower.isNonNegative();
}
+bool ConstantRange::isAllPositive() const {
+ // Empty set is all positive, full set is not.
+ if (isEmptySet())
+ return true;
+ if (isFullSet())
+ return false;
+
+ return !isSignWrappedSet() && Lower.isStrictlyPositive();
+}
+
APInt ConstantRange::getUnsignedMax() const {
if (isFullSet() || isUpperWrapped())
return APInt::getMaxValue(getBitWidth());
diff --git a/llvm/unittests/IR/ConstantRangeTest.cpp b/llvm/unittests/IR/ConstantRangeTest.cpp
index 392c41f74b431..0181e2ce6ac92 100644
--- a/llvm/unittests/IR/ConstantRangeTest.cpp
+++ b/llvm/unittests/IR/ConstantRangeTest.cpp
@@ -2398,23 +2398,31 @@ TEST_F(ConstantRangeTest, Negative) {
// they are also covered by the exhaustive test below.
EXPECT_TRUE(Empty.isAllNegative());
EXPECT_TRUE(Empty.isAllNonNegative());
+ EXPECT_TRUE(Empty.isAllPositive());
EXPECT_FALSE(Full.isAllNegative());
EXPECT_FALSE(Full.isAllNonNegative());
+ EXPECT_FALSE(Full.isAllPositive());
EnumerateInterestingConstantRanges([](const ConstantRange &CR) {
bool AllNegative = true;
bool AllNonNegative = true;
+ bool AllPositive = true;
ForeachNumInConstantRange(CR, [&](const APInt &N) {
if (!N.isNegative())
AllNegative = false;
if (!N.isNonNegative())
AllNonNegative = false;
+ if (!N.isStrictlyPositive())
+ AllPositive = false;
});
- assert((CR.isEmptySet() || !AllNegative || !AllNonNegative) &&
- "Only empty set can be both all negative and all non-negative");
+ assert(
+ (CR.isEmptySet() || !AllNegative || !AllNonNegative || !AllPositive) &&
+ "Only empty set can be all negative, all non-negative, and all "
+ "positive");
EXPECT_EQ(AllNegative, CR.isAllNegative());
EXPECT_EQ(AllNonNegative, CR.isAllNonNegative());
+ EXPECT_EQ(AllPositive, CR.isAllPositive());
});
}
From 915ee0b823a528456226de513f303483d5fe0793 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Wed, 3 Jul 2024 10:50:52 +0100
Subject: [PATCH 097/246] UTC: support debug output from LDist (#93208)
Tweak the LoopDistribute debug output to be prefixed with "LDist: ", make it
stable across runs by dropping pointer values from the partition headers, and
trivially extend update_analyze_test_checks.py to support this output.
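The prefix follows the usual LLVM_DEBUG idiom; a minimal sketch, assuming the
standard llvm/Support/Debug.h machinery (the free function below is made up
for illustration):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "loop-distribute"

// Under -debug-only=loop-distribute, each line now carries the "LDist: "
// prefix that update_analyze_test_checks.py keys on.
static void noteCandidateLoop(llvm::StringRef LoopName) {
  LLVM_DEBUG(llvm::dbgs() << "LDist: Found a candidate loop: " << LoopName
                          << "\n");
}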
---
llvm/lib/Transforms/Scalar/LoopDistribute.cpp | 58 ++++-----
.../Transforms/LoopDistribute/debug-print.ll | 94 ++++++++++++--
.../Inputs/loop-distribute.ll | 27 ++++
.../Inputs/loop-distribute.ll.expected | 118 ++++++++++++++++++
.../loop-distribute.test | 8 ++
llvm/utils/UpdateTestChecks/common.py | 6 +-
llvm/utils/update_analyze_test_checks.py | 10 +-
7 files changed, 275 insertions(+), 46 deletions(-)
create mode 100644 llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/Inputs/loop-distribute.ll
create mode 100644 llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/Inputs/loop-distribute.ll.expected
create mode 100644 llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/loop-distribute.test
diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index 7a34ec2c008cc..c1afa72f2ff3f 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -26,7 +26,7 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
@@ -120,7 +120,7 @@ namespace {
/// Maintains the set of instructions of the loop for a partition before
/// cloning. After cloning, it hosts the new loop.
class InstPartition {
- using InstructionSet = SmallPtrSet<Instruction *, 8>;
+ using InstructionSet = SmallSetVector<Instruction *, 8>;
public:
InstPartition(Instruction *I, Loop *L, bool DepCycle = false)
@@ -166,7 +166,7 @@ class InstPartition {
// Insert instructions from the loop that we depend on.
for (Value *V : I->operand_values()) {
auto *I = dyn_cast<Instruction>(V);
- if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second)
+ if (I && OrigLoop->contains(I->getParent()) && Set.insert(I))
Worklist.push_back(I);
}
}
@@ -231,17 +231,16 @@ class InstPartition {
}
}
- void print() const {
- if (DepCycle)
- dbgs() << " (cycle)\n";
+ void print(raw_ostream &OS) const {
+ OS << (DepCycle ? " (cycle)\n" : "\n");
for (auto *I : Set)
// Prefix with the block name.
- dbgs() << " " << I->getParent()->getName() << ":" << *I << "\n";
+ OS << " " << I->getParent()->getName() << ":" << *I << "\n";
}
- void printBlocks() const {
+ void printBlocks(raw_ostream &OS) const {
for (auto *BB : getDistributedLoop()->getBlocks())
- dbgs() << *BB;
+ OS << *BB;
}
private:
@@ -368,11 +367,11 @@ class InstPartitionContainer {
std::tie(LoadToPart, NewElt) =
LoadToPartition.insert(std::make_pair(Inst, PartI));
if (!NewElt) {
- LLVM_DEBUG(dbgs()
- << "Merging partitions due to this load in multiple "
- << "partitions: " << PartI << ", " << LoadToPart->second
- << "\n"
- << *Inst << "\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "LDist: Merging partitions due to this load in multiple "
+ << "partitions: " << PartI << ", " << LoadToPart->second << "\n"
+ << *Inst << "\n");
auto PartJ = I;
do {
@@ -530,8 +529,8 @@ class InstPartitionContainer {
void print(raw_ostream &OS) const {
unsigned Index = 0;
for (const auto &P : PartitionContainer) {
- OS << "Partition " << Index++ << " (" << &P << "):\n";
- P.print();
+ OS << "LDist: Partition " << Index++ << ":";
+ P.print(OS);
}
}
@@ -545,11 +544,11 @@ class InstPartitionContainer {
}
#endif
- void printBlocks() const {
+ void printBlocks(raw_ostream &OS) const {
unsigned Index = 0;
for (const auto &P : PartitionContainer) {
- dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n";
- P.printBlocks();
+ OS << "LDist: Partition " << Index++ << ":";
+ P.printBlocks(OS);
}
}
@@ -628,7 +627,7 @@ class MemoryInstructionDependences {
const SmallVectorImpl<Dependence> &Dependences) {
Accesses.append(Instructions.begin(), Instructions.end());
- LLVM_DEBUG(dbgs() << "Backward dependences:\n");
+ LLVM_DEBUG(dbgs() << "LDist: Backward dependences:\n");
for (const auto &Dep : Dependences)
if (Dep.isPossiblyBackward()) {
// Note that the designations source and destination follow the program
@@ -738,7 +737,7 @@ class LoopDistributeForLoop {
for (auto *Inst : DefsUsedOutside)
Partitions.addToNewNonCyclicPartition(Inst);
- LLVM_DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
+ LLVM_DEBUG(dbgs() << "LDist: Seeded partitions:\n" << Partitions);
if (Partitions.getSize() < 2)
return fail("CantIsolateUnsafeDeps",
"cannot isolate unsafe dependencies");
@@ -746,19 +745,19 @@ class LoopDistributeForLoop {
// Run the merge heuristics: Merge non-cyclic adjacent partitions since we
// should be able to vectorize these together.
Partitions.mergeBeforePopulating();
- LLVM_DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
+ LLVM_DEBUG(dbgs() << "LDist: Merged partitions:\n" << Partitions);
if (Partitions.getSize() < 2)
return fail("CantIsolateUnsafeDeps",
"cannot isolate unsafe dependencies");
// Now, populate the partitions with non-memory operations.
Partitions.populateUsedSet();
- LLVM_DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);
+ LLVM_DEBUG(dbgs() << "LDist: Populated partitions:\n" << Partitions);
// In order to preserve original lexical order for loads, keep them in the
// partition that we set up in the MemoryInstructionDependences loop.
if (Partitions.mergeToAvoidDuplicatedLoads()) {
- LLVM_DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
+ LLVM_DEBUG(dbgs() << "LDist: Partitions merged to ensure unique loads:\n"
<< Partitions);
if (Partitions.getSize() < 2)
return fail("CantIsolateUnsafeDeps",
@@ -782,7 +781,8 @@ class LoopDistributeForLoop {
if (!IsForced.value_or(false) && hasDisableAllTransformsHint(L))
return fail("HeuristicDisabled", "distribution heuristic disabled");
- LLVM_DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
+ LLVM_DEBUG(dbgs() << "LDist: Distributing loop: "
+ << L->getHeader()->getName() << "\n");
// We're done forming the partitions; set up the reverse mapping from
// instructions to partitions.
Partitions.setupPartitionIdOnInstructions();
@@ -810,7 +810,7 @@ class LoopDistributeForLoop {
MDNode *OrigLoopID = L->getLoopID();
- LLVM_DEBUG(dbgs() << "\nPointers:\n");
+ LLVM_DEBUG(dbgs() << "LDist: Pointers:\n");
LLVM_DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
LoopVersioning LVer(*LAI, Checks, L, LI, DT, SE);
LVer.versionLoop(DefsUsedOutside);
@@ -833,8 +833,8 @@ class LoopDistributeForLoop {
// Now, we remove the instructions from each loop that don't belong to that
// partition.
Partitions.removeUnusedInsts();
- LLVM_DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
- LLVM_DEBUG(Partitions.printBlocks());
+ LLVM_DEBUG(dbgs() << "LDist: After removing unused Instrs:\n");
+ LLVM_DEBUG(Partitions.printBlocks(dbgs()));
if (LDistVerify) {
LI->verify(*DT);
@@ -856,7 +856,7 @@ class LoopDistributeForLoop {
LLVMContext &Ctx = F->getContext();
bool Forced = isForced().value_or(false);
- LLVM_DEBUG(dbgs() << "Skipping; " << Message << "\n");
+ LLVM_DEBUG(dbgs() << "LDist: Skipping; " << Message << "\n");
// With Rpass-missed report that distribution failed.
ORE->emit([&]() {
diff --git a/llvm/test/Transforms/LoopDistribute/debug-print.ll b/llvm/test/Transforms/LoopDistribute/debug-print.ll
index 733c33483ecc4..2c3a0116fe131 100644
--- a/llvm/test/Transforms/LoopDistribute/debug-print.ll
+++ b/llvm/test/Transforms/LoopDistribute/debug-print.ll
@@ -1,20 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
; REQUIRES: asserts
; RUN: opt -passes=loop-distribute -enable-loop-distribute \
; RUN: -debug-only=loop-distribute -disable-output 2>&1 %s | FileCheck %s
define void @f(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d, i64 %stride) {
; CHECK-LABEL: 'f'
-; CHECK: LDist: Found a candidate loop: for.body
-; CHECK: Backward dependences:
-; CHECK-NEXT: Backward:
-; CHECK-NEXT: %load.a = load i32, ptr %gep.a, align 4 ->
-; CHECK-NEXT: store i32 %mul.a, ptr %gep.a.plus4, align 4
-; CHECK: Seeded partitions:
-; CHECK: Partition 0
-; CHECK: Partition 1
-; CHECK: Partition 2
-; CHECK: Partition 3
-; CHECK: Distributing loop
+; CHECK-NEXT: LDist: Found a candidate loop: for.body
+; CHECK-NEXT: LDist: Backward dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: %load.a = load i32, ptr %gep.a, align 4 ->
+; CHECK-NEXT: store i32 %mul.a, ptr %gep.a.plus4, align 4
+; CHECK-NEXT: LDist: Seeded partitions:
+; CHECK-NEXT: LDist: Partition 0: (cycle)
+; CHECK-NEXT: for.body: %load.a = load i32, ptr %gep.a, align 4
+; CHECK-NEXT: for.body: %load.b = load i32, ptr %gep.b, align 4
+; CHECK-NEXT: for.body: store i32 %mul.a, ptr %gep.a.plus4, align 4
+; CHECK-NEXT: LDist: Partition 1:
+; CHECK-NEXT: for.body: %loadD = load i32, ptr %gep.d, align 4
+; CHECK-NEXT: LDist: Partition 2:
+; CHECK-NEXT: for.body: %load.strided.a = load i32, ptr %gep.strided.a, align 4
+; CHECK-NEXT: LDist: Partition 3:
+; CHECK-NEXT: for.body: store i32 %mul.c, ptr %gep.c, align 4
+; CHECK-NEXT: LDist: Merged partitions:
+; CHECK-NEXT: LDist: Partition 0: (cycle)
+; CHECK-NEXT: for.body: %load.a = load i32, ptr %gep.a, align 4
+; CHECK-NEXT: for.body: %load.b = load i32, ptr %gep.b, align 4
+; CHECK-NEXT: for.body: store i32 %mul.a, ptr %gep.a.plus4, align 4
+; CHECK-NEXT: LDist: Partition 1:
+; CHECK-NEXT: for.body: %loadD = load i32, ptr %gep.d, align 4
+; CHECK-NEXT: for.body: %load.strided.a = load i32, ptr %gep.strided.a, align 4
+; CHECK-NEXT: for.body: store i32 %mul.c, ptr %gep.c, align 4
+; CHECK-NEXT: LDist: Populated partitions:
+; CHECK-NEXT: LDist: Partition 0: (cycle)
+; CHECK-NEXT: for.body: %load.a = load i32, ptr %gep.a, align 4
+; CHECK-NEXT: for.body: %load.b = load i32, ptr %gep.b, align 4
+; CHECK-NEXT: for.body: store i32 %mul.a, ptr %gep.a.plus4, align 4
+; CHECK-NEXT: for.body: br i1 %exitcond, label %exit, label %for.body
+; CHECK-NEXT: for.body: %exitcond = icmp eq i64 %add, 20
+; CHECK-NEXT: for.body: %add = add nuw nsw i64 %ind, 1
+; CHECK-NEXT: for.body: %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+; CHECK-NEXT: for.body: %mul.a = mul i32 %load.b, %load.a
+; CHECK-NEXT: for.body: %gep.a.plus4 = getelementptr inbounds i32, ptr %a, i64 %add
+; CHECK-NEXT: for.body: %gep.b = getelementptr inbounds i32, ptr %b, i64 %ind
+; CHECK-NEXT: for.body: %gep.a = getelementptr inbounds i32, ptr %a, i64 %ind
+; CHECK-NEXT: LDist: Partition 1:
+; CHECK-NEXT: for.body: %loadD = load i32, ptr %gep.d, align 4
+; CHECK-NEXT: for.body: %load.strided.a = load i32, ptr %gep.strided.a, align 4
+; CHECK-NEXT: for.body: store i32 %mul.c, ptr %gep.c, align 4
+; CHECK-NEXT: for.body: br i1 %exitcond, label %exit, label %for.body
+; CHECK-NEXT: for.body: %exitcond = icmp eq i64 %add, 20
+; CHECK-NEXT: for.body: %add = add nuw nsw i64 %ind, 1
+; CHECK-NEXT: for.body: %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+; CHECK-NEXT: for.body: %mul.c = mul i32 %loadD, %load.strided.a
+; CHECK-NEXT: for.body: %gep.c = getelementptr inbounds i32, ptr %c, i64 %ind
+; CHECK-NEXT: for.body: %gep.strided.a = getelementptr inbounds i32, ptr %a, i64 %mul
+; CHECK-NEXT: for.body: %mul = mul i64 %ind, %stride
+; CHECK-NEXT: for.body: %gep.d = getelementptr inbounds i32, ptr %d, i64 %ind
+; CHECK-NEXT: LDist: Distributing loop: for.body
+; CHECK-NEXT: LDist: Pointers:
+; CHECK-NEXT: LDist: After removing unused Instrs:
+; CHECK-NEXT: LDist: Partition 0:
+; CHECK-NEXT: for.body.ldist1: ; preds = %for.body.ldist1, %for.body.ph.ldist1
+; CHECK-NEXT: %ind.ldist1 = phi i64 [ 0, %for.body.ph.ldist1 ], [ %add.ldist1, %for.body.ldist1 ]
+; CHECK-NEXT: %gep.a.ldist1 = getelementptr inbounds i32, ptr %a, i64 %ind.ldist1
+; CHECK-NEXT: %load.a.ldist1 = load i32, ptr %gep.a.ldist1, align 4
+; CHECK-NEXT: %gep.b.ldist1 = getelementptr inbounds i32, ptr %b, i64 %ind.ldist1
+; CHECK-NEXT: %load.b.ldist1 = load i32, ptr %gep.b.ldist1, align 4
+; CHECK-NEXT: %mul.a.ldist1 = mul i32 %load.b.ldist1, %load.a.ldist1
+; CHECK-NEXT: %add.ldist1 = add nuw nsw i64 %ind.ldist1, 1
+; CHECK-NEXT: %gep.a.plus4.ldist1 = getelementptr inbounds i32, ptr %a, i64 %add.ldist1
+; CHECK-NEXT: store i32 %mul.a.ldist1, ptr %gep.a.plus4.ldist1, align 4
+; CHECK-NEXT: %exitcond.ldist1 = icmp eq i64 %add.ldist1, 20
+; CHECK-NEXT: br i1 %exitcond.ldist1, label %for.body.ph, label %for.body.ldist1
+; CHECK-NEXT: LDist: Partition 1:
+; CHECK-NEXT: for.body: ; preds = %for.body, %for.body.ph
+; CHECK-NEXT: %ind = phi i64 [ 0, %for.body.ph ], [ %add, %for.body ]
+; CHECK-NEXT: %add = add nuw nsw i64 %ind, 1
+; CHECK-NEXT: %gep.d = getelementptr inbounds i32, ptr %d, i64 %ind
+; CHECK-NEXT: %loadD = load i32, ptr %gep.d, align 4
+; CHECK-NEXT: %mul = mul i64 %ind, %stride
+; CHECK-NEXT: %gep.strided.a = getelementptr inbounds i32, ptr %a, i64 %mul
+; CHECK-NEXT: %load.strided.a = load i32, ptr %gep.strided.a, align 4
+; CHECK-NEXT: %mul.c = mul i32 %loadD, %load.strided.a
+; CHECK-NEXT: %gep.c = getelementptr inbounds i32, ptr %c, i64 %ind
+; CHECK-NEXT: store i32 %mul.c, ptr %gep.c, align 4
+; CHECK-NEXT: %exitcond = icmp eq i64 %add, 20
+; CHECK-NEXT: br i1 %exitcond, label %exit.loopexit1, label %for.body
+;
entry:
br label %for.body
diff --git a/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/Inputs/loop-distribute.ll b/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/Inputs/loop-distribute.ll
new file mode 100644
index 0000000000000..48f80533c6379
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/Inputs/loop-distribute.ll
@@ -0,0 +1,27 @@
+; RUN: opt -passes=loop-distribute -enable-loop-distribute \
+; RUN: -debug-only=loop-distribute -disable-output 2>&1 %s | FileCheck %s
+
+define void @ldist(i1 %c, ptr %A, ptr %B, ptr %C) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %if.end, %entry
+ %iv = phi i16 [ 0, %entry ], [ %iv.next, %if.end ]
+ %lv = load i16, ptr %A, align 1
+ store i16 %lv, ptr %A, align 1
+ br i1 %c, label %if.then, label %if.end
+
+if.then: ; preds = %for.body
+ %lv2 = load i16, ptr %A, align 1
+ br label %if.end
+
+if.end: ; preds = %if.then, %for.body
+ %c.sink = phi ptr [ %B, %if.then ], [ %C, %for.body ]
+ %lv3 = load i16, ptr %c.sink
+ %iv.next = add nuw nsw i16 %iv, 1
+ %tobool.not = icmp eq i16 %iv.next, 1000
+ br i1 %tobool.not, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %if.end
+ ret void
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/Inputs/loop-distribute.ll.expected b/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/Inputs/loop-distribute.ll.expected
new file mode 100644
index 0000000000000..baef851b84ee5
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/Inputs/loop-distribute.ll.expected
@@ -0,0 +1,118 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt -passes=loop-distribute -enable-loop-distribute \
+; RUN: -debug-only=loop-distribute -disable-output 2>&1 %s | FileCheck %s
+
+define void @ldist(i1 %c, ptr %A, ptr %B, ptr %C) {
+; CHECK-LABEL: 'ldist'
+; CHECK-NEXT: LDist: Found a candidate loop: for.body
+; CHECK-NEXT: LDist: Backward dependences:
+; CHECK-NEXT: Unknown:
+; CHECK-NEXT: %lv = load i16, ptr %A, align 1 ->
+; CHECK-NEXT: store i16 %lv, ptr %A, align 1
+; CHECK-NEXT: Unknown:
+; CHECK-NEXT: store i16 %lv, ptr %A, align 1 ->
+; CHECK-NEXT: %lv2 = load i16, ptr %A, align 1
+; CHECK-NEXT: LDist: Seeded partitions:
+; CHECK-NEXT: LDist: Partition 0: (cycle)
+; CHECK-NEXT: for.body: %lv = load i16, ptr %A, align 1
+; CHECK-NEXT: for.body: store i16 %lv, ptr %A, align 1
+; CHECK-NEXT: if.then: %lv2 = load i16, ptr %A, align 1
+; CHECK-NEXT: LDist: Partition 1:
+; CHECK-NEXT: if.end: %lv3 = load i16, ptr %c.sink, align 2
+; CHECK-NEXT: LDist: Partition 2:
+; CHECK-NEXT: if.end: %lv3 = load i16, ptr %c.sink, align 2
+; CHECK-NEXT: LDist: Merged partitions:
+; CHECK-NEXT: LDist: Partition 0: (cycle)
+; CHECK-NEXT: for.body: %lv = load i16, ptr %A, align 1
+; CHECK-NEXT: for.body: store i16 %lv, ptr %A, align 1
+; CHECK-NEXT: if.then: %lv2 = load i16, ptr %A, align 1
+; CHECK-NEXT: LDist: Partition 1:
+; CHECK-NEXT: if.end: %lv3 = load i16, ptr %c.sink, align 2
+; CHECK-NEXT: LDist: Populated partitions:
+; CHECK-NEXT: LDist: Partition 0: (cycle)
+; CHECK-NEXT: for.body: %lv = load i16, ptr %A, align 1
+; CHECK-NEXT: for.body: store i16 %lv, ptr %A, align 1
+; CHECK-NEXT: if.then: %lv2 = load i16, ptr %A, align 1
+; CHECK-NEXT: for.body: br i1 %c, label %if.then, label %if.end
+; CHECK-NEXT: if.then: br label %if.end
+; CHECK-NEXT: if.end: br i1 %tobool.not, label %for.end.loopexit, label %for.body
+; CHECK-NEXT: if.end: %tobool.not = icmp eq i16 %iv.next, 1000
+; CHECK-NEXT: if.end: %iv.next = add nuw nsw i16 %iv, 1
+; CHECK-NEXT: for.body: %iv = phi i16 [ 0, %entry ], [ %iv.next, %if.end ]
+; CHECK-NEXT: LDist: Partition 1:
+; CHECK-NEXT: if.end: %lv3 = load i16, ptr %c.sink, align 2
+; CHECK-NEXT: for.body: br i1 %c, label %if.then, label %if.end
+; CHECK-NEXT: if.then: br label %if.end
+; CHECK-NEXT: if.end: br i1 %tobool.not, label %for.end.loopexit, label %for.body
+; CHECK-NEXT: if.end: %tobool.not = icmp eq i16 %iv.next, 1000
+; CHECK-NEXT: if.end: %iv.next = add nuw nsw i16 %iv, 1
+; CHECK-NEXT: for.body: %iv = phi i16 [ 0, %entry ], [ %iv.next, %if.end ]
+; CHECK-NEXT: if.end: %c.sink = phi ptr [ %B, %if.then ], [ %C, %for.body ]
+; CHECK-NEXT: LDist: Distributing loop: for.body
+; CHECK-NEXT: LDist: Pointers:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group ([[GRP1:0x[0-9a-f]+]]):
+; CHECK-NEXT: ptr %A
+; CHECK-NEXT: ptr %A
+; CHECK-NEXT: Against group ([[GRP2:0x[0-9a-f]+]]):
+; CHECK-NEXT: ptr %C
+; CHECK-NEXT: Check 1:
+; CHECK-NEXT: Comparing group ([[GRP1]]):
+; CHECK-NEXT: ptr %A
+; CHECK-NEXT: ptr %A
+; CHECK-NEXT: Against group ([[GRP3:0x[0-9a-f]+]]):
+; CHECK-NEXT: ptr %B
+; CHECK-NEXT: LDist: After removing unused Instrs:
+; CHECK-NEXT: LDist: Partition 0:
+; CHECK-NEXT: for.body.ldist1: ; preds = %if.end.ldist1, %for.body.ph.ldist1
+; CHECK-NEXT: %iv.ldist1 = phi i16 [ 0, %for.body.ph.ldist1 ], [ %iv.next.ldist1, %if.end.ldist1 ]
+; CHECK-NEXT: %lv.ldist1 = load i16, ptr %A, align 1, !alias.scope !0, !noalias !3
+; CHECK-NEXT: store i16 %lv.ldist1, ptr %A, align 1, !alias.scope !0, !noalias !3
+; CHECK-NEXT: br i1 %c, label %if.then.ldist1, label %if.end.ldist1
+; CHECK-EMPTY:
+; CHECK-NEXT: if.then.ldist1: ; preds = %for.body.ldist1
+; CHECK-NEXT: %lv2.ldist1 = load i16, ptr %A, align 1, !alias.scope !0, !noalias !3
+; CHECK-NEXT: br label %if.end.ldist1
+; CHECK-EMPTY:
+; CHECK-NEXT: if.end.ldist1: ; preds = %if.then.ldist1, %for.body.ldist1
+; CHECK-NEXT: %iv.next.ldist1 = add nuw nsw i16 %iv.ldist1, 1
+; CHECK-NEXT: %tobool.not.ldist1 = icmp eq i16 %iv.next.ldist1, 1000
+; CHECK-NEXT: br i1 %tobool.not.ldist1, label %for.body.ph, label %for.body.ldist1
+; CHECK-NEXT: LDist: Partition 1:
+; CHECK-NEXT: for.body: ; preds = %if.end, %for.body.ph
+; CHECK-NEXT: %iv = phi i16 [ 0, %for.body.ph ], [ %iv.next, %if.end ]
+; CHECK-NEXT: br i1 %c, label %if.then, label %if.end
+; CHECK-EMPTY:
+; CHECK-NEXT: if.then: ; preds = %for.body
+; CHECK-NEXT: br label %if.end
+; CHECK-EMPTY:
+; CHECK-NEXT: if.end: ; preds = %if.then, %for.body
+; CHECK-NEXT: %c.sink = phi ptr [ %B, %if.then ], [ %C, %for.body ]
+; CHECK-NEXT: %lv3 = load i16, ptr %c.sink, align 2
+; CHECK-NEXT: %iv.next = add nuw nsw i16 %iv, 1
+; CHECK-NEXT: %tobool.not = icmp eq i16 %iv.next, 1000
+; CHECK-NEXT: br i1 %tobool.not, label %for.end.loopexit.loopexit6, label %for.body
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %if.end, %entry
+ %iv = phi i16 [ 0, %entry ], [ %iv.next, %if.end ]
+ %lv = load i16, ptr %A, align 1
+ store i16 %lv, ptr %A, align 1
+ br i1 %c, label %if.then, label %if.end
+
+if.then: ; preds = %for.body
+ %lv2 = load i16, ptr %A, align 1
+ br label %if.end
+
+if.end: ; preds = %if.then, %for.body
+ %c.sink = phi ptr [ %B, %if.then ], [ %C, %for.body ]
+ %lv3 = load i16, ptr %c.sink
+ %iv.next = add nuw nsw i16 %iv, 1
+ %tobool.not = icmp eq i16 %iv.next, 1000
+ br i1 %tobool.not, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %if.end
+ ret void
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/loop-distribute.test b/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/loop-distribute.test
new file mode 100644
index 0000000000000..70df83077b454
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_analyze_test_checks/loop-distribute.test
@@ -0,0 +1,8 @@
+# REQUIRES: asserts
+
+## Basic test checking that update_analyze_test_checks.py works correctly
+# RUN: cp -f %S/Inputs/loop-distribute.ll %t.ll && %update_analyze_test_checks %t.ll
+# RUN: diff -u %t.ll %S/Inputs/loop-distribute.ll.expected
+## Check that running the script again does not change the result:
+# RUN: %update_analyze_test_checks %t.ll
+# RUN: diff -u %t.ll %S/Inputs/loop-distribute.ll.expected
diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py
index 85c129488d950..eb212ed304e9d 100644
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -569,7 +569,7 @@ def invoke_tool(exe, cmd_args, ir, preprocess_cmd=None, verbose=False):
flags=(re.X | re.S),
)
-LV_DEBUG_RE = re.compile(
+LOOP_PASS_DEBUG_RE = re.compile(
r"^\s*\'(?P<func>[\w.$-]+?)\'[^\n]*" r"\s*\n(?P<body>.*)$", flags=(re.X | re.S)
)
@@ -973,6 +973,7 @@ class NamelessValue:
name (as in e.g. `@some_global` or `%x`) or just a number (as in e.g. `%12`
or `!4`).
"""
+
def __init__(
self,
check_prefix,
@@ -1635,8 +1636,9 @@ def generalize_check_lines(
regexp = ginfo.get_regexp()
multiple_braces_re = re.compile(r"({{+)|(}}+)")
+
def escape_braces(match_obj):
- return '{{' + re.escape(match_obj.group(0)) + '}}'
+ return "{{" + re.escape(match_obj.group(0)) + "}}"
if ginfo.is_ir():
for i, line in enumerate(lines):
diff --git a/llvm/utils/update_analyze_test_checks.py b/llvm/utils/update_analyze_test_checks.py
index 47506626a0a58..d356ebead0d81 100755
--- a/llvm/utils/update_analyze_test_checks.py
+++ b/llvm/utils/update_analyze_test_checks.py
@@ -134,13 +134,15 @@ def main():
raw_tool_output,
prefixes,
)
- elif re.search(r"LV: Checking a loop in ", raw_tool_outputs) is not None:
- # Split analysis outputs by "Printing analysis " declarations.
+ elif (
+ re.search(r"(LV|LDist): Checking a loop in ", raw_tool_outputs)
+ is not None
+ ):
for raw_tool_output in re.split(
- r"LV: Checking a loop in ", raw_tool_outputs
+ r"(LV|LDist): Checking a loop in ", raw_tool_outputs
):
builder.process_run_line(
- common.LV_DEBUG_RE,
+ common.LOOP_PASS_DEBUG_RE,
common.scrub_body,
raw_tool_output,
prefixes,
From f819302a09dfec201f3ee4ef79b77a1e4c1de00d Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Wed, 3 Jul 2024 10:51:25 +0100
Subject: [PATCH 098/246] mlir/Presburger: reinstate use of LogicalResult
(#97415)
Follow up on a desire expressed after landing d0fee98 (mlir/Presburger: strip
dependency on MLIRSupport) by reinstating the use of LogicalResult in
Presburger. Since db791b2 (mlir/LogicalResult: move into llvm), LogicalResult
lives in LLVM, so this is possible while still maintaining the goal of
stripping the Presburger library of mlir dependencies.
---
.../Analysis/Presburger/IntegerRelation.h | 12 ++--
.../mlir/Analysis/Presburger/Simplex.h | 12 ++--
.../Analysis/FlatLinearValueConstraints.cpp | 4 +-
.../Analysis/Presburger/IntegerRelation.cpp | 26 ++++----
.../Presburger/PresburgerRelation.cpp | 59 ++++++++++---------
mlir/lib/Analysis/Presburger/Simplex.cpp | 57 +++++++++---------
mlir/lib/Analysis/Presburger/Utils.cpp | 30 +++++-----
7 files changed, 106 insertions(+), 94 deletions(-)
diff --git a/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h b/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
index 5e5cd898b7518..a27fc8c37eeda 100644
--- a/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
+++ b/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
@@ -21,13 +21,17 @@
#include "mlir/Analysis/Presburger/Utils.h"
#include "llvm/ADT/DynamicAPInt.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/LogicalResult.h"
#include <optional>
namespace mlir {
namespace presburger {
using llvm::DynamicAPInt;
+using llvm::failure;
using llvm::int64fromDynamicAPInt;
+using llvm::LogicalResult;
using llvm::SmallVectorImpl;
+using llvm::success;
class IntegerRelation;
class IntegerPolyhedron;
@@ -478,7 +482,7 @@ class IntegerRelation {
/// equality detection; if successful, the constant is substituted for the
/// variable everywhere in the constraint system and then removed from the
/// system.
- bool constantFoldVar(unsigned pos);
+ LogicalResult constantFoldVar(unsigned pos);
/// This method calls `constantFoldVar` for the specified range of variables,
/// `num` variables starting at position `pos`.
@@ -501,7 +505,7 @@ class IntegerRelation {
/// 3) this = {0 <= d0 <= 5, 1 <= d1 <= 9}
/// other = {2 <= d0 <= 6, 5 <= d1 <= 15},
/// output = {0 <= d0 <= 6, 1 <= d1 <= 15}
- bool unionBoundingBox(const IntegerRelation &other);
+ LogicalResult unionBoundingBox(const IntegerRelation &other);
/// Returns the smallest known constant bound for the extent of the specified
/// variable (pos^th), i.e., the smallest known constant that is greater
@@ -774,8 +778,8 @@ class IntegerRelation {
/// Eliminates a single variable at `position` from equality and inequality
/// constraints. Returns `success` if the variable was eliminated, and
/// `failure` otherwise.
- inline bool gaussianEliminateVar(unsigned position) {
- return gaussianEliminateVars(position, position + 1) == 1;
+ inline LogicalResult gaussianEliminateVar(unsigned position) {
+ return success(gaussianEliminateVars(position, position + 1) == 1);
}
/// Removes local variables using equalities. Each equality is checked if it
diff --git a/mlir/include/mlir/Analysis/Presburger/Simplex.h b/mlir/include/mlir/Analysis/Presburger/Simplex.h
index f413636e06910..4c40c4cdcb655 100644
--- a/mlir/include/mlir/Analysis/Presburger/Simplex.h
+++ b/mlir/include/mlir/Analysis/Presburger/Simplex.h
@@ -445,7 +445,7 @@ class LexSimplexBase : public SimplexBase {
/// lexicopositivity of the basis transform. The row must have a non-positive
/// sample value. If this is not possible, return failure. This occurs when
/// the constraints have no solution or the sample value is zero.
- bool moveRowUnknownToColumn(unsigned row);
+ LogicalResult moveRowUnknownToColumn(unsigned row);
/// Given a row that has a non-integer sample value, add an inequality to cut
/// away this fractional sample value from the polytope without removing any
@@ -459,7 +459,7 @@ class LexSimplexBase : public SimplexBase {
///
/// Return failure if the tableau became empty, and success if it didn't.
/// Failure status indicates that the polytope was integer empty.
- bool addCut(unsigned row);
+ LogicalResult addCut(unsigned row);
/// Undo the addition of the last constraint. This is only called while
/// rolling back.
@@ -511,7 +511,7 @@ class LexSimplex : public LexSimplexBase {
MaybeOptimum<SmallVector<Fraction, 8>> getRationalSample() const;
/// Make the tableau configuration consistent.
- bool restoreRationalConsistency();
+ LogicalResult restoreRationalConsistency();
/// Return whether the specified row is violated.
bool rowIsViolated(unsigned row) const;
@@ -626,7 +626,7 @@ class SymbolicLexSimplex : public LexSimplexBase {
/// Return failure if the tableau became empty, indicating that the polytope
/// is always integer empty in the current symbol domain.
/// Return success otherwise.
- bool doNonBranchingPivots();
+ LogicalResult doNonBranchingPivots();
/// Get a row that is always violated in the current domain, if one exists.
std::optional<unsigned> maybeGetAlwaysViolatedRow();
@@ -647,7 +647,7 @@ class SymbolicLexSimplex : public LexSimplexBase {
/// at the time of the call. (This function may modify the symbol domain, but
/// failure status indicates that the polytope was empty for all symbol values
/// in the initial domain.)
- bool addSymbolicCut(unsigned row);
+ LogicalResult addSymbolicCut(unsigned row);
/// Get the numerator of the symbolic sample of the specific row.
/// This is an affine expression in the symbols with integer coefficients.
@@ -820,7 +820,7 @@ class Simplex : public SimplexBase {
///
/// Returns success if the unknown was successfully restored to a non-negative
/// sample value, failure otherwise.
- bool restoreRow(Unknown &u);
+ LogicalResult restoreRow(Unknown &u);
/// Find a pivot to change the sample value of row in the specified
/// direction while preserving tableau consistency, except that if the
diff --git a/mlir/lib/Analysis/FlatLinearValueConstraints.cpp b/mlir/lib/Analysis/FlatLinearValueConstraints.cpp
index 746cff525beb2..e628fb152b52f 100644
--- a/mlir/lib/Analysis/FlatLinearValueConstraints.cpp
+++ b/mlir/lib/Analysis/FlatLinearValueConstraints.cpp
@@ -1247,10 +1247,10 @@ LogicalResult FlatLinearValueConstraints::unionBoundingBox(
if (!areVarsAligned(*this, otherCst)) {
FlatLinearValueConstraints otherCopy(otherCst);
mergeAndAlignVars(/*offset=*/getNumDimVars(), this, &otherCopy);
- return success(IntegerPolyhedron::unionBoundingBox(otherCopy));
+ return IntegerPolyhedron::unionBoundingBox(otherCopy);
}
- return success(IntegerPolyhedron::unionBoundingBox(otherCst));
+ return IntegerPolyhedron::unionBoundingBox(otherCst);
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
index 6b438692ff6f9..095a7dcb287f3 100644
--- a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
+++ b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -1552,22 +1553,22 @@ static int findEqualityToConstant(const IntegerRelation &cst, unsigned pos,
return -1;
}
-bool IntegerRelation::constantFoldVar(unsigned pos) {
+LogicalResult IntegerRelation::constantFoldVar(unsigned pos) {
assert(pos < getNumVars() && "invalid position");
int rowIdx;
if ((rowIdx = findEqualityToConstant(*this, pos)) == -1)
- return false;
+ return failure();
// atEq(rowIdx, pos) is either -1 or 1.
assert(atEq(rowIdx, pos) * atEq(rowIdx, pos) == 1);
DynamicAPInt constVal = -atEq(rowIdx, getNumCols() - 1) / atEq(rowIdx, pos);
setAndEliminate(pos, constVal);
- return true;
+ return success();
}
void IntegerRelation::constantFoldVarRange(unsigned pos, unsigned num) {
for (unsigned s = pos, t = pos, e = pos + num; s < e; s++) {
- if (!constantFoldVar(t))
+ if (constantFoldVar(t).failed())
t++;
}
}
@@ -1944,9 +1945,9 @@ void IntegerRelation::fourierMotzkinEliminate(unsigned pos, bool darkShadow,
for (unsigned r = 0, e = getNumEqualities(); r < e; r++) {
if (atEq(r, pos) != 0) {
// Use Gaussian elimination here (since we have an equality).
- bool ret = gaussianEliminateVar(pos);
+ LogicalResult ret = gaussianEliminateVar(pos);
(void)ret;
- assert(ret && "Gaussian elimination guaranteed to succeed");
+ assert(ret.succeeded() && "Gaussian elimination guaranteed to succeed");
LLVM_DEBUG(llvm::dbgs() << "FM output (through Gaussian elimination):\n");
LLVM_DEBUG(dump());
return;
@@ -2173,7 +2174,8 @@ static void getCommonConstraints(const IntegerRelation &a,
// Computes the bounding box with respect to 'other' by finding the min of the
// lower bounds and the max of the upper bounds along each of the dimensions.
-bool IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
+LogicalResult
+IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
assert(space.isEqual(otherCst.getSpace()) && "Spaces should match.");
assert(getNumLocalVars() == 0 && "local ids not supported yet here");
@@ -2201,13 +2203,13 @@ bool IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
if (!extent.has_value())
// TODO: symbolic extents when necessary.
// TODO: handle union if a dimension is unbounded.
- return false;
+ return failure();
auto otherExtent = otherCst.getConstantBoundOnDimSize(
d, &otherLb, &otherLbFloorDivisor, &otherUb);
if (!otherExtent.has_value() || lbFloorDivisor != otherLbFloorDivisor)
// TODO: symbolic extents when necessary.
- return false;
+ return failure();
assert(lbFloorDivisor > 0 && "divisor always expected to be positive");
@@ -2227,7 +2229,7 @@ bool IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
auto constLb = getConstantBound(BoundType::LB, d);
auto constOtherLb = otherCst.getConstantBound(BoundType::LB, d);
if (!constLb.has_value() || !constOtherLb.has_value())
- return false;
+ return failure();
std::fill(minLb.begin(), minLb.end(), 0);
minLb.back() = std::min(*constLb, *constOtherLb);
}
@@ -2243,7 +2245,7 @@ bool IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
auto constUb = getConstantBound(BoundType::UB, d);
auto constOtherUb = otherCst.getConstantBound(BoundType::UB, d);
if (!constUb.has_value() || !constOtherUb.has_value())
- return false;
+ return failure();
std::fill(maxUb.begin(), maxUb.end(), 0);
maxUb.back() = std::max(*constUb, *constOtherUb);
}
@@ -2281,7 +2283,7 @@ bool IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
// union (since the above are just the union along dimensions); we shouldn't
// be discarding any other constraints on the symbols.
- return true;
+ return success();
}
bool IntegerRelation::isColZero(unsigned pos) const {
diff --git a/mlir/lib/Analysis/Presburger/PresburgerRelation.cpp b/mlir/lib/Analysis/Presburger/PresburgerRelation.cpp
index 5c4965c919ac3..e284ca82420ba 100644
--- a/mlir/lib/Analysis/Presburger/PresburgerRelation.cpp
+++ b/mlir/lib/Analysis/Presburger/PresburgerRelation.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
@@ -753,18 +754,18 @@ class presburger::SetCoalescer {
/// \___\|/ \_____/
///
///
- bool coalescePairCutCase(unsigned i, unsigned j);
+ LogicalResult coalescePairCutCase(unsigned i, unsigned j);
/// Types the inequality `ineq` according to its `IneqType` for `simp` into
/// `redundantIneqsB` and `cuttingIneqsB`. Returns success, if no separate
/// inequalities were encountered. Otherwise, returns failure.
- bool typeInequality(ArrayRef<DynamicAPInt> ineq, Simplex &simp);
+ LogicalResult typeInequality(ArrayRef<DynamicAPInt> ineq, Simplex &simp);
/// Types the equality `eq`, i.e. for `eq` == 0, types both `eq` >= 0 and
/// -`eq` >= 0 according to their `IneqType` for `simp` into
/// `redundantIneqsB` and `cuttingIneqsB`. Returns success, if no separate
/// inequalities were encountered. Otherwise, returns failure.
- bool typeEquality(ArrayRef<DynamicAPInt> eq, Simplex &simp);
+ LogicalResult typeEquality(ArrayRef<DynamicAPInt> eq, Simplex &simp);
/// Replaces the element at position `i` with the last element and erases
/// the last element for both `disjuncts` and `simplices`.
@@ -775,7 +776,7 @@ class presburger::SetCoalescer {
/// successfully coalesced. The simplices in `simplices` need to be the ones
/// constructed from `disjuncts`. At this point, there are no empty
/// disjuncts in `disjuncts` left.
- bool coalescePair(unsigned i, unsigned j);
+ LogicalResult coalescePair(unsigned i, unsigned j);
};
/// Constructs a `SetCoalescer` from a `PresburgerRelation`. Only adds non-empty
@@ -818,7 +819,7 @@ PresburgerRelation SetCoalescer::coalesce() {
cuttingIneqsB.clear();
if (i == j)
continue;
- if (coalescePair(i, j)) {
+ if (coalescePair(i, j).succeeded()) {
broken = true;
break;
}
@@ -902,7 +903,7 @@ void SetCoalescer::addCoalescedDisjunct(unsigned i, unsigned j,
/// \___\|/ \_____/
///
///
-bool SetCoalescer::coalescePairCutCase(unsigned i, unsigned j) {
+LogicalResult SetCoalescer::coalescePairCutCase(unsigned i, unsigned j) {
/// All inequalities of `b` need to be redundant. We already know that the
/// redundant ones are, so only the cutting ones remain to be checked.
Simplex &simp = simplices[i];
@@ -910,7 +911,7 @@ bool SetCoalescer::coalescePairCutCase(unsigned i, unsigned j) {
if (llvm::any_of(cuttingIneqsA, [this, &simp](ArrayRef<DynamicAPInt> curr) {
return !isFacetContained(curr, simp);
}))
- return false;
+ return failure();
IntegerRelation newSet(disjunct.getSpace());
for (ArrayRef<DynamicAPInt> curr : redundantIneqsA)
@@ -920,23 +921,25 @@ bool SetCoalescer::coalescePairCutCase(unsigned i, unsigned j) {
newSet.addInequality(curr);
addCoalescedDisjunct(i, j, newSet);
- return true;
+ return success();
}
-bool SetCoalescer::typeInequality(ArrayRef<DynamicAPInt> ineq, Simplex &simp) {
+LogicalResult SetCoalescer::typeInequality(ArrayRef<DynamicAPInt> ineq,
+ Simplex &simp) {
Simplex::IneqType type = simp.findIneqType(ineq);
if (type == Simplex::IneqType::Redundant)
redundantIneqsB.push_back(ineq);
else if (type == Simplex::IneqType::Cut)
cuttingIneqsB.push_back(ineq);
else
- return false;
- return true;
+ return failure();
+ return success();
}
-bool SetCoalescer::typeEquality(ArrayRef<DynamicAPInt> eq, Simplex &simp) {
- if (!typeInequality(eq, simp))
- return false;
+LogicalResult SetCoalescer::typeEquality(ArrayRef<DynamicAPInt> eq,
+ Simplex &simp) {
+ if (typeInequality(eq, simp).failed())
+ return failure();
negEqs.push_back(getNegatedCoeffs(eq));
ArrayRef<DynamicAPInt> inv(negEqs.back());
return typeInequality(inv, simp);
@@ -951,7 +954,7 @@ void SetCoalescer::eraseDisjunct(unsigned i) {
simplices.pop_back();
}
-bool SetCoalescer::coalescePair(unsigned i, unsigned j) {
+LogicalResult SetCoalescer::coalescePair(unsigned i, unsigned j) {
IntegerRelation &a = disjuncts[i];
IntegerRelation &b = disjuncts[j];
@@ -959,7 +962,7 @@ bool SetCoalescer::coalescePair(unsigned i, unsigned j) {
/// skipped.
/// TODO: implement local id support.
if (a.getNumLocalVars() != 0 || b.getNumLocalVars() != 0)
- return false;
+ return failure();
Simplex &simpA = simplices[i];
Simplex &simpB = simplices[j];
@@ -969,34 +972,34 @@ bool SetCoalescer::coalescePair(unsigned i, unsigned j) {
// inequality is encountered during typing, the two IntegerRelations
// cannot be coalesced.
for (int k = 0, e = a.getNumInequalities(); k < e; ++k)
- if (!typeInequality(a.getInequality(k), simpB))
- return false;
+ if (typeInequality(a.getInequality(k), simpB).failed())
+ return failure();
for (int k = 0, e = a.getNumEqualities(); k < e; ++k)
- if (!typeEquality(a.getEquality(k), simpB))
- return false;
+ if (typeEquality(a.getEquality(k), simpB).failed())
+ return failure();
std::swap(redundantIneqsA, redundantIneqsB);
std::swap(cuttingIneqsA, cuttingIneqsB);
for (int k = 0, e = b.getNumInequalities(); k < e; ++k)
- if (!typeInequality(b.getInequality(k), simpA))
- return false;
+ if (typeInequality(b.getInequality(k), simpA).failed())
+ return failure();
for (int k = 0, e = b.getNumEqualities(); k < e; ++k)
- if (!typeEquality(b.getEquality(k), simpA))
- return false;
+ if (typeEquality(b.getEquality(k), simpA).failed())
+ return failure();
// If there are no cutting inequalities of `a`, `b` is contained
// within `a`.
if (cuttingIneqsA.empty()) {
eraseDisjunct(j);
- return true;
+ return success();
}
// Try to apply the cut case
- if (coalescePairCutCase(i, j))
- return true;
+ if (coalescePairCutCase(i, j).succeeded())
+ return success();
// Swap the vectors to compare the pair (j,i) instead of (i,j).
std::swap(redundantIneqsA, redundantIneqsB);
@@ -1006,7 +1009,7 @@ bool SetCoalescer::coalescePair(unsigned i, unsigned j) {
// within `a`.
if (cuttingIneqsA.empty()) {
eraseDisjunct(i);
- return true;
+ return success();
}
// Try to apply the cut case
diff --git a/mlir/lib/Analysis/Presburger/Simplex.cpp b/mlir/lib/Analysis/Presburger/Simplex.cpp
index 4efc7a3755014..bebbf0325f430 100644
--- a/mlir/lib/Analysis/Presburger/Simplex.cpp
+++ b/mlir/lib/Analysis/Presburger/Simplex.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
@@ -229,7 +230,7 @@ Direction flippedDirection(Direction direction) {
/// add these to the set of ignored columns and continue to the next row. If we
/// run out of rows, then A*y is zero and we are done.
MaybeOptimum<SmallVector<Fraction, 8>> LexSimplex::findRationalLexMin() {
- if (!restoreRationalConsistency()) {
+ if (restoreRationalConsistency().failed()) {
markEmpty();
return OptimumKind::Empty;
}
@@ -274,7 +275,7 @@ MaybeOptimum<SmallVector<Fraction, 8>> LexSimplex::findRationalLexMin() {
///
/// The constraint is violated when added (it would be useless otherwise)
/// so we immediately try to move it to a column.
-bool LexSimplexBase::addCut(unsigned row) {
+LogicalResult LexSimplexBase::addCut(unsigned row) {
DynamicAPInt d = tableau(row, 0);
unsigned cutRow = addZeroRow(/*makeRestricted=*/true);
tableau(cutRow, 0) = d;
@@ -301,7 +302,7 @@ std::optional<unsigned> LexSimplex::maybeGetNonIntegralVarRow() const {
MaybeOptimum<SmallVector<DynamicAPInt, 8>> LexSimplex::findIntegerLexMin() {
// We first try to make the tableau consistent.
- if (!restoreRationalConsistency())
+ if (restoreRationalConsistency().failed())
return OptimumKind::Empty;
// Then, if the sample value is integral, we are done.
@@ -316,9 +317,9 @@ MaybeOptimum<SmallVector<DynamicAPInt, 8>> LexSimplex::findIntegerLexMin() {
//
// Failure indicates that the tableau became empty, which occurs when the
// polytope is integer empty.
- if (!addCut(*maybeRow))
+ if (addCut(*maybeRow).failed())
return OptimumKind::Empty;
- if (!restoreRationalConsistency())
+ if (restoreRationalConsistency().failed())
return OptimumKind::Empty;
}
@@ -411,7 +412,7 @@ bool SymbolicLexSimplex::isSymbolicSampleIntegral(unsigned row) const {
/// (sum_i (b_i%d)y_i - (-c%d) - sum_i (-a_i%d)s_i + q*d)/d >= 0
/// This constraint is violated when added so we immediately try to move it to a
/// column.
-bool SymbolicLexSimplex::addSymbolicCut(unsigned row) {
+LogicalResult SymbolicLexSimplex::addSymbolicCut(unsigned row) {
DynamicAPInt d = tableau(row, 0);
if (isRangeDivisibleBy(tableau.getRow(row).slice(3, nSymbol), d)) {
// The coefficients of symbols in the symbol numerator are divisible
@@ -523,11 +524,11 @@ std::optional<unsigned> SymbolicLexSimplex::maybeGetNonIntegralVarRow() {
/// The non-branching pivots are just the ones moving the rows
/// that are always violated in the symbol domain.
-bool SymbolicLexSimplex::doNonBranchingPivots() {
+LogicalResult SymbolicLexSimplex::doNonBranchingPivots() {
while (std::optional<unsigned> row = maybeGetAlwaysViolatedRow())
- if (!moveRowUnknownToColumn(*row))
- return false;
- return true;
+ if (moveRowUnknownToColumn(*row).failed())
+ return failure();
+ return success();
}
SymbolicLexOpt SymbolicLexSimplex::computeSymbolicIntegerLexMin() {
@@ -567,7 +568,7 @@ SymbolicLexOpt SymbolicLexSimplex::computeSymbolicIntegerLexMin() {
continue;
}
- if (!doNonBranchingPivots()) {
+ if (doNonBranchingPivots().failed()) {
// Could not find pivots for violated constraints; return.
--level;
continue;
@@ -627,7 +628,7 @@ SymbolicLexOpt SymbolicLexSimplex::computeSymbolicIntegerLexMin() {
// The tableau is rationally consistent for the current domain.
// Now we look for non-integral sample values and add cuts for them.
if (std::optional<unsigned> row = maybeGetNonIntegralVarRow()) {
- if (!addSymbolicCut(*row)) {
+ if (addSymbolicCut(*row).failed()) {
// No integral points; return.
--level;
continue;
@@ -661,7 +662,7 @@ SymbolicLexOpt SymbolicLexSimplex::computeSymbolicIntegerLexMin() {
SmallVector<DynamicAPInt, 8> splitIneq =
getComplementIneq(getSymbolicSampleIneq(u.pos));
normalizeRange(splitIneq);
- if (!moveRowUnknownToColumn(u.pos)) {
+ if (moveRowUnknownToColumn(u.pos).failed()) {
// The unknown can't be made non-negative; return.
--level;
continue;
@@ -699,13 +700,13 @@ std::optional<unsigned> LexSimplex::maybeGetViolatedRow() const {
/// We simply look for violated rows and keep trying to move them to column
/// orientation, which always succeeds unless the constraints have no solution
/// in which case we just give up and return.
-bool LexSimplex::restoreRationalConsistency() {
+LogicalResult LexSimplex::restoreRationalConsistency() {
if (empty)
- return false;
+ return failure();
while (std::optional<unsigned> maybeViolatedRow = maybeGetViolatedRow())
- if (!moveRowUnknownToColumn(*maybeViolatedRow))
- return false;
- return true;
+ if (moveRowUnknownToColumn(*maybeViolatedRow).failed())
+ return failure();
+ return success();
}
// Move the row unknown to column orientation while preserving lexicopositivity
@@ -770,7 +771,7 @@ bool LexSimplex::restoreRationalConsistency() {
// which is in contradiction to the fact that B.col(j) / B(i,j) must be
// lexicographically smaller than B.col(k) / B(i,k), since it lexicographically
// minimizes the change in sample value.
-bool LexSimplexBase::moveRowUnknownToColumn(unsigned row) {
+LogicalResult LexSimplexBase::moveRowUnknownToColumn(unsigned row) {
std::optional<unsigned> maybeColumn;
for (unsigned col = 3 + nSymbol, e = getNumColumns(); col < e; ++col) {
if (tableau(row, col) <= 0)
@@ -780,10 +781,10 @@ bool LexSimplexBase::moveRowUnknownToColumn(unsigned row) {
}
if (!maybeColumn)
- return false;
+ return failure();
pivot(row, *maybeColumn);
- return true;
+ return success();
}
unsigned LexSimplexBase::getLexMinPivotColumn(unsigned row, unsigned colA,
@@ -986,7 +987,7 @@ void SimplexBase::pivot(unsigned pivotRow, unsigned pivotCol) {
/// Perform pivots until the unknown has a non-negative sample value or until
/// no more upward pivots can be performed. Return success if we were able to
/// bring the row to a non-negative sample value, and failure otherwise.
-bool Simplex::restoreRow(Unknown &u) {
+LogicalResult Simplex::restoreRow(Unknown &u) {
assert(u.orientation == Orientation::Row &&
"unknown should be in row position");
@@ -997,9 +998,9 @@ bool Simplex::restoreRow(Unknown &u) {
pivot(*maybePivot);
if (u.orientation == Orientation::Column)
- return true; // the unknown is unbounded above.
+ return success(); // the unknown is unbounded above.
}
- return tableau(u.pos, 1) >= 0;
+ return success(tableau(u.pos, 1) >= 0);
}
/// Find a row that can be used to pivot the column in the specified direction.
@@ -1105,8 +1106,8 @@ void SimplexBase::markEmpty() {
/// empty and we mark it as such.
void Simplex::addInequality(ArrayRef<DynamicAPInt> coeffs) {
unsigned conIndex = addRow(coeffs, /*makeRestricted=*/true);
- bool result = restoreRow(con[conIndex]);
- if (!result)
+ LogicalResult result = restoreRow(con[conIndex]);
+ if (result.failed())
markEmpty();
}
@@ -1384,7 +1385,7 @@ MaybeOptimum<Fraction> Simplex::computeOptimum(Direction direction,
MaybeOptimum<Fraction> optimum = computeRowOptimum(direction, row);
if (u.restricted && direction == Direction::Down &&
(optimum.isUnbounded() || *optimum < Fraction(0, 1))) {
- if (!restoreRow(u))
+ if (restoreRow(u).failed())
llvm_unreachable("Could not restore row!");
}
return optimum;
@@ -1453,7 +1454,7 @@ void Simplex::detectRedundant(unsigned offset, unsigned count) {
if (minimum.isUnbounded() || *minimum < Fraction(0, 1)) {
// Constraint is unbounded below or can attain negative sample values and
// hence is not redundant.
- if (!restoreRow(u))
+ if (restoreRow(u).failed())
llvm_unreachable("Could not restore non-redundant row!");
continue;
}
diff --git a/mlir/lib/Analysis/Presburger/Utils.cpp b/mlir/lib/Analysis/Presburger/Utils.cpp
index 65190c6f07d4b..9b32972de2e0a 100644
--- a/mlir/lib/Analysis/Presburger/Utils.cpp
+++ b/mlir/lib/Analysis/Presburger/Utils.cpp
@@ -15,6 +15,7 @@
#include "mlir/Analysis/Presburger/PresburgerSpace.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
@@ -95,10 +96,10 @@ static void normalizeDivisionByGCD(MutableArrayRef<DynamicAPInt> dividend,
/// If successful, `expr` is set to dividend of the division and `divisor` is
/// set to the denominator of the division, which will be positive.
/// The final division expression is normalized by GCD.
-static bool getDivRepr(const IntegerRelation &cst, unsigned pos,
- unsigned ubIneq, unsigned lbIneq,
- MutableArrayRef<DynamicAPInt> expr,
- DynamicAPInt &divisor) {
+static LogicalResult getDivRepr(const IntegerRelation &cst, unsigned pos,
+ unsigned ubIneq, unsigned lbIneq,
+ MutableArrayRef<DynamicAPInt> expr,
+ DynamicAPInt &divisor) {
assert(pos <= cst.getNumVars() && "Invalid variable position");
assert(ubIneq <= cst.getNumInequalities() &&
@@ -120,7 +121,7 @@ static bool getDivRepr(const IntegerRelation &cst, unsigned pos,
break;
if (i < e)
- return false;
+ return failure();
// Then, check if the constant term is of the proper form.
// Due to the form of the upper/lower bound inequalities, the sum of their
@@ -132,7 +133,7 @@ static bool getDivRepr(const IntegerRelation &cst, unsigned pos,
// Check if `c` satisfies the condition `0 <= c <= divisor - 1`.
// This also implicitly checks that `divisor` is positive.
if (!(0 <= c && c <= divisor - 1)) // NOLINT
- return false;
+ return failure();
// The inequality pair can be used to extract the division.
// Set `expr` to the dividend of the division except the constant term, which
@@ -147,7 +148,7 @@ static bool getDivRepr(const IntegerRelation &cst, unsigned pos,
expr.back() = cst.atIneq(ubIneq, cst.getNumCols() - 1) + c;
normalizeDivisionByGCD(expr, divisor);
- return true;
+ return success();
}
/// Check if the pos^th variable can be represented as a division using
@@ -161,9 +162,10 @@ static bool getDivRepr(const IntegerRelation &cst, unsigned pos,
/// If successful, `expr` is set to the dividend of the division and `divisor` is
/// set to the denominator of the division. The final division expression is
/// normalized by GCD.
-static bool getDivRepr(const IntegerRelation &cst, unsigned pos, unsigned eqInd,
- MutableArrayRef<DynamicAPInt> expr,
- DynamicAPInt &divisor) {
+static LogicalResult getDivRepr(const IntegerRelation &cst, unsigned pos,
+ unsigned eqInd,
+ MutableArrayRef<DynamicAPInt> expr,
+ DynamicAPInt &divisor) {
assert(pos <= cst.getNumVars() && "Invalid variable position");
assert(eqInd <= cst.getNumEqualities() && "Invalid equality position");
@@ -174,7 +176,7 @@ static bool getDivRepr(const IntegerRelation &cst, unsigned pos, unsigned eqInd,
// Equality must involve the pos-th variable and hence `tempDiv` != 0.
DynamicAPInt tempDiv = cst.atEq(eqInd, pos);
if (tempDiv == 0)
- return false;
+ return failure();
int signDiv = tempDiv < 0 ? -1 : 1;
// The divisor is always a positive integer.
@@ -187,7 +189,7 @@ static bool getDivRepr(const IntegerRelation &cst, unsigned pos, unsigned eqInd,
expr.back() = -signDiv * cst.atEq(eqInd, cst.getNumCols() - 1);
normalizeDivisionByGCD(expr, divisor);
- return true;
+ return success();
}
// Returns `false` if the constraint depends on a variable for which an
@@ -238,7 +240,7 @@ MaybeLocalRepr presburger::computeSingleVarRepr(
for (unsigned ubPos : ubIndices) {
for (unsigned lbPos : lbIndices) {
// Attempt to get division representation from ubPos, lbPos.
- if (!getDivRepr(cst, pos, ubPos, lbPos, dividend, divisor))
+ if (getDivRepr(cst, pos, ubPos, lbPos, dividend, divisor).failed())
continue;
if (!checkExplicitRepresentation(cst, foundRepr, dividend, pos))
@@ -251,7 +253,7 @@ MaybeLocalRepr presburger::computeSingleVarRepr(
}
for (unsigned eqPos : eqIndices) {
// Attempt to get division representation from eqPos.
- if (!getDivRepr(cst, pos, eqPos, dividend, divisor))
+ if (getDivRepr(cst, pos, eqPos, dividend, divisor).failed())
continue;
if (!checkExplicitRepresentation(cst, foundRepr, dividend, pos))
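The bool-to-LogicalResult conversion above follows one mechanical idiom
throughout. A minimal self-contained sketch of that idiom, with
illustrative helper names that are not from the patch (only the
LogicalResult API itself is real):

#include "llvm/Support/LogicalResult.h"
using llvm::LogicalResult;
using llvm::failure;
using llvm::success;

// A fallible step: the old `bool` return ("true on success") becomes a
// LogicalResult, so a status can no longer be confused with a data value.
static LogicalResult tryStep(int X) {
  if (X < 0)
    return failure(); // was: return false;
  return success();   // was: return true;
}

// success(bool) wraps a predicate directly, as Simplex::restoreRow now
// does with `return success(tableau(u.pos, 1) >= 0);`.
static LogicalResult checkNonNegative(int V) { return success(V >= 0); }

// Callers query .failed() / .succeeded() instead of negating a bool.
static void run(int X) {
  if (tryStep(X).failed())
    return; // handle failure, e.g. addInequality calls markEmpty() here
}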
>From da24d3a79d73c725d1b672263e558a3de6cbcde9 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Wed, 3 Jul 2024 02:53:46 -0700
Subject: [PATCH 099/246] [AsmParser] Use range-based for loops (NFC) (#97499)
---
llvm/lib/AsmParser/LLParser.cpp | 71 ++++++++++++++++-----------------
1 file changed, 35 insertions(+), 36 deletions(-)
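For reference, the mechanical pattern applied to every loop in this
patch, shown on one representative instance from parseFunctionType:

  // Before: index-based iteration with manual bounds bookkeeping.
  for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
    ArgListTy.push_back(ArgList[i].Ty);

  // After: range-based iteration; same behavior, clearer intent.
  for (const ArgInfo &Arg : ArgList)
    ArgListTy.push_back(Arg.Ty);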
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 059d42d9faa7f..87cd8aac8d035 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -3293,17 +3293,16 @@ bool LLParser::parseFunctionType(Type *&Result) {
return true;
// Reject names on the argument lists.
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- if (!ArgList[i].Name.empty())
- return error(ArgList[i].Loc, "argument name invalid in function type");
- if (ArgList[i].Attrs.hasAttributes())
- return error(ArgList[i].Loc,
- "argument attributes invalid in function type");
+ for (const ArgInfo &Arg : ArgList) {
+ if (!Arg.Name.empty())
+ return error(Arg.Loc, "argument name invalid in function type");
+ if (Arg.Attrs.hasAttributes())
+ return error(Arg.Loc, "argument attributes invalid in function type");
}
SmallVector<Type*, 16> ArgListTy;
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
- ArgListTy.push_back(ArgList[i].Ty);
+ for (const ArgInfo &Arg : ArgList)
+ ArgListTy.push_back(Arg.Ty);
Result = FunctionType::get(Result, ArgListTy, IsVarArg);
return false;
@@ -6404,9 +6403,9 @@ bool LLParser::parseFunctionHeader(Function *&Fn, bool IsDefine,
std::vector<Type*> ParamTypeList;
SmallVector<AttributeSet, 8> Attrs;
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- ParamTypeList.push_back(ArgList[i].Ty);
- Attrs.push_back(ArgList[i].Attrs);
+ for (const ArgInfo &Arg : ArgList) {
+ ParamTypeList.push_back(Arg.Ty);
+ Attrs.push_back(Arg.Attrs);
}
AttributeList PAL =
@@ -7230,8 +7229,8 @@ bool LLParser::parseIndirectBr(Instruction *&Inst, PerFunctionState &PFS) {
return true;
IndirectBrInst *IBI = IndirectBrInst::Create(Address, DestList.size());
- for (unsigned i = 0, e = DestList.size(); i != e; ++i)
- IBI->addDestination(DestList[i]);
+ for (BasicBlock *Dest : DestList)
+ IBI->addDestination(Dest);
Inst = IBI;
return false;
}
@@ -7246,8 +7245,8 @@ bool LLParser::resolveFunctionType(Type *RetType,
if (!FuncTy) {
// Pull out the types of all of the arguments...
std::vector<Type*> ParamTypes;
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
- ParamTypes.push_back(ArgList[i].V->getType());
+ for (const ParamInfo &Arg : ArgList)
+ ParamTypes.push_back(Arg.V->getType());
if (!FunctionType::isValidReturnType(RetType))
return true;
@@ -7310,19 +7309,19 @@ bool LLParser::parseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
// correctly. Also, gather any parameter attributes.
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
+ for (const ParamInfo &Arg : ArgList) {
Type *ExpectedTy = nullptr;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
- return error(ArgList[i].Loc, "too many arguments specified");
+ return error(Arg.Loc, "too many arguments specified");
}
- if (ExpectedTy && ExpectedTy != ArgList[i].V->getType())
- return error(ArgList[i].Loc, "argument is not of expected type '" +
- getTypeString(ExpectedTy) + "'");
- Args.push_back(ArgList[i].V);
- ArgAttrs.push_back(ArgList[i].Attrs);
+ if (ExpectedTy && ExpectedTy != Arg.V->getType())
+ return error(Arg.Loc, "argument is not of expected type '" +
+ getTypeString(ExpectedTy) + "'");
+ Args.push_back(Arg.V);
+ ArgAttrs.push_back(Arg.Attrs);
}
if (I != E)
@@ -7623,19 +7622,19 @@ bool LLParser::parseCallBr(Instruction *&Inst, PerFunctionState &PFS) {
// correctly. Also, gather any parameter attributes.
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
+ for (const ParamInfo &Arg : ArgList) {
Type *ExpectedTy = nullptr;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
- return error(ArgList[i].Loc, "too many arguments specified");
+ return error(Arg.Loc, "too many arguments specified");
}
- if (ExpectedTy && ExpectedTy != ArgList[i].V->getType())
- return error(ArgList[i].Loc, "argument is not of expected type '" +
- getTypeString(ExpectedTy) + "'");
- Args.push_back(ArgList[i].V);
- ArgAttrs.push_back(ArgList[i].Attrs);
+ if (ExpectedTy && ExpectedTy != Arg.V->getType())
+ return error(Arg.Loc, "argument is not of expected type '" +
+ getTypeString(ExpectedTy) + "'");
+ Args.push_back(Arg.V);
+ ArgAttrs.push_back(Arg.Attrs);
}
if (I != E)
@@ -8018,19 +8017,19 @@ bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
// correctly. Also, gather any parameter attributes.
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
+ for (const ParamInfo &Arg : ArgList) {
Type *ExpectedTy = nullptr;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
- return error(ArgList[i].Loc, "too many arguments specified");
+ return error(Arg.Loc, "too many arguments specified");
}
- if (ExpectedTy && ExpectedTy != ArgList[i].V->getType())
- return error(ArgList[i].Loc, "argument is not of expected type '" +
- getTypeString(ExpectedTy) + "'");
- Args.push_back(ArgList[i].V);
- Attrs.push_back(ArgList[i].Attrs);
+ if (ExpectedTy && ExpectedTy != Arg.V->getType())
+ return error(Arg.Loc, "argument is not of expected type '" +
+ getTypeString(ExpectedTy) + "'");
+ Args.push_back(Arg.V);
+ Attrs.push_back(Arg.Attrs);
}
if (I != E)
>From 2b3376f35340d86d766dc8007534f137bf93aed3 Mon Sep 17 00:00:00 2001
From: Allen <zhongyunde at huawei.com>
Date: Wed, 3 Jul 2024 18:35:34 +0800
Subject: [PATCH 100/246] [InstCombine] Guard noundef for transformation from
xor to or disjoint (#96905)
Fix https://github.com/llvm/llvm-project/issues/96857

Background on the guard: `or disjoint` is a poison-generating flag
asserting that no bit is set in both operands. If the mask M may be
undef, each of its uses can resolve to a different value, so the two
masked operands are not guaranteed bit-disjoint and the flag could turn
a well-defined xor into poison. A plain `or` remains a valid
replacement in that case, as the Alive2 links in the tests below
verify.
---
.../InstCombine/InstCombineAndOrXor.cpp | 8 +++--
llvm/test/Transforms/InstCombine/xor.ll | 32 +++++++++++++++++++
2 files changed, 38 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index b8664089c36ce..7f52a3ea95f73 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -4616,8 +4616,12 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
Value *M;
if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
- m_c_And(m_Deferred(M), m_Value()))))
- return BinaryOperator::CreateDisjointOr(Op0, Op1);
+ m_c_And(m_Deferred(M), m_Value())))) {
+ if (isGuaranteedNotToBeUndef(M))
+ return BinaryOperator::CreateDisjointOr(Op0, Op1);
+ else
+ return BinaryOperator::CreateOr(Op0, Op1);
+ }
if (Instruction *Xor = visitMaskedMerge(I, Builder))
return Xor;
diff --git a/llvm/test/Transforms/InstCombine/xor.ll b/llvm/test/Transforms/InstCombine/xor.ll
index 9a59db40ef8b1..2ff95821f4e00 100644
--- a/llvm/test/Transforms/InstCombine/xor.ll
+++ b/llvm/test/Transforms/InstCombine/xor.ll
@@ -1453,3 +1453,35 @@ define i32 @tryFactorization_xor_ashr_ashr(i32 %a) {
%xor = xor i32 %not, %shr1
ret i32 %xor
}
+
+; https://alive2.llvm.org/ce/z/SOxv-e
+define i4 @PR96857_xor_with_noundef(i4 %val0, i4 %val1, i4 noundef %val2) {
+; CHECK-LABEL: @PR96857_xor_with_noundef(
+; CHECK-NEXT: [[VAL4:%.*]] = and i4 [[VAL2:%.*]], [[VAL0:%.*]]
+; CHECK-NEXT: [[VAL5:%.*]] = xor i4 [[VAL2]], -1
+; CHECK-NEXT: [[VAL6:%.*]] = and i4 [[VAL5]], [[VAL1:%.*]]
+; CHECK-NEXT: [[VAL7:%.*]] = or disjoint i4 [[VAL4]], [[VAL6]]
+; CHECK-NEXT: ret i4 [[VAL7]]
+;
+ %val4 = and i4 %val2, %val0
+ %val5 = xor i4 %val2, -1
+ %val6 = and i4 %val5, %val1
+ %val7 = xor i4 %val4, %val6
+ ret i4 %val7
+}
+
+; https://alive2.llvm.org/ce/z/whLTaJ
+define i4 @PR96857_xor_without_noundef(i4 %val0, i4 %val1, i4 %val2) {
+; CHECK-LABEL: @PR96857_xor_without_noundef(
+; CHECK-NEXT: [[VAL4:%.*]] = and i4 [[VAL2:%.*]], [[VAL0:%.*]]
+; CHECK-NEXT: [[VAL5:%.*]] = xor i4 [[VAL2]], -1
+; CHECK-NEXT: [[VAL6:%.*]] = and i4 [[VAL5]], [[VAL1:%.*]]
+; CHECK-NEXT: [[VAL7:%.*]] = or i4 [[VAL4]], [[VAL6]]
+; CHECK-NEXT: ret i4 [[VAL7]]
+;
+ %val4 = and i4 %val2, %val0
+ %val5 = xor i4 %val2, -1
+ %val6 = and i4 %val5, %val1
+ %val7 = xor i4 %val4, %val6
+ ret i4 %val7
+}
>From b76dd4edbfbba5593b691ef92b755f25cf63f445 Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Wed, 3 Jul 2024 11:35:51 +0100
Subject: [PATCH 101/246] [AMDGPU] Disable atomic optimization of fadd/fsub
with result (#96479)
An atomic fadd instruction like this should return %x:
; value at %ptr is %x
%r = atomicrmw fadd ptr %ptr, float %y
After atomic optimization, if %y is uniform, the result is calculated
as %r = %x + %y * +0.0. This has a couple of problems:
1. If %y is Inf or NaN, this will return NaN instead of %x.
2. If %x is -0.0 and %y is positive, this will return +0.0 instead of
-0.0.
Avoid these problems by disabling the "%y is uniform" path if there are
any uses of the result.
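As a worked check (values chosen here for illustration, not from the
patch): with %x = -0.0 and %y = 1.0, the first active lane (Mbcnt = 0)
computes %r = -0.0 + (1.0 * +0.0) = -0.0 + +0.0 = +0.0, losing the sign
of %x (problem 2). With %y = +Inf it computes %r = -0.0 + (Inf * +0.0)
= -0.0 + NaN = NaN (problem 1).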
---
.../Target/AMDGPU/AMDGPUAtomicOptimizer.cpp | 15 +-
.../GlobalISel/global-atomic-fadd.f32-rtn.ll | 188 ++-
.../AMDGPU/global-atomic-fadd.f32-rtn.ll | 160 ++-
.../global-atomicrmw-fadd-wrong-subtarget.ll | 59 +-
.../AMDGPU/global_atomic_optimizer_fp_rtn.ll | 758 +++++-----
.../CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 1224 +++++++++--------
6 files changed, 1394 insertions(+), 1010 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index 08c1cd56337aa..54968be677a37 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -224,7 +224,14 @@ void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
return;
}
- const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));
+ bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));
+
+ if ((Op == AtomicRMWInst::FAdd || Op == AtomicRMWInst::FSub) &&
+ !I.use_empty()) {
+ // Disable the uniform return value calculation using fmul because it
+ // mishandles infinities, NaNs and signed zeros. FIXME.
+ ValDivergent = true;
+ }
// If the value operand is divergent, each lane is contributing a different
// value to the atomic calculation. We can only optimize divergent values if
@@ -988,6 +995,12 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
break;
case AtomicRMWInst::FAdd:
case AtomicRMWInst::FSub: {
+ // FIXME: This path is currently disabled in visitAtomicRMWInst because
+ // of problems calculating the first active lane of the result (where
+ // Mbcnt is 0):
+ // - If V is infinity or NaN we will return NaN instead of BroadcastI.
+ // - If BroadcastI is -0.0 and V is positive we will return +0.0 instead
+ // of -0.0.
LaneOffset = B.CreateFMul(V, Mbcnt);
break;
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
index e48d281f37c9a..077aff46839a6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefixes=GFX90A_GFX940,GFX90A %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefixes=GFX90A_GFX940,GFX940 %s
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s
@@ -149,6 +149,190 @@ define amdgpu_ps float @global_atomic_fadd_f32_rtn_atomicrmw(ptr addrspace(1) %p
}
define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace(1) inreg %ptr, float %data) #0 {
+ ; GFX90A-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
+ ; GFX90A: bb.1 (%ir-block.0):
+ ; GFX90A-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; GFX90A-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX90A-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX90A-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX90A-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GFX90A-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE
+ ; GFX90A-NEXT: [[SI_IF:%[0-9]+]]:sreg_64_xexec = SI_IF [[SI_PS_LIVE]], %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX90A-NEXT: S_BRANCH %bb.2
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.2 (%ir-block.5):
+ ; GFX90A-NEXT: successors: %bb.3(0x40000000), %bb.5(0x40000000)
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY $exec
+ ; GFX90A-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0
+ ; GFX90A-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0
+ ; GFX90A-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub1
+ ; GFX90A-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX90A-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+ ; GFX90A-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; GFX90A-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX90A-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; GFX90A-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
+ ; GFX90A-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY8]], [[COPY9]], implicit $exec
+ ; GFX90A-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX90A-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 [[COPY10]], [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec
+ ; GFX90A-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
+ ; GFX90A-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+ ; GFX90A-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY2]], [[COPY11]], implicit-def dead $scc, implicit $exec
+ ; GFX90A-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
+ ; GFX90A-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY12]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY13]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY14]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY15]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY16]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY17]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp6:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY18]], [[V_ADD_F32_e64_5]], 312, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 63
+ ; GFX90A-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], [[S_MOV_B32_4]]
+ ; GFX90A-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[V_READLANE_B32_]]
+ ; GFX90A-NEXT: [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY19]], implicit $exec
+ ; GFX90A-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
+ ; GFX90A-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_MBCNT_HI_U32_B32_e64_]], [[COPY20]], implicit $exec
+ ; GFX90A-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64_xexec = SI_IF [[V_CMP_EQ_U32_e64_]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX90A-NEXT: S_BRANCH %bb.3
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.3 (%ir-block.36):
+ ; GFX90A-NEXT: successors: %bb.5(0x80000000)
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; GFX90A-NEXT: [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN [[V_MOV_B32_e32_]], [[STRICT_WWM]], [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+ ; GFX90A-NEXT: S_BRANCH %bb.5
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.4.Flow:
+ ; GFX90A-NEXT: successors: %bb.6(0x80000000)
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI %43, %bb.5, [[DEF]], %bb.1
+ ; GFX90A-NEXT: SI_END_CF [[SI_IF]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX90A-NEXT: S_BRANCH %bb.6
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.5 (%ir-block.39):
+ ; GFX90A-NEXT: successors: %bb.4(0x80000000)
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.3, [[DEF]], %bb.2
+ ; GFX90A-NEXT: SI_END_CF [[SI_IF1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX90A-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec
+ ; GFX90A-NEXT: [[STRICT_WWM1:%[0-9]+]]:vgpr_32 = STRICT_WWM [[V_MOV_B32_dpp6]], implicit $exec
+ ; GFX90A-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_6:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY21]], 0, [[STRICT_WWM1]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: S_BRANCH %bb.4
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.6 (%ir-block.46):
+ ; GFX90A-NEXT: $vgpr0 = COPY [[PHI]]
+ ; GFX90A-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX940-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
+ ; GFX940: bb.1 (%ir-block.0):
+ ; GFX940-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; GFX940-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX940-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX940-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX940-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GFX940-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE
+ ; GFX940-NEXT: [[SI_IF:%[0-9]+]]:sreg_64_xexec = SI_IF [[SI_PS_LIVE]], %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX940-NEXT: S_BRANCH %bb.2
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.2 (%ir-block.5):
+ ; GFX940-NEXT: successors: %bb.3(0x40000000), %bb.5(0x40000000)
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY $exec
+ ; GFX940-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0
+ ; GFX940-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0
+ ; GFX940-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub1
+ ; GFX940-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX940-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+ ; GFX940-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; GFX940-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX940-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; GFX940-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
+ ; GFX940-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY8]], [[COPY9]], implicit $exec
+ ; GFX940-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX940-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 [[COPY10]], [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec
+ ; GFX940-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
+ ; GFX940-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+ ; GFX940-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY2]], [[COPY11]], implicit-def dead $scc, implicit $exec
+ ; GFX940-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
+ ; GFX940-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX940-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY12]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX940-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY13]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX940-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY14]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX940-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY15]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX940-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY16]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX940-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY17]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
+ ; GFX940-NEXT: [[V_MOV_B32_dpp6:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY18]], [[V_ADD_F32_e64_5]], 312, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 63
+ ; GFX940-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], [[S_MOV_B32_4]]
+ ; GFX940-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[V_READLANE_B32_]]
+ ; GFX940-NEXT: [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY19]], implicit $exec
+ ; GFX940-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
+ ; GFX940-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_MBCNT_HI_U32_B32_e64_]], [[COPY20]], implicit $exec
+ ; GFX940-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64_xexec = SI_IF [[V_CMP_EQ_U32_e64_]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX940-NEXT: S_BRANCH %bb.3
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.3 (%ir-block.36):
+ ; GFX940-NEXT: successors: %bb.5(0x80000000)
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; GFX940-NEXT: [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN [[V_MOV_B32_e32_]], [[STRICT_WWM]], [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+ ; GFX940-NEXT: S_BRANCH %bb.5
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.4.Flow:
+ ; GFX940-NEXT: successors: %bb.6(0x80000000)
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI %42, %bb.5, [[DEF]], %bb.1
+ ; GFX940-NEXT: SI_END_CF [[SI_IF]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX940-NEXT: S_BRANCH %bb.6
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.5 (%ir-block.39):
+ ; GFX940-NEXT: successors: %bb.4(0x80000000)
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.3, [[DEF]], %bb.2
+ ; GFX940-NEXT: SI_END_CF [[SI_IF1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX940-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec
+ ; GFX940-NEXT: [[STRICT_WWM1:%[0-9]+]]:vgpr_32 = STRICT_WWM [[V_MOV_B32_dpp6]], implicit $exec
+ ; GFX940-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX940-NEXT: [[V_ADD_F32_e64_6:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY21]], 0, [[STRICT_WWM1]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: S_BRANCH %bb.4
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.6 (%ir-block.46):
+ ; GFX940-NEXT: $vgpr0 = COPY [[PHI]]
+ ; GFX940-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
; GFX11-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
; GFX11: bb.1 (%ir-block.0):
; GFX11-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
index 3454e9d1019e5..d4dee983d4fc0 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefixes=GFX90A_GFX940,GFX90A %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefixes=GFX90A_GFX940,GFX940 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s
@@ -155,6 +155,162 @@ define amdgpu_ps float @global_atomic_fadd_f32_rtn_atomicrmw(ptr addrspace(1) %p
}
define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace(1) inreg %ptr, float %data) #0 {
+ ; GFX90A-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
+ ; GFX90A: bb.0 (%ir-block.0):
+ ; GFX90A-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
+ ; GFX90A-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX90A-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX90A-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX90A-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX90A-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE
+ ; GFX90A-NEXT: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; GFX90A-NEXT: [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; GFX90A-NEXT: S_BRANCH %bb.1
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.1 (%ir-block.5):
+ ; GFX90A-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec
+ ; GFX90A-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub1
+ ; GFX90A-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub0
+ ; GFX90A-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX90A-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX90A-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 killed [[COPY6]], [[COPY7]], implicit $exec
+ ; GFX90A-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 killed [[COPY5]], killed [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec
+ ; GFX90A-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
+ ; GFX90A-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY]], killed [[S_MOV_B32_1]], implicit-def dead $scc, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 -2147483648, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, killed [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, killed [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, killed [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, killed [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, killed [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, killed [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B32_dpp6:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_5]], 312, 15, 15, 0, implicit $exec
+ ; GFX90A-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 63
+ ; GFX90A-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], killed [[S_MOV_B32_2]]
+ ; GFX90A-NEXT: early-clobber %2:sgpr_32 = STRICT_WWM killed [[V_READLANE_B32_]], implicit $exec
+ ; GFX90A-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_MBCNT_HI_U32_B32_e64_]], [[S_MOV_B32_]], implicit $exec
+ ; GFX90A-NEXT: [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; GFX90A-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64 = SI_IF killed [[V_CMP_EQ_U32_e64_]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; GFX90A-NEXT: S_BRANCH %bb.2
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.2 (%ir-block.36):
+ ; GFX90A-NEXT: successors: %bb.4(0x80000000)
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; GFX90A-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY %2
+ ; GFX90A-NEXT: [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN killed [[V_MOV_B32_e32_1]], [[COPY8]], [[COPY3]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+ ; GFX90A-NEXT: S_BRANCH %bb.4
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.3.Flow:
+ ; GFX90A-NEXT: successors: %bb.5(0x80000000)
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, %7, %bb.4
+ ; GFX90A-NEXT: SI_END_CF [[SI_IF]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; GFX90A-NEXT: S_BRANCH %bb.5
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.4 (%ir-block.39):
+ ; GFX90A-NEXT: successors: %bb.3(0x80000000)
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[DEF1]], %bb.1, [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.2
+ ; GFX90A-NEXT: SI_END_CF [[SI_IF1]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; GFX90A-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec
+ ; GFX90A-NEXT: early-clobber %45:vgpr_32 = STRICT_WWM [[V_MOV_B32_dpp6]], implicit $exec
+ ; GFX90A-NEXT: [[V_ADD_F32_e64_6:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[V_READFIRSTLANE_B32_]], 0, killed %45, 0, 0, implicit $mode, implicit $exec
+ ; GFX90A-NEXT: S_BRANCH %bb.3
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: bb.5 (%ir-block.46):
+ ; GFX90A-NEXT: $vgpr0 = COPY [[PHI]]
+ ; GFX90A-NEXT: SI_RETURN_TO_EPILOG $vgpr0
+ ;
+ ; GFX940-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
+ ; GFX940: bb.0 (%ir-block.0):
+ ; GFX940-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
+ ; GFX940-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX940-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX940-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX940-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX940-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE
+ ; GFX940-NEXT: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; GFX940-NEXT: [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; GFX940-NEXT: S_BRANCH %bb.1
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.1 (%ir-block.5):
+ ; GFX940-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec
+ ; GFX940-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub1
+ ; GFX940-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub0
+ ; GFX940-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX940-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX940-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 killed [[COPY6]], [[COPY7]], implicit $exec
+ ; GFX940-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 killed [[COPY5]], killed [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec
+ ; GFX940-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
+ ; GFX940-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY]], killed [[S_MOV_B32_1]], implicit-def dead $scc, implicit $exec
+ ; GFX940-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 -2147483648, implicit $exec
+ ; GFX940-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, killed [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, killed [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, killed [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, killed [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, killed [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, killed [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: [[V_MOV_B32_dpp6:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_5]], 312, 15, 15, 0, implicit $exec
+ ; GFX940-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 63
+ ; GFX940-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], killed [[S_MOV_B32_2]]
+ ; GFX940-NEXT: early-clobber %2:sgpr_32 = STRICT_WWM killed [[V_READLANE_B32_]], implicit $exec
+ ; GFX940-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_MBCNT_HI_U32_B32_e64_]], [[S_MOV_B32_]], implicit $exec
+ ; GFX940-NEXT: [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; GFX940-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64 = SI_IF killed [[V_CMP_EQ_U32_e64_]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; GFX940-NEXT: S_BRANCH %bb.2
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.2 (%ir-block.36):
+ ; GFX940-NEXT: successors: %bb.4(0x80000000)
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; GFX940-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY %2
+ ; GFX940-NEXT: [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN killed [[V_MOV_B32_e32_1]], [[COPY8]], [[COPY3]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+ ; GFX940-NEXT: S_BRANCH %bb.4
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.3.Flow:
+ ; GFX940-NEXT: successors: %bb.5(0x80000000)
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, %7, %bb.4
+ ; GFX940-NEXT: SI_END_CF [[SI_IF]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; GFX940-NEXT: S_BRANCH %bb.5
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.4 (%ir-block.39):
+ ; GFX940-NEXT: successors: %bb.3(0x80000000)
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[DEF1]], %bb.1, [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.2
+ ; GFX940-NEXT: SI_END_CF [[SI_IF1]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; GFX940-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec
+ ; GFX940-NEXT: early-clobber %44:vgpr_32 = STRICT_WWM [[V_MOV_B32_dpp6]], implicit $exec
+ ; GFX940-NEXT: [[V_ADD_F32_e64_6:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[V_READFIRSTLANE_B32_]], 0, killed %44, 0, 0, implicit $mode, implicit $exec
+ ; GFX940-NEXT: S_BRANCH %bb.3
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: bb.5 (%ir-block.46):
+ ; GFX940-NEXT: $vgpr0 = COPY [[PHI]]
+ ; GFX940-NEXT: SI_RETURN_TO_EPILOG $vgpr0
+ ;
; GFX11-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
; GFX11: bb.0 (%ir-block.0):
; GFX11-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll
index 9fc0b5c57cc3a..32c2078f08fc0 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll
@@ -4,42 +4,55 @@
define amdgpu_kernel void @global_atomic_fadd_ret_f32_wrong_subtarget(ptr addrspace(1) %ptr) #1 {
; GCN-LABEL: global_atomic_fadd_ret_f32_wrong_subtarget:
; GCN: ; %bb.0:
-; GCN-NEXT: s_mov_b64 s[4:5], exec
-; GCN-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GCN-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT: ; implicit-def: $vgpr1
+; GCN-NEXT: s_mov_b64 s[2:3], exec
+; GCN-NEXT: v_bfrev_b32_e32 v1, 1
+; GCN-NEXT: v_mov_b32_e32 v2, 4.0
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: .LBB0_1: ; %ComputeLoop
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GCN-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GCN-NEXT: v_readfirstlane_b32 s7, v1
+; GCN-NEXT: v_readlane_b32 s8, v2, s6
+; GCN-NEXT: s_mov_b32 m0, s6
+; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GCN-NEXT: v_writelane_b32 v0, s7, m0
+; GCN-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GCN-NEXT: v_add_f32_e32 v1, s8, v1
+; GCN-NEXT: s_cbranch_scc1 .LBB0_1
+; GCN-NEXT: ; %bb.2: ; %ComputeEnd
+; GCN-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GCN-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GCN-NEXT: ; implicit-def: $vgpr2
; GCN-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GCN-NEXT: s_cbranch_execz .LBB0_4
-; GCN-NEXT: ; %bb.1:
+; GCN-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
+; GCN-NEXT: s_cbranch_execz .LBB0_6
+; GCN-NEXT: ; %bb.3:
; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
-; GCN-NEXT: s_bcnt1_i32_b64 s7, s[4:5]
-; GCN-NEXT: v_cvt_f32_ubyte0_e32 v1, s7
; GCN-NEXT: s_mov_b64 s[4:5], 0
-; GCN-NEXT: v_mul_f32_e32 v2, 4.0, v1
+; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_load_dword s6, s[0:1], 0x0
-; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b32_e32 v1, s6
-; GCN-NEXT: .LBB0_2: ; %atomicrmw.start
+; GCN-NEXT: v_mov_b32_e32 v2, s6
+; GCN-NEXT: .LBB0_4: ; %atomicrmw.start
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT: v_mov_b32_e32 v5, v1
-; GCN-NEXT: v_add_f32_e32 v4, v5, v2
-; GCN-NEXT: global_atomic_cmpswap v1, v3, v[4:5], s[0:1] glc
+; GCN-NEXT: v_mov_b32_e32 v5, v2
+; GCN-NEXT: v_add_f32_e32 v4, v5, v1
+; GCN-NEXT: global_atomic_cmpswap v2, v3, v[4:5], s[0:1] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: buffer_wbinvl1
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5
; GCN-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GCN-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN-NEXT: s_cbranch_execnz .LBB0_2
-; GCN-NEXT: ; %bb.3: ; %Flow
+; GCN-NEXT: s_cbranch_execnz .LBB0_4
+; GCN-NEXT: ; %bb.5: ; %Flow
; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN-NEXT: .LBB0_4: ; %Flow2
+; GCN-NEXT: .LBB0_6: ; %Flow4
; GCN-NEXT: s_or_b64 exec, exec, s[2:3]
-; GCN-NEXT: v_readfirstlane_b32 s0, v1
-; GCN-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GCN-NEXT: v_mad_f32 v0, v0, 4.0, s0
+; GCN-NEXT: v_readfirstlane_b32 s0, v2
+; GCN-NEXT: v_add_f32_e32 v0, s0, v0
; GCN-NEXT: global_store_dword v[0:1], v0, off
; GCN-NEXT: s_endpgm
%result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 syncscope("agent") seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
index fca86a91dfdd0..791862dcae8dd 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
@@ -7,37 +7,101 @@
; strategies are valid for only divergent values. This optimization is valid for divergent addresses. Test also covers different scopes.
define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
-; IR: 2:
-; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR: 14:
-; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-NEXT: br label [[TMP16]]
-; IR: 16:
-; IR-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]])
-; IR-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-NEXT: [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-NEXT: [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]]
-; IR-NEXT: [[TMP23:%.*]] = fadd float [[TMP20]], [[TMP22]]
-; IR-NEXT: br label [[TMP24]]
-; IR: 24:
-; IR-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-NEXT: ret float [[TMP25]]
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29:%.*]] syncscope("agent") monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]])
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = fadd float [[TMP16]], [[TMP28:%.*]]
+; IR-ITERATIVE-NEXT: br label [[TMP18]]
+; IR-ITERATIVE: 18:
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP17]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret float [[TMP19]]
+; IR-ITERATIVE: ComputeLoop:
+; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP29]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP28]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP32:%.*]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]])
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]])
+; IR-ITERATIVE-NEXT: [[TMP28]] = bitcast i32 [[TMP27]] to float
+; IR-ITERATIVE-NEXT: [[TMP29]] = fadd float [[ACCUMULATOR]], [[TMP24]]
+; IR-ITERATIVE-NEXT: [[TMP30:%.*]] = shl i64 1, [[TMP20]]
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], -1
+; IR-ITERATIVE-NEXT: [[TMP32]] = and i64 [[ACTIVEBITS]], [[TMP31]]
+; IR-ITERATIVE-NEXT: [[TMP33:%.*]] = icmp eq i64 [[TMP32]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP33]], label [[COMPUTEEND]], label [[COMPUTELOOP]]
+; IR-ITERATIVE: ComputeEnd:
+; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
+;
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-DPP-NEXT: [[TMP10:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP9]], i32 -2147483648)
+; IR-DPP-NEXT: [[TMP11:%.*]] = bitcast i32 [[TMP10]] to float
+; IR-DPP-NEXT: [[TMP12:%.*]] = bitcast i32 [[TMP9]] to float
+; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 273, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP14:%.*]] = fadd float [[TMP11]], [[TMP13]]
+; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP14]], i32 274, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP16:%.*]] = fadd float [[TMP14]], [[TMP15]]
+; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP16]], i32 276, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP18:%.*]] = fadd float [[TMP16]], [[TMP17]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP18]], i32 280, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP20:%.*]] = fadd float [[TMP18]], [[TMP19]]
+; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 322, i32 10, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP22:%.*]] = fadd float [[TMP20]], [[TMP21]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP24:%.*]] = fadd float [[TMP22]], [[TMP23]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
+; IR-DPP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63)
+; IR-DPP-NEXT: [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT: [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]])
+; IR-DPP-NEXT: [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP33:%.*]]
+; IR-DPP: 31:
+; IR-DPP-NEXT: [[TMP32:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29]] syncscope("agent") monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP33]]
+; IR-DPP: 33:
+; IR-DPP-NEXT: [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
+; IR-DPP-NEXT: [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
+; IR-DPP-NEXT: [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]])
+; IR-DPP-NEXT: [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT: [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]])
+; IR-DPP-NEXT: [[TMP39:%.*]] = fadd float [[TMP37]], [[TMP38]]
+; IR-DPP-NEXT: br label [[TMP40]]
+; IR-DPP: 40:
+; IR-DPP-NEXT: [[TMP41:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP39]], [[TMP33]] ]
+; IR-DPP-NEXT: ret float [[TMP41]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret float %result
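
The '+' lines above replace the old popcount-based expansion with an iterative per-lane scan: ComputeLoop walks the ballot mask with cttz, reads each active lane's value, records the running total back into that lane (the later exclusive prefix), and clears the lane's bit until the mask is empty; the first active lane then issues the single atomicrmw. A minimal single-threaded C model of that loop, with readlane/writelane reduced to array indexing and the GCC/Clang __builtin_ctzll standing in for llvm.cttz.i64 (names here are illustrative, not from the pass):

#include <stdint.h>

float compute_loop(const float val[64], uint64_t activebits, float oldval[64]) {
    float accum = -0.0f;                         /* identity value for fadd */
    while (activebits != 0) {
        int lane = __builtin_ctzll(activebits);  /* lowest active lane */
        oldval[lane] = accum;                    /* writelane: exclusive prefix */
        accum += val[lane];                      /* readlane + fadd */
        activebits &= ~(1ull << lane);           /* retire the lane's bit */
    }
    return accum;  /* the first active lane atomically adds this wave total */
}
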
@@ -147,7 +211,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco
define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7:[0-9]+]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
; IR-ITERATIVE: 2:
; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -155,31 +219,47 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP24]]
-; IR-ITERATIVE: 24:
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP25]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29:%.*]] syncscope("one-as") monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP18]]
+; IR-ITERATIVE: 18:
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP17]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret float [[TMP19]]
+; IR-ITERATIVE: ComputeLoop:
+; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP29]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP28]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP32:%.*]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28]] = bitcast i32 [[TMP27]] to float
+; IR-ITERATIVE-NEXT: [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP30:%.*]] = shl i64 1, [[TMP20]]
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], -1
+; IR-ITERATIVE-NEXT: [[TMP32]] = and i64 [[ACTIVEBITS]], [[TMP31]]
+; IR-ITERATIVE-NEXT: [[TMP33:%.*]] = icmp eq i64 [[TMP32]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP33]], label [[COMPUTEEND]], label [[COMPUTELOOP]]
+; IR-ITERATIVE: ComputeEnd:
+; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8:[0-9]+]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
; IR-DPP: 2:
; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -187,27 +267,43 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP24]]
-; IR-DPP: 24:
-; IR-DPP-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-DPP-NEXT: ret float [[TMP25]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-DPP-NEXT: [[TMP10:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP9]], i32 -2147483648) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP11:%.*]] = bitcast i32 [[TMP10]] to float
+; IR-DPP-NEXT: [[TMP12:%.*]] = bitcast i32 [[TMP9]] to float
+; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP14]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP14]], float [[TMP15]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP16]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP17]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP18]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
+; IR-DPP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT: [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP33:%.*]]
+; IR-DPP: 31:
+; IR-DPP-NEXT: [[TMP32:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29]] syncscope("one-as") monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP33]]
+; IR-DPP: 33:
+; IR-DPP-NEXT: [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
+; IR-DPP-NEXT: [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
+; IR-DPP-NEXT: [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT: [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP39:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP40]]
+; IR-DPP: 40:
+; IR-DPP-NEXT: [[TMP41:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP39]], [[TMP33]] ]
+; IR-DPP-NEXT: ret float [[TMP41]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret float %result
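
The IR-DPP blocks above implement the same wave reduction without a loop. As far as I can read the dpp ctrl operands, 273/274/276/280 are row_shr:1/2/4/8, 322 is row_bcast:15 (row mask 0xa) and 323 is row_bcast:31 (row mask 0xc), which together form an inclusive prefix sum whose lane 63 holds the wave total; the extra update.dpp with ctrl 312 (wave_shr:1) shifts that scan down one lane to recover each lane's exclusive prefix. A hedged C model of one 64-lane wave, assuming inactive lanes were already set to the -0.0 identity by set.inactive:

void dpp_scan(float v[64]) {
    for (int s = 1; s <= 8; s <<= 1)            /* row_shr:1,2,4,8 */
        for (int lane = 63; lane >= 0; --lane)
            if ((lane & 15) >= s)               /* shifts stay inside a 16-lane row */
                v[lane] += v[lane - s];
    for (int lane = 0; lane < 64; ++lane)       /* row_bcast:15 into rows 1 and 3 */
        if ((lane & 31) >= 16)
            v[lane] += v[(lane & ~31) + 15];
    for (int lane = 32; lane < 64; ++lane)      /* row_bcast:31 into the upper half */
        v[lane] += v[31];
    /* v[63] is the wave total: readlane(..., 63), then one atomicrmw fadd */
}
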
@@ -317,7 +413,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
; IR-ITERATIVE: 2:
; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -325,31 +421,47 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP24]]
-; IR-ITERATIVE: 24:
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP25]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29:%.*]] syncscope("agent") monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP18]]
+; IR-ITERATIVE: 18:
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP17]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret float [[TMP19]]
+; IR-ITERATIVE: ComputeLoop:
+; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP29]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP28]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP32:%.*]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28]] = bitcast i32 [[TMP27]] to float
+; IR-ITERATIVE-NEXT: [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP30:%.*]] = shl i64 1, [[TMP20]]
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], -1
+; IR-ITERATIVE-NEXT: [[TMP32]] = and i64 [[ACTIVEBITS]], [[TMP31]]
+; IR-ITERATIVE-NEXT: [[TMP33:%.*]] = icmp eq i64 [[TMP32]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP33]], label [[COMPUTEEND]], label [[COMPUTELOOP]]
+; IR-ITERATIVE: ComputeEnd:
+; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
; IR-DPP: 2:
; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -357,27 +469,43 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP24]]
-; IR-DPP: 24:
-; IR-DPP-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-DPP-NEXT: ret float [[TMP25]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-DPP-NEXT: [[TMP10:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP9]], i32 -2147483648) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP11:%.*]] = bitcast i32 [[TMP10]] to float
+; IR-DPP-NEXT: [[TMP12:%.*]] = bitcast i32 [[TMP9]] to float
+; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP14]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP14]], float [[TMP15]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP16]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP17]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP18]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
+; IR-DPP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT: [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP33:%.*]]
+; IR-DPP: 31:
+; IR-DPP-NEXT: [[TMP32:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29]] syncscope("agent") monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP33]]
+; IR-DPP: 33:
+; IR-DPP-NEXT: [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
+; IR-DPP-NEXT: [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
+; IR-DPP-NEXT: [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT: [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP39:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP40]]
+; IR-DPP: 40:
+; IR-DPP-NEXT: [[TMP41:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP39]], [[TMP33]] ]
+; IR-DPP-NEXT: ret float [[TMP41]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
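
The strictfp tests here are the same two reductions with every fadd replaced by llvm.experimental.constrained.fadd under !"round.dynamic" / !"fpexcept.strict", i.e. the rewritten code may not assume the default rounding mode and may not drop FP exceptions. Roughly the C analogue of that constraint (a sketch, not the pass's code) is an addition compiled under FENV_ACCESS:

#include <fenv.h>
#pragma STDC FENV_ACCESS ON

float strict_add(float a, float b) {
    /* rounding mode comes from the dynamic FP environment and any raised
     * FP exception must stay observable, so this add cannot be folded or
     * speculated the way a plain fadd could be */
    return a + b;
}
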
@@ -783,7 +911,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
; IR-ITERATIVE: 2:
; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -791,31 +919,47 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP24]]
-; IR-ITERATIVE: 24:
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP25]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29:%.*]] monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP18]]
+; IR-ITERATIVE: 18:
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP17]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret float [[TMP19]]
+; IR-ITERATIVE: ComputeLoop:
+; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP29]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP28]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP32:%.*]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28]] = bitcast i32 [[TMP27]] to float
+; IR-ITERATIVE-NEXT: [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP30:%.*]] = shl i64 1, [[TMP20]]
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], -1
+; IR-ITERATIVE-NEXT: [[TMP32]] = and i64 [[ACTIVEBITS]], [[TMP31]]
+; IR-ITERATIVE-NEXT: [[TMP33:%.*]] = icmp eq i64 [[TMP32]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP33]], label [[COMPUTEEND]], label [[COMPUTELOOP]]
+; IR-ITERATIVE: ComputeEnd:
+; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
; IR-DPP: 2:
; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -823,27 +967,43 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP24]]
-; IR-DPP: 24:
-; IR-DPP-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-DPP-NEXT: ret float [[TMP25]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-DPP-NEXT: [[TMP10:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP9]], i32 -2147483648) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP11:%.*]] = bitcast i32 [[TMP10]] to float
+; IR-DPP-NEXT: [[TMP12:%.*]] = bitcast i32 [[TMP9]] to float
+; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP14]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP14]], float [[TMP15]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP16]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP17]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP18]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
+; IR-DPP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT: [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP33:%.*]]
+; IR-DPP: 31:
+; IR-DPP-NEXT: [[TMP32:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29]] monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP33]]
+; IR-DPP: 33:
+; IR-DPP-NEXT: [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
+; IR-DPP-NEXT: [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
+; IR-DPP-NEXT: [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT: [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP39:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP40]]
+; IR-DPP: 40:
+; IR-DPP-NEXT: [[TMP41:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP39]], [[TMP33]] ]
+; IR-DPP-NEXT: ret float [[TMP41]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
ret float %result
@@ -1060,42 +1220,8 @@ define amdgpu_ps float @global_atomic_fadd_div_address_div_value_system_scope_st
define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(
-; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR: 2:
-; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to double
-; IR-NEXT: [[TMP12:%.*]] = fmul double [[VAL:%.*]], [[TMP11]]
-; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR: 14:
-; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-NEXT: br label [[TMP16]]
-; IR: 16:
-; IR-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]])
-; IR-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]])
-; IR-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-NEXT: [[TMP27:%.*]] = uitofp i32 [[TMP8]] to double
-; IR-NEXT: [[TMP28:%.*]] = fmul double [[VAL]], [[TMP27]]
-; IR-NEXT: [[TMP29:%.*]] = fadd double [[TMP26]], [[TMP28]]
-; IR-NEXT: br label [[TMP30]]
-; IR: 30:
-; IR-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-NEXT: ret double [[TMP31]]
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
ret double %result
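
For double, the pass now leaves the uniform-value atomics alone, so the whole expansion collapses to the plain atomicrmw shown above. The removed '-' lines were the old popcount-based trick: one lane adds popcount(exec) * val, and every lane rebuilds its own return value from the broadcast old memory value plus mbcnt(exec) * val. A hedged per-lane C model of that removed expansion (the plain load/store stands in for the atomicrmw, and 'old' for the readfirstlane broadcast):

#include <stdint.h>

double removed_expansion(double *p, double val, uint64_t exec, int lane) {
    unsigned active = (unsigned)__builtin_popcountll(exec);          /* llvm.ctpop */
    unsigned before =
        (unsigned)__builtin_popcountll(exec & ((1ull << lane) - 1)); /* mbcnt */
    double old = *p;                 /* old memory value, broadcast to all lanes */
    if (before == 0)                 /* only the first active lane does the RMW */
        *p = old + (double)active * val;
    return old + (double)before * val;  /* each lane's own fadd result */
}
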
@@ -1111,81 +1237,9 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_scope_a
}
define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP30]]
-; IR-ITERATIVE: 30:
-; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret double [[TMP31]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP30]]
-; IR-DPP: 30:
-; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-DPP-NEXT: ret double [[TMP31]]
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
ret double %result
@@ -1201,81 +1255,9 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_
}
define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP30]]
-; IR-ITERATIVE: 30:
-; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret double [[TMP31]]
-;
-; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP30]]
-; IR-DPP: 30:
-; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-DPP-NEXT: ret double [[TMP31]]
+; IR-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
ret double %result
@@ -1421,81 +1403,9 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_
}
define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP30]]
-; IR-ITERATIVE: 30:
-; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret double [[TMP31]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP30]]
-; IR-DPP: 30:
-; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-DPP-NEXT: ret double [[TMP31]]
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
ret double %result
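
A minimal standalone sketch of what the updated checks above now expect, assuming a uniform %val (the function name below is illustrative and not part of the patch): with the ballot/mbcnt/readfirstlane scan expansion gone for uniform values, the whole operation reduces to the single atomicrmw that the shared IR-LABEL lines check:

; illustrative reproducer, not taken from the patch
define amdgpu_ps double @uniform_val_fadd(ptr addrspace(1) inreg %ptr, double inreg %val) {
  %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 8
  ret double %result
}
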
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 99b67a278a027..9744bd42786ea 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -6979,36 +6979,50 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) %ptrf, i32 %idx) {
; GFX12-LABEL: local_ds_fadd:
; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v1, 0x42280000
+; GFX12-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-NEXT: s_brev_b32 s4, 1
+; GFX12-NEXT: ; implicit-def: $vgpr0
+; GFX12-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_ctz_i32_b32 s3, s2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: v_readlane_b32 s5, v1, s3
+; GFX12-NEXT: s_lshl_b32 s6, 1, s3
+; GFX12-NEXT: v_writelane_b32 v0, s4, s3
+; GFX12-NEXT: s_and_not1_b32 s2, s2, s6
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_cmp_lg_u32 s2, 0
+; GFX12-NEXT: s_add_f32 s4, s4, s5
+; GFX12-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX12-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12-NEXT: s_load_b64 s[2:3], s[0:1], 0x8
-; GFX12-NEXT: s_mov_b32 s5, exec_lo
-; GFX12-NEXT: s_mov_b32 s4, exec_lo
-; GFX12-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
+; GFX12-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; GFX12-NEXT: ; implicit-def: $vgpr1
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_add_co_i32 s3, s3, 4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX12-NEXT: s_cbranch_execz .LBB28_2
-; GFX12-NEXT: ; %bb.1:
-; GFX12-NEXT: s_bcnt1_i32_b32 s5, s5
+; GFX12-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX12-NEXT: s_xor_b32 s5, exec_lo, s5
+; GFX12-NEXT: s_cbranch_execz .LBB28_4
+; GFX12-NEXT: ; %bb.3:
+; GFX12-NEXT: s_lshl_b32 s6, s3, 3
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
-; GFX12-NEXT: s_lshl_b32 s5, s3, 3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
-; GFX12-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s6
+; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: .LBB28_2:
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: .LBB28_4:
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
; GFX12-NEXT: s_mov_b32 s6, exec_lo
; GFX12-NEXT: v_readfirstlane_b32 s5, v1
; GFX12-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
; GFX12-NEXT: s_mov_b32 s4, exec_lo
; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX12-NEXT: s_cbranch_execz .LBB28_4
-; GFX12-NEXT: ; %bb.3:
+; GFX12-NEXT: s_cbranch_execz .LBB28_6
+; GFX12-NEXT: ; %bb.5:
; GFX12-NEXT: s_bcnt1_i32_b32 s6, s6
; GFX12-NEXT: s_lshl_b32 s3, s3, 4
; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
@@ -7017,16 +7031,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX12-NEXT: ds_add_f32 v2, v1
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: .LBB28_4:
+; GFX12-NEXT: .LBB28_6:
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX12-NEXT: v_add_f32_e32 v1, s5, v0
; GFX12-NEXT: s_mov_b32 s4, exec_lo
; GFX12-NEXT: s_brev_b32 s3, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX12-NEXT: v_add_f32_e32 v1, s5, v0
; GFX12-NEXT: ; implicit-def: $vgpr0
-; GFX12-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX12-NEXT: .LBB28_7: ; %ComputeLoop1
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_ctz_i32_b32 s5, s4
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
@@ -7037,21 +7048,21 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_cmp_lg_u32 s4, 0
; GFX12-NEXT: s_add_f32 s3, s3, s6
-; GFX12-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX12-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX12-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX12-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX12-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; GFX12-NEXT: ; implicit-def: $vgpr1
; GFX12-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX12-NEXT: s_xor_b32 s4, exec_lo, s4
-; GFX12-NEXT: s_cbranch_execz .LBB28_8
-; GFX12-NEXT: ; %bb.7:
+; GFX12-NEXT: s_cbranch_execz .LBB28_10
+; GFX12-NEXT: ; %bb.9:
; GFX12-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: .LBB28_8:
+; GFX12-NEXT: .LBB28_10:
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
; GFX12-NEXT: v_readfirstlane_b32 s2, v1
@@ -7065,34 +7076,48 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
;
; GFX940-LABEL: local_ds_fadd:
; GFX940: ; %bb.0:
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX940-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX940-NEXT: ; implicit-def: $vgpr0
+; GFX940-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX940-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX940-NEXT: v_readfirstlane_b32 s7, v1
+; GFX940-NEXT: v_readlane_b32 s8, v2, s6
+; GFX940-NEXT: s_mov_b32 m0, s6
+; GFX940-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX940-NEXT: v_writelane_b32 v0, s7, m0
+; GFX940-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX940-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX940-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX940-NEXT: ; %bb.2: ; %ComputeEnd
; GFX940-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX940-NEXT: s_mov_b64 s[4:5], exec
-; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX940-NEXT: ; implicit-def: $vgpr2
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: s_add_i32 s3, s3, 4
-; GFX940-NEXT: ; implicit-def: $vgpr1
-; GFX940-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB28_2
-; GFX940-NEXT: ; %bb.1:
-; GFX940-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX940-NEXT: s_lshl_b32 s8, s3, 3
-; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX940-NEXT: v_mov_b32_e32 v2, s8
-; GFX940-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execz .LBB28_4
+; GFX940-NEXT: ; %bb.3:
+; GFX940-NEXT: s_lshl_b32 s6, s3, 3
+; GFX940-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_2:
-; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX940-NEXT: .LBB28_4:
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX940-NEXT: s_mov_b64 s[6:7], exec
-; GFX940-NEXT: v_readfirstlane_b32 s8, v1
; GFX940-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX940-NEXT: v_readfirstlane_b32 s8, v2
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB28_4
-; GFX940-NEXT: ; %bb.3:
+; GFX940-NEXT: s_cbranch_execz .LBB28_6
+; GFX940-NEXT: ; %bb.5:
; GFX940-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX940-NEXT: s_lshl_b32 s3, s3, 4
@@ -7100,15 +7125,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: v_mov_b32_e32 v2, s3
; GFX940-NEXT: ds_add_f32 v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_4:
+; GFX940-NEXT: .LBB28_6:
; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX940-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX940-NEXT: s_mov_b64 s[4:5], exec
; GFX940-NEXT: v_add_f32_e32 v2, s8, v0
; GFX940-NEXT: v_bfrev_b32_e32 v1, 1
; GFX940-NEXT: ; implicit-def: $vgpr0
-; GFX940-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX940-NEXT: .LBB28_7: ; %ComputeLoop1
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX940-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX940-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -7119,20 +7142,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: v_writelane_b32 v0, s8, m0
; GFX940-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX940-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX940-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX940-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX940-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX940-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX940-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX940-NEXT: ; implicit-def: $vgpr2
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX940-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX940-NEXT: s_cbranch_execz .LBB28_8
-; GFX940-NEXT: ; %bb.7:
+; GFX940-NEXT: s_cbranch_execz .LBB28_10
+; GFX940-NEXT: ; %bb.9:
; GFX940-NEXT: v_mov_b32_e32 v2, s2
; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_8:
+; GFX940-NEXT: .LBB28_10:
; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX940-NEXT: v_readfirstlane_b32 s2, v2
@@ -7145,36 +7168,52 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
;
; GFX11-LABEL: local_ds_fadd:
; GFX11: ; %bb.0:
+; GFX11-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX11-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX11-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-NEXT: ; implicit-def: $vgpr0
+; GFX11-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: s_ctz_i32_b32 s3, s2
+; GFX11-NEXT: v_readfirstlane_b32 s4, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_readlane_b32 s5, v2, s3
+; GFX11-NEXT: s_lshl_b32 s6, 1, s3
+; GFX11-NEXT: s_and_not1_b32 s2, s2, s6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_writelane_b32 v0, s4, s3
+; GFX11-NEXT: v_add_f32_e32 v1, s5, v1
+; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX11-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x8
-; GFX11-NEXT: s_mov_b32 s5, exec_lo
-; GFX11-NEXT: s_mov_b32 s4, exec_lo
-; GFX11-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
-; GFX11-NEXT: ; implicit-def: $vgpr1
+; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX11-NEXT: ; implicit-def: $vgpr2
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_i32 s3, s3, 4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1:
-; GFX11-NEXT: s_bcnt1_i32_b32 s5, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
+; GFX11-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX11-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execz .LBB28_4
+; GFX11-NEXT: ; %bb.3:
; GFX11-NEXT: s_lshl_b32 s5, s3, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
-; GFX11-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v2, s5
+; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: .LBB28_2:
+; GFX11-NEXT: .LBB28_4:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_mov_b32 s6, exec_lo
-; GFX11-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
+; GFX11-NEXT: v_readfirstlane_b32 s4, v2
+; GFX11-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX11-NEXT: s_mov_b32 s5, exec_lo
-; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX11-NEXT: s_cbranch_execz .LBB28_4
-; GFX11-NEXT: ; %bb.3:
+; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v1
+; GFX11-NEXT: s_cbranch_execz .LBB28_6
+; GFX11-NEXT: ; %bb.5:
; GFX11-NEXT: s_bcnt1_i32_b32 s6, s6
; GFX11-NEXT: s_lshl_b32 s3, s3, 4
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
@@ -7183,19 +7222,16 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX11-NEXT: ds_add_f32 v2, v1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: .LBB28_4:
+; GFX11-NEXT: .LBB28_6:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX11-NEXT: v_add_f32_e32 v2, s4, v0
; GFX11-NEXT: v_bfrev_b32_e32 v1, 1
; GFX11-NEXT: s_mov_b32 s3, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX11-NEXT: v_add_f32_e32 v2, s4, v0
; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX11-NEXT: .LBB28_7: ; %ComputeLoop1
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_ctz_i32_b32 s4, s3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s5, v1
; GFX11-NEXT: v_readlane_b32 s6, v2, s4
; GFX11-NEXT: s_lshl_b32 s7, 1, s4
@@ -7205,21 +7241,21 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_add_f32_e32 v1, s6, v1
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX11-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX11-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX11-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: ; implicit-def: $vgpr2
; GFX11-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX11-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX11-NEXT: s_cbranch_execz .LBB28_8
-; GFX11-NEXT: ; %bb.7:
+; GFX11-NEXT: s_cbranch_execz .LBB28_10
+; GFX11-NEXT: ; %bb.9:
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: .LBB28_8:
+; GFX11-NEXT: .LBB28_10:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
; GFX11-NEXT: v_readfirstlane_b32 s2, v2
@@ -7233,34 +7269,47 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
;
; GFX10-LABEL: local_ds_fadd:
; GFX10: ; %bb.0:
+; GFX10-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX10-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX10-NEXT: s_mov_b32 s2, exec_lo
+; GFX10-NEXT: ; implicit-def: $vgpr0
+; GFX10-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_ff1_i32_b32 s3, s2
+; GFX10-NEXT: v_readfirstlane_b32 s4, v1
+; GFX10-NEXT: v_readlane_b32 s5, v2, s3
+; GFX10-NEXT: s_lshl_b32 s6, 1, s3
+; GFX10-NEXT: s_andn2_b32 s2, s2, s6
+; GFX10-NEXT: v_writelane_b32 v0, s4, s3
+; GFX10-NEXT: v_add_f32_e32 v1, s5, v1
+; GFX10-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX10-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX10-NEXT: s_mov_b32 s5, exec_lo
-; GFX10-NEXT: ; implicit-def: $vgpr1
-; GFX10-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_i32 s3, s3, 4
; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo
-; GFX10-NEXT: s_cbranch_execz .LBB28_2
-; GFX10-NEXT: ; %bb.1:
-; GFX10-NEXT: s_bcnt1_i32_b32 s5, s5
-; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
+; GFX10-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execz .LBB28_4
+; GFX10-NEXT: ; %bb.3:
; GFX10-NEXT: s_lshl_b32 s5, s3, 3
; GFX10-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX10-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: .LBB28_2:
+; GFX10-NEXT: .LBB28_4:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_mov_b32 s6, exec_lo
-; GFX10-NEXT: v_readfirstlane_b32 s4, v1
-; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT: v_readfirstlane_b32 s4, v2
+; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; GFX10-NEXT: s_and_saveexec_b32 s5, vcc_lo
-; GFX10-NEXT: s_cbranch_execz .LBB28_4
-; GFX10-NEXT: ; %bb.3:
+; GFX10-NEXT: s_cbranch_execz .LBB28_6
+; GFX10-NEXT: ; %bb.5:
; GFX10-NEXT: s_bcnt1_i32_b32 s6, s6
; GFX10-NEXT: s_lshl_b32 s3, s3, 4
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
@@ -7270,16 +7319,14 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX10-NEXT: ds_add_f32 v2, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: .LBB28_4:
+; GFX10-NEXT: .LBB28_6:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX10-NEXT: v_add_f32_e32 v2, s4, v0
; GFX10-NEXT: v_bfrev_b32_e32 v1, 1
; GFX10-NEXT: s_mov_b32 s3, exec_lo
-; GFX10-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX10-NEXT: v_add_f32_e32 v2, s4, v0
; GFX10-NEXT: ; implicit-def: $vgpr0
-; GFX10-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX10-NEXT: .LBB28_7: ; %ComputeLoop1
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_ff1_i32_b32 s4, s3
; GFX10-NEXT: v_readfirstlane_b32 s5, v1
@@ -7289,21 +7336,21 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX10-NEXT: v_writelane_b32 v0, s5, s4
; GFX10-NEXT: v_add_f32_e32 v1, s6, v1
; GFX10-NEXT: s_cmp_lg_u32 s3, 0
-; GFX10-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX10-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX10-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX10-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX10-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX10-NEXT: s_cbranch_execz .LBB28_8
-; GFX10-NEXT: ; %bb.7:
+; GFX10-NEXT: s_cbranch_execz .LBB28_10
+; GFX10-NEXT: ; %bb.9:
; GFX10-NEXT: v_mov_b32_e32 v2, s2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: .LBB28_8:
+; GFX10-NEXT: .LBB28_10:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
@@ -7316,34 +7363,48 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
;
; GFX90A-LABEL: local_ds_fadd:
; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX90A-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX90A-NEXT: ; implicit-def: $vgpr0
+; GFX90A-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX90A-NEXT: v_readfirstlane_b32 s7, v1
+; GFX90A-NEXT: v_readlane_b32 s8, v2, s6
+; GFX90A-NEXT: s_mov_b32 m0, s6
+; GFX90A-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX90A-NEXT: v_writelane_b32 v0, s7, m0
+; GFX90A-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX90A-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX90A-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX90A-NEXT: ; %bb.2: ; %ComputeEnd
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX90A-NEXT: s_mov_b64 s[4:5], exec
-; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_add_i32 s3, s3, 4
-; GFX90A-NEXT: ; implicit-def: $vgpr1
-; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB28_2
-; GFX90A-NEXT: ; %bb.1:
-; GFX90A-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX90A-NEXT: s_lshl_b32 s8, s3, 3
-; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX90A-NEXT: v_mov_b32_e32 v2, s8
-; GFX90A-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execz .LBB28_4
+; GFX90A-NEXT: ; %bb.3:
+; GFX90A-NEXT: s_lshl_b32 s6, s3, 3
+; GFX90A-NEXT: v_mov_b32_e32 v2, s6
+; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_2:
-; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX90A-NEXT: .LBB28_4:
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
-; GFX90A-NEXT: v_readfirstlane_b32 s8, v1
; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX90A-NEXT: v_readfirstlane_b32 s8, v2
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB28_4
-; GFX90A-NEXT: ; %bb.3:
+; GFX90A-NEXT: s_cbranch_execz .LBB28_6
+; GFX90A-NEXT: ; %bb.5:
; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX90A-NEXT: s_lshl_b32 s3, s3, 4
@@ -7351,15 +7412,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: v_mov_b32_e32 v2, s3
; GFX90A-NEXT: ds_add_f32 v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_4:
+; GFX90A-NEXT: .LBB28_6:
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX90A-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX90A-NEXT: s_mov_b64 s[4:5], exec
; GFX90A-NEXT: v_add_f32_e32 v2, s8, v0
; GFX90A-NEXT: v_bfrev_b32_e32 v1, 1
; GFX90A-NEXT: ; implicit-def: $vgpr0
-; GFX90A-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX90A-NEXT: .LBB28_7: ; %ComputeLoop1
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX90A-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -7370,20 +7429,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: v_writelane_b32 v0, s8, m0
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX90A-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX90A-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX90A-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX90A-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX90A-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB28_8
-; GFX90A-NEXT: ; %bb.7:
+; GFX90A-NEXT: s_cbranch_execz .LBB28_10
+; GFX90A-NEXT: ; %bb.9:
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_8:
+; GFX90A-NEXT: .LBB28_10:
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX90A-NEXT: v_readfirstlane_b32 s2, v2
@@ -7395,34 +7454,48 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
;
; GFX908-LABEL: local_ds_fadd:
; GFX908: ; %bb.0:
+; GFX908-NEXT: s_mov_b64 s[2:3], exec
+; GFX908-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX908-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX908-NEXT: ; implicit-def: $vgpr0
+; GFX908-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX908-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX908-NEXT: v_readfirstlane_b32 s7, v1
+; GFX908-NEXT: v_readlane_b32 s8, v2, s6
+; GFX908-NEXT: s_mov_b32 m0, s6
+; GFX908-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX908-NEXT: v_writelane_b32 v0, s7, m0
+; GFX908-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX908-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX908-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX908-NEXT: ; %bb.2: ; %ComputeEnd
; GFX908-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX908-NEXT: s_mov_b64 s[4:5], exec
-; GFX908-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX908-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX908-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
; GFX908-NEXT: s_add_i32 s3, s3, 4
-; GFX908-NEXT: ; implicit-def: $vgpr1
-; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB28_2
-; GFX908-NEXT: ; %bb.1:
-; GFX908-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX908-NEXT: s_lshl_b32 s8, s3, 3
-; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX908-NEXT: v_mov_b32_e32 v2, s8
-; GFX908-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX908-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execz .LBB28_4
+; GFX908-NEXT: ; %bb.3:
+; GFX908-NEXT: s_lshl_b32 s6, s3, 3
+; GFX908-NEXT: v_mov_b32_e32 v2, s6
+; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_2:
-; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX908-NEXT: .LBB28_4:
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_mov_b64 s[6:7], exec
-; GFX908-NEXT: v_readfirstlane_b32 s8, v1
; GFX908-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX908-NEXT: v_readfirstlane_b32 s8, v2
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB28_4
-; GFX908-NEXT: ; %bb.3:
+; GFX908-NEXT: s_cbranch_execz .LBB28_6
+; GFX908-NEXT: ; %bb.5:
; GFX908-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX908-NEXT: s_lshl_b32 s3, s3, 4
@@ -7430,15 +7503,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: v_mov_b32_e32 v2, s3
; GFX908-NEXT: ds_add_f32 v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_4:
+; GFX908-NEXT: .LBB28_6:
; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX908-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX908-NEXT: s_mov_b64 s[4:5], exec
; GFX908-NEXT: v_add_f32_e32 v2, s8, v0
; GFX908-NEXT: v_bfrev_b32_e32 v1, 1
; GFX908-NEXT: ; implicit-def: $vgpr0
-; GFX908-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX908-NEXT: .LBB28_7: ; %ComputeLoop1
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX908-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -7449,20 +7520,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: v_writelane_b32 v0, s8, m0
; GFX908-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX908-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX908-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX908-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX908-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX908-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX908-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX908-NEXT: s_cbranch_execz .LBB28_8
-; GFX908-NEXT: ; %bb.7:
+; GFX908-NEXT: s_cbranch_execz .LBB28_10
+; GFX908-NEXT: ; %bb.9:
; GFX908-NEXT: v_mov_b32_e32 v2, s2
; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_8:
+; GFX908-NEXT: .LBB28_10:
; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX908-NEXT: v_readfirstlane_b32 s2, v2
@@ -7474,35 +7545,49 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
;
; GFX8-LABEL: local_ds_fadd:
; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b64 s[2:3], exec
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX8-NEXT: ; implicit-def: $vgpr0
+; GFX8-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX8-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX8-NEXT: v_readfirstlane_b32 s7, v1
+; GFX8-NEXT: v_readlane_b32 s8, v2, s6
+; GFX8-NEXT: s_mov_b32 m0, s6
+; GFX8-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX8-NEXT: v_writelane_b32 v0, s7, m0
+; GFX8-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX8-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX8-NEXT: s_mov_b64 s[4:5], exec
-; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX8-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
+; GFX8-NEXT: v_mbcnt_hi_u32_b32 v3, exec_hi, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX8-NEXT: ; implicit-def: $vgpr2
+; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_i32 s3, s3, 4
-; GFX8-NEXT: ; implicit-def: $vgpr1
-; GFX8-NEXT: s_mov_b32 m0, -1
-; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB28_2
-; GFX8-NEXT: ; %bb.1:
-; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX8-NEXT: s_lshl_b32 s8, s3, 3
-; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX8-NEXT: v_mov_b32_e32 v2, s8
-; GFX8-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execz .LBB28_4
+; GFX8-NEXT: ; %bb.3:
+; GFX8-NEXT: s_lshl_b32 s6, s3, 3
+; GFX8-NEXT: v_mov_b32_e32 v2, s6
+; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_2:
-; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8-NEXT: .LBB28_4:
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_mov_b64 s[6:7], exec
-; GFX8-NEXT: v_readfirstlane_b32 s8, v1
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX8-NEXT: v_readfirstlane_b32 s8, v2
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB28_4
-; GFX8-NEXT: ; %bb.3:
+; GFX8-NEXT: s_cbranch_execz .LBB28_6
+; GFX8-NEXT: ; %bb.5:
; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX8-NEXT: s_lshl_b32 s3, s3, 4
@@ -7510,15 +7595,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: v_mov_b32_e32 v2, s3
; GFX8-NEXT: ds_add_f32 v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_4:
+; GFX8-NEXT: .LBB28_6:
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX8-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX8-NEXT: s_mov_b64 s[4:5], exec
; GFX8-NEXT: v_add_f32_e32 v2, s8, v0
; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
; GFX8-NEXT: ; implicit-def: $vgpr0
-; GFX8-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX8-NEXT: .LBB28_7: ; %ComputeLoop1
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -7529,21 +7612,21 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: v_writelane_b32 v0, s8, m0
; GFX8-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX8-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX8-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX8-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX8-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX8-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr2
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execz .LBB28_8
-; GFX8-NEXT: ; %bb.7:
+; GFX8-NEXT: s_cbranch_execz .LBB28_10
+; GFX8-NEXT: ; %bb.9:
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_8:
+; GFX8-NEXT: .LBB28_10:
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX8-NEXT: v_readfirstlane_b32 s2, v2
@@ -7557,47 +7640,35 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX7-LABEL: local_ds_fadd:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2
-; GFX7-NEXT: s_mov_b64 s[6:7], exec
-; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0
-; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s7, v0
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: s_mov_b32 m0, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshl_b32 s4, s3, 3
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: ds_read_b32 v0, v0 offset:32
; GFX7-NEXT: s_add_i32 s3, s3, 4
-; GFX7-NEXT: ; implicit-def: $vgpr1
-; GFX7-NEXT: s_mov_b32 m0, -1
-; GFX7-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX7-NEXT: s_cbranch_execz .LBB28_4
-; GFX7-NEXT: ; %bb.1:
-; GFX7-NEXT: s_lshl_b32 s8, s3, 3
-; GFX7-NEXT: v_mov_b32_e32 v2, s8
-; GFX7-NEXT: ds_read_b32 v1, v2
-; GFX7-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s6
-; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
-; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: .LBB28_2: ; %atomicrmw.start
+; GFX7-NEXT: s_lshl_b32 s6, s3, 3
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: .LBB28_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v4, v1
-; GFX7-NEXT: v_add_f32_e32 v1, v4, v3
-; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v2, v4, v1
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: v_add_f32_e32 v0, 0x42280000, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v0, v1, v2, v0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v4
-; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX7-NEXT: s_cbranch_execnz .LBB28_2
-; GFX7-NEXT: ; %bb.3: ; %Flow18
-; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: .LBB28_4: ; %Flow19
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB28_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_mov_b64 s[6:7], exec
-; GFX7-NEXT: v_readfirstlane_b32 s8, v1
; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v1, s6, 0
; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v1, s7, v1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX7-NEXT: s_cbranch_execz .LBB28_7
-; GFX7-NEXT: ; %bb.5:
+; GFX7-NEXT: s_cbranch_execz .LBB28_5
+; GFX7-NEXT: ; %bb.3:
; GFX7-NEXT: s_lshl_b32 s3, s3, 4
; GFX7-NEXT: v_mov_b32_e32 v1, s3
; GFX7-NEXT: ds_read_b32 v3, v1
@@ -7605,7 +7676,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s3
; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: .LBB28_6: ; %atomicrmw.start2
+; GFX7-NEXT: .LBB28_4: ; %atomicrmw.start2
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_add_f32_e32 v4, v3, v2
@@ -7615,16 +7686,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX7-NEXT: s_cbranch_execnz .LBB28_6
-; GFX7-NEXT: .LBB28_7: ; %Flow17
+; GFX7-NEXT: s_cbranch_execnz .LBB28_4
+; GFX7-NEXT: .LBB28_5: ; %Flow17
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: ds_read_b32 v1, v2
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX7-NEXT: v_add_f32_e32 v0, s8, v0
; GFX7-NEXT: s_mov_b64 s[2:3], 0
-; GFX7-NEXT: .LBB28_8: ; %atomicrmw.start8
+; GFX7-NEXT: .LBB28_6: ; %atomicrmw.start8
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v3, v1
@@ -7634,8 +7702,8 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
; GFX7-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX7-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX7-NEXT: s_cbranch_execnz .LBB28_8
-; GFX7-NEXT: ; %bb.9: ; %atomicrmw.end7
+; GFX7-NEXT: s_cbranch_execnz .LBB28_6
+; GFX7-NEXT: ; %bb.7: ; %atomicrmw.end7
; GFX7-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX7-NEXT: s_mov_b32 s3, 0xf000
@@ -7647,47 +7715,36 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX6-LABEL: local_ds_fadd:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2
-; GFX6-NEXT: s_mov_b64 s[6:7], exec
-; GFX6-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0
-; GFX6-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s7, v0
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX6-NEXT: s_mov_b32 m0, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_lshl_b32 s4, s3, 3
+; GFX6-NEXT: s_add_i32 s4, s4, 32
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: ds_read_b32 v0, v0
; GFX6-NEXT: s_add_i32 s3, s3, 4
-; GFX6-NEXT: ; implicit-def: $vgpr1
-; GFX6-NEXT: s_mov_b32 m0, -1
-; GFX6-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX6-NEXT: s_cbranch_execz .LBB28_4
-; GFX6-NEXT: ; %bb.1:
-; GFX6-NEXT: s_lshl_b32 s8, s3, 3
-; GFX6-NEXT: v_mov_b32_e32 v2, s8
-; GFX6-NEXT: ds_read_b32 v1, v2
-; GFX6-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s6
-; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
-; GFX6-NEXT: s_mov_b64 s[6:7], 0
-; GFX6-NEXT: .LBB28_2: ; %atomicrmw.start
+; GFX6-NEXT: s_lshl_b32 s6, s3, 3
+; GFX6-NEXT: s_mov_b64 s[4:5], 0
+; GFX6-NEXT: v_mov_b32_e32 v1, s6
+; GFX6-NEXT: .LBB28_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v4, v1
-; GFX6-NEXT: v_add_f32_e32 v1, v4, v3
-; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v2, v4, v1
+; GFX6-NEXT: v_mov_b32_e32 v2, v0
+; GFX6-NEXT: v_add_f32_e32 v0, 0x42280000, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v0, v1, v2, v0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v4
-; GFX6-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX6-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX6-NEXT: s_cbranch_execnz .LBB28_2
-; GFX6-NEXT: ; %bb.3: ; %Flow16
-; GFX6-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX6-NEXT: .LBB28_4: ; %Flow17
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
+; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX6-NEXT: s_cbranch_execnz .LBB28_1
+; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX6-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_mov_b64 s[6:7], exec
-; GFX6-NEXT: v_readfirstlane_b32 s8, v1
; GFX6-NEXT: v_mbcnt_lo_u32_b32_e64 v1, s6, 0
; GFX6-NEXT: v_mbcnt_hi_u32_b32_e32 v1, s7, v1
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX6-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX6-NEXT: s_cbranch_execz .LBB28_7
-; GFX6-NEXT: ; %bb.5:
+; GFX6-NEXT: s_cbranch_execz .LBB28_5
+; GFX6-NEXT: ; %bb.3:
; GFX6-NEXT: s_lshl_b32 s3, s3, 4
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: ds_read_b32 v3, v1
@@ -7695,7 +7752,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s3
; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
; GFX6-NEXT: s_mov_b64 s[6:7], 0
-; GFX6-NEXT: .LBB28_6: ; %atomicrmw.start2
+; GFX6-NEXT: .LBB28_4: ; %atomicrmw.start2
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: v_add_f32_e32 v4, v3, v2
@@ -7705,16 +7762,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX6-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX6-NEXT: s_cbranch_execnz .LBB28_6
-; GFX6-NEXT: .LBB28_7: ; %Flow15
+; GFX6-NEXT: s_cbranch_execnz .LBB28_4
+; GFX6-NEXT: .LBB28_5: ; %Flow15
; GFX6-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX6-NEXT: v_mov_b32_e32 v2, s2
; GFX6-NEXT: ds_read_b32 v1, v2
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX6-NEXT: v_add_f32_e32 v0, s8, v0
; GFX6-NEXT: s_mov_b64 s[2:3], 0
-; GFX6-NEXT: .LBB28_8: ; %atomicrmw.start8
+; GFX6-NEXT: .LBB28_6: ; %atomicrmw.start8
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v3, v1
@@ -7724,8 +7778,8 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
; GFX6-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX6-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX6-NEXT: s_cbranch_execnz .LBB28_8
-; GFX6-NEXT: ; %bb.9: ; %atomicrmw.end7
+; GFX6-NEXT: s_cbranch_execnz .LBB28_6
+; GFX6-NEXT: ; %bb.7: ; %atomicrmw.end7
; GFX6-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -7748,26 +7802,40 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrspace(3) %ptrf, i32 %idx) {
; GFX12-LABEL: local_ds_fadd_one_as:
; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v1, 0x42280000
+; GFX12-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-NEXT: s_brev_b32 s4, 1
+; GFX12-NEXT: ; implicit-def: $vgpr0
+; GFX12-NEXT: .LBB29_1: ; %ComputeLoop
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_ctz_i32_b32 s3, s2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: v_readlane_b32 s5, v1, s3
+; GFX12-NEXT: s_lshl_b32 s6, 1, s3
+; GFX12-NEXT: v_writelane_b32 v0, s4, s3
+; GFX12-NEXT: s_and_not1_b32 s2, s2, s6
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_cmp_lg_u32 s2, 0
+; GFX12-NEXT: s_add_f32 s4, s4, s5
+; GFX12-NEXT: s_cbranch_scc1 .LBB29_1
+; GFX12-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12-NEXT: s_load_b64 s[2:3], s[0:1], 0x8
-; GFX12-NEXT: s_mov_b32 s5, exec_lo
-; GFX12-NEXT: s_mov_b32 s4, exec_lo
-; GFX12-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
+; GFX12-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; GFX12-NEXT: ; implicit-def: $vgpr1
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_add_co_i32 s3, s3, 4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX12-NEXT: s_cbranch_execz .LBB29_2
-; GFX12-NEXT: ; %bb.1:
-; GFX12-NEXT: s_bcnt1_i32_b32 s5, s5
+; GFX12-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX12-NEXT: s_xor_b32 s5, exec_lo, s5
+; GFX12-NEXT: s_cbranch_execz .LBB29_4
+; GFX12-NEXT: ; %bb.3:
+; GFX12-NEXT: s_lshl_b32 s6, s3, 3
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
-; GFX12-NEXT: s_lshl_b32 s5, s3, 3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
-; GFX12-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX12-NEXT: .LBB29_2:
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s6
+; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
+; GFX12-NEXT: .LBB29_4:
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX12-NEXT: s_mov_b32 s6, exec_lo
; GFX12-NEXT: s_wait_dscnt 0x0
@@ -7775,24 +7843,21 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
; GFX12-NEXT: s_mov_b32 s4, exec_lo
; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX12-NEXT: s_cbranch_execz .LBB29_4
-; GFX12-NEXT: ; %bb.3:
+; GFX12-NEXT: s_cbranch_execz .LBB29_6
+; GFX12-NEXT: ; %bb.5:
; GFX12-NEXT: s_bcnt1_i32_b32 s6, s6
; GFX12-NEXT: s_lshl_b32 s3, s3, 4
; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_dual_mov_b32 v2, s3 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX12-NEXT: ds_add_f32 v2, v1
-; GFX12-NEXT: .LBB29_4:
+; GFX12-NEXT: .LBB29_6:
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX12-NEXT: v_add_f32_e32 v1, s5, v0
; GFX12-NEXT: s_mov_b32 s4, exec_lo
; GFX12-NEXT: s_brev_b32 s3, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX12-NEXT: v_add_f32_e32 v1, s5, v0
; GFX12-NEXT: ; implicit-def: $vgpr0
-; GFX12-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX12-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_ctz_i32_b32 s5, s4
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
@@ -7803,19 +7868,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_cmp_lg_u32 s4, 0
; GFX12-NEXT: s_add_f32 s3, s3, s6
-; GFX12-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX12-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX12-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX12-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX12-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; GFX12-NEXT: ; implicit-def: $vgpr1
; GFX12-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX12-NEXT: s_xor_b32 s4, exec_lo, s4
-; GFX12-NEXT: s_cbranch_execz .LBB29_8
-; GFX12-NEXT: ; %bb.7:
+; GFX12-NEXT: s_cbranch_execz .LBB29_10
+; GFX12-NEXT: ; %bb.9:
; GFX12-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
-; GFX12-NEXT: .LBB29_8:
+; GFX12-NEXT: .LBB29_10:
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
; GFX12-NEXT: s_wait_dscnt 0x0
@@ -7830,49 +7895,61 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
;
; GFX940-LABEL: local_ds_fadd_one_as:
; GFX940: ; %bb.0:
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX940-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX940-NEXT: ; implicit-def: $vgpr0
+; GFX940-NEXT: .LBB29_1: ; %ComputeLoop
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX940-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX940-NEXT: v_readfirstlane_b32 s7, v1
+; GFX940-NEXT: v_readlane_b32 s8, v2, s6
+; GFX940-NEXT: s_mov_b32 m0, s6
+; GFX940-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX940-NEXT: v_writelane_b32 v0, s7, m0
+; GFX940-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX940-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX940-NEXT: s_cbranch_scc1 .LBB29_1
+; GFX940-NEXT: ; %bb.2: ; %ComputeEnd
; GFX940-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX940-NEXT: s_mov_b64 s[4:5], exec
-; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX940-NEXT: ; implicit-def: $vgpr2
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: s_add_i32 s3, s3, 4
-; GFX940-NEXT: ; implicit-def: $vgpr1
-; GFX940-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB29_2
-; GFX940-NEXT: ; %bb.1:
-; GFX940-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX940-NEXT: s_lshl_b32 s8, s3, 3
-; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX940-NEXT: v_mov_b32_e32 v2, s8
-; GFX940-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX940-NEXT: .LBB29_2:
-; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execz .LBB29_4
+; GFX940-NEXT: ; %bb.3:
+; GFX940-NEXT: s_lshl_b32 s6, s3, 3
+; GFX940-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
+; GFX940-NEXT: .LBB29_4:
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX940-NEXT: s_mov_b64 s[6:7], exec
-; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_readfirstlane_b32 s8, v1
; GFX940-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: v_readfirstlane_b32 s8, v2
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB29_4
-; GFX940-NEXT: ; %bb.3:
+; GFX940-NEXT: s_cbranch_execz .LBB29_6
+; GFX940-NEXT: ; %bb.5:
; GFX940-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX940-NEXT: s_lshl_b32 s3, s3, 4
; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX940-NEXT: v_mov_b32_e32 v2, s3
; GFX940-NEXT: ds_add_f32 v2, v1
-; GFX940-NEXT: .LBB29_4:
+; GFX940-NEXT: .LBB29_6:
; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX940-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX940-NEXT: s_mov_b64 s[4:5], exec
; GFX940-NEXT: v_add_f32_e32 v2, s8, v0
; GFX940-NEXT: v_bfrev_b32_e32 v1, 1
; GFX940-NEXT: ; implicit-def: $vgpr0
-; GFX940-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX940-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX940-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX940-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -7883,19 +7960,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: v_writelane_b32 v0, s8, m0
; GFX940-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX940-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX940-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX940-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX940-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX940-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX940-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX940-NEXT: ; implicit-def: $vgpr2
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX940-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX940-NEXT: s_cbranch_execz .LBB29_8
-; GFX940-NEXT: ; %bb.7:
+; GFX940-NEXT: s_cbranch_execz .LBB29_10
+; GFX940-NEXT: ; %bb.9:
; GFX940-NEXT: v_mov_b32_e32 v2, s2
; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX940-NEXT: .LBB29_8:
+; GFX940-NEXT: .LBB29_10:
; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
@@ -7908,54 +7985,67 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
;
; GFX11-LABEL: local_ds_fadd_one_as:
; GFX11: ; %bb.0:
+; GFX11-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX11-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX11-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-NEXT: ; implicit-def: $vgpr0
+; GFX11-NEXT: .LBB29_1: ; %ComputeLoop
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: s_ctz_i32_b32 s3, s2
+; GFX11-NEXT: v_readfirstlane_b32 s4, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_readlane_b32 s5, v2, s3
+; GFX11-NEXT: s_lshl_b32 s6, 1, s3
+; GFX11-NEXT: s_and_not1_b32 s2, s2, s6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_writelane_b32 v0, s4, s3
+; GFX11-NEXT: v_add_f32_e32 v1, s5, v1
+; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_cbranch_scc1 .LBB29_1
+; GFX11-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x8
-; GFX11-NEXT: s_mov_b32 s5, exec_lo
-; GFX11-NEXT: s_mov_b32 s4, exec_lo
-; GFX11-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
-; GFX11-NEXT: ; implicit-def: $vgpr1
+; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX11-NEXT: ; implicit-def: $vgpr2
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_i32 s3, s3, 4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
-; GFX11-NEXT: ; %bb.1:
-; GFX11-NEXT: s_bcnt1_i32_b32 s5, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
+; GFX11-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX11-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execz .LBB29_4
+; GFX11-NEXT: ; %bb.3:
; GFX11-NEXT: s_lshl_b32 s5, s3, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
-; GFX11-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX11-NEXT: .LBB29_2:
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v2, s5
+; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1
+; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_mov_b32 s6, exec_lo
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
+; GFX11-NEXT: v_readfirstlane_b32 s4, v2
+; GFX11-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX11-NEXT: s_mov_b32 s5, exec_lo
-; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX11-NEXT: s_cbranch_execz .LBB29_4
-; GFX11-NEXT: ; %bb.3:
+; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v1
+; GFX11-NEXT: s_cbranch_execz .LBB29_6
+; GFX11-NEXT: ; %bb.5:
; GFX11-NEXT: s_bcnt1_i32_b32 s6, s6
; GFX11-NEXT: s_lshl_b32 s3, s3, 4
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v2, s3 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX11-NEXT: ds_add_f32 v2, v1
-; GFX11-NEXT: .LBB29_4:
+; GFX11-NEXT: .LBB29_6:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX11-NEXT: v_add_f32_e32 v2, s4, v0
; GFX11-NEXT: v_bfrev_b32_e32 v1, 1
; GFX11-NEXT: s_mov_b32 s3, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX11-NEXT: v_add_f32_e32 v2, s4, v0
; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX11-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_ctz_i32_b32 s4, s3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s5, v1
; GFX11-NEXT: v_readlane_b32 s6, v2, s4
; GFX11-NEXT: s_lshl_b32 s7, 1, s4
@@ -7965,19 +8055,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_add_f32_e32 v1, s6, v1
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX11-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX11-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX11-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: ; implicit-def: $vgpr2
; GFX11-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX11-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX11-NEXT: s_cbranch_execz .LBB29_8
-; GFX11-NEXT: ; %bb.7:
+; GFX11-NEXT: s_cbranch_execz .LBB29_10
+; GFX11-NEXT: ; %bb.9:
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX11-NEXT: .LBB29_8:
+; GFX11-NEXT: .LBB29_10:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
@@ -7991,49 +8081,60 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
;
; GFX10-LABEL: local_ds_fadd_one_as:
; GFX10: ; %bb.0:
+; GFX10-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX10-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX10-NEXT: s_mov_b32 s2, exec_lo
+; GFX10-NEXT: ; implicit-def: $vgpr0
+; GFX10-NEXT: .LBB29_1: ; %ComputeLoop
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_ff1_i32_b32 s3, s2
+; GFX10-NEXT: v_readfirstlane_b32 s4, v1
+; GFX10-NEXT: v_readlane_b32 s5, v2, s3
+; GFX10-NEXT: s_lshl_b32 s6, 1, s3
+; GFX10-NEXT: s_andn2_b32 s2, s2, s6
+; GFX10-NEXT: v_writelane_b32 v0, s4, s3
+; GFX10-NEXT: v_add_f32_e32 v1, s5, v1
+; GFX10-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-NEXT: s_cbranch_scc1 .LBB29_1
+; GFX10-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX10-NEXT: s_mov_b32 s5, exec_lo
-; GFX10-NEXT: ; implicit-def: $vgpr1
-; GFX10-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_i32 s3, s3, 4
; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo
-; GFX10-NEXT: s_cbranch_execz .LBB29_2
-; GFX10-NEXT: ; %bb.1:
-; GFX10-NEXT: s_bcnt1_i32_b32 s5, s5
-; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
+; GFX10-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execz .LBB29_4
+; GFX10-NEXT: ; %bb.3:
; GFX10-NEXT: s_lshl_b32 s5, s3, 3
; GFX10-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX10-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX10-NEXT: .LBB29_2:
+; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1
+; GFX10-NEXT: .LBB29_4:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_readfirstlane_b32 s4, v1
-; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT: v_readfirstlane_b32 s4, v2
+; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; GFX10-NEXT: s_and_saveexec_b32 s5, vcc_lo
-; GFX10-NEXT: s_cbranch_execz .LBB29_4
-; GFX10-NEXT: ; %bb.3:
+; GFX10-NEXT: s_cbranch_execz .LBB29_6
+; GFX10-NEXT: ; %bb.5:
; GFX10-NEXT: s_bcnt1_i32_b32 s6, s6
; GFX10-NEXT: s_lshl_b32 s3, s3, 4
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX10-NEXT: v_mov_b32_e32 v2, s3
; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX10-NEXT: ds_add_f32 v2, v1
-; GFX10-NEXT: .LBB29_4:
+; GFX10-NEXT: .LBB29_6:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX10-NEXT: v_add_f32_e32 v2, s4, v0
; GFX10-NEXT: v_bfrev_b32_e32 v1, 1
; GFX10-NEXT: s_mov_b32 s3, exec_lo
-; GFX10-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX10-NEXT: v_add_f32_e32 v2, s4, v0
; GFX10-NEXT: ; implicit-def: $vgpr0
-; GFX10-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX10-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_ff1_i32_b32 s4, s3
; GFX10-NEXT: v_readfirstlane_b32 s5, v1
@@ -8043,18 +8144,18 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_writelane_b32 v0, s5, s4
; GFX10-NEXT: v_add_f32_e32 v1, s6, v1
; GFX10-NEXT: s_cmp_lg_u32 s3, 0
-; GFX10-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX10-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX10-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX10-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX10-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX10-NEXT: s_cbranch_execz .LBB29_8
-; GFX10-NEXT: ; %bb.7:
+; GFX10-NEXT: s_cbranch_execz .LBB29_10
+; GFX10-NEXT: ; %bb.9:
; GFX10-NEXT: v_mov_b32_e32 v2, s2
; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX10-NEXT: .LBB29_8:
+; GFX10-NEXT: .LBB29_10:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
@@ -8068,49 +8169,61 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
;
; GFX90A-LABEL: local_ds_fadd_one_as:
; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX90A-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX90A-NEXT: ; implicit-def: $vgpr0
+; GFX90A-NEXT: .LBB29_1: ; %ComputeLoop
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX90A-NEXT: v_readfirstlane_b32 s7, v1
+; GFX90A-NEXT: v_readlane_b32 s8, v2, s6
+; GFX90A-NEXT: s_mov_b32 m0, s6
+; GFX90A-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX90A-NEXT: v_writelane_b32 v0, s7, m0
+; GFX90A-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX90A-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX90A-NEXT: s_cbranch_scc1 .LBB29_1
+; GFX90A-NEXT: ; %bb.2: ; %ComputeEnd
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX90A-NEXT: s_mov_b64 s[4:5], exec
-; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_add_i32 s3, s3, 4
-; GFX90A-NEXT: ; implicit-def: $vgpr1
-; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB29_2
-; GFX90A-NEXT: ; %bb.1:
-; GFX90A-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX90A-NEXT: s_lshl_b32 s8, s3, 3
-; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX90A-NEXT: v_mov_b32_e32 v2, s8
-; GFX90A-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX90A-NEXT: .LBB29_2:
-; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execz .LBB29_4
+; GFX90A-NEXT: ; %bb.3:
+; GFX90A-NEXT: s_lshl_b32 s6, s3, 3
+; GFX90A-NEXT: v_mov_b32_e32 v2, s6
+; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1
+; GFX90A-NEXT: .LBB29_4:
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
-; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_readfirstlane_b32 s8, v1
; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: v_readfirstlane_b32 s8, v2
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB29_4
-; GFX90A-NEXT: ; %bb.3:
+; GFX90A-NEXT: s_cbranch_execz .LBB29_6
+; GFX90A-NEXT: ; %bb.5:
; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX90A-NEXT: s_lshl_b32 s3, s3, 4
; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX90A-NEXT: v_mov_b32_e32 v2, s3
; GFX90A-NEXT: ds_add_f32 v2, v1
-; GFX90A-NEXT: .LBB29_4:
+; GFX90A-NEXT: .LBB29_6:
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX90A-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX90A-NEXT: s_mov_b64 s[4:5], exec
; GFX90A-NEXT: v_add_f32_e32 v2, s8, v0
; GFX90A-NEXT: v_bfrev_b32_e32 v1, 1
; GFX90A-NEXT: ; implicit-def: $vgpr0
-; GFX90A-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX90A-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX90A-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -8121,19 +8234,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_writelane_b32 v0, s8, m0
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX90A-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX90A-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX90A-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX90A-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX90A-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB29_8
-; GFX90A-NEXT: ; %bb.7:
+; GFX90A-NEXT: s_cbranch_execz .LBB29_10
+; GFX90A-NEXT: ; %bb.9:
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX90A-NEXT: .LBB29_8:
+; GFX90A-NEXT: .LBB29_10:
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -8145,49 +8258,61 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
;
; GFX908-LABEL: local_ds_fadd_one_as:
; GFX908: ; %bb.0:
+; GFX908-NEXT: s_mov_b64 s[2:3], exec
+; GFX908-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX908-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX908-NEXT: ; implicit-def: $vgpr0
+; GFX908-NEXT: .LBB29_1: ; %ComputeLoop
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX908-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX908-NEXT: v_readfirstlane_b32 s7, v1
+; GFX908-NEXT: v_readlane_b32 s8, v2, s6
+; GFX908-NEXT: s_mov_b32 m0, s6
+; GFX908-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX908-NEXT: v_writelane_b32 v0, s7, m0
+; GFX908-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX908-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX908-NEXT: s_cbranch_scc1 .LBB29_1
+; GFX908-NEXT: ; %bb.2: ; %ComputeEnd
; GFX908-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX908-NEXT: s_mov_b64 s[4:5], exec
-; GFX908-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX908-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX908-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
; GFX908-NEXT: s_add_i32 s3, s3, 4
-; GFX908-NEXT: ; implicit-def: $vgpr1
-; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB29_2
-; GFX908-NEXT: ; %bb.1:
-; GFX908-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX908-NEXT: s_lshl_b32 s8, s3, 3
-; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX908-NEXT: v_mov_b32_e32 v2, s8
-; GFX908-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX908-NEXT: .LBB29_2:
-; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX908-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execz .LBB29_4
+; GFX908-NEXT: ; %bb.3:
+; GFX908-NEXT: s_lshl_b32 s6, s3, 3
+; GFX908-NEXT: v_mov_b32_e32 v2, s6
+; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
+; GFX908-NEXT: .LBB29_4:
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_mov_b64 s[6:7], exec
-; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_readfirstlane_b32 s8, v1
; GFX908-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: v_readfirstlane_b32 s8, v2
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB29_4
-; GFX908-NEXT: ; %bb.3:
+; GFX908-NEXT: s_cbranch_execz .LBB29_6
+; GFX908-NEXT: ; %bb.5:
; GFX908-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX908-NEXT: s_lshl_b32 s3, s3, 4
; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX908-NEXT: v_mov_b32_e32 v2, s3
; GFX908-NEXT: ds_add_f32 v2, v1
-; GFX908-NEXT: .LBB29_4:
+; GFX908-NEXT: .LBB29_6:
; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX908-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX908-NEXT: s_mov_b64 s[4:5], exec
; GFX908-NEXT: v_add_f32_e32 v2, s8, v0
; GFX908-NEXT: v_bfrev_b32_e32 v1, 1
; GFX908-NEXT: ; implicit-def: $vgpr0
-; GFX908-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX908-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX908-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -8198,19 +8323,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: v_writelane_b32 v0, s8, m0
; GFX908-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX908-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX908-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX908-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX908-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX908-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX908-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX908-NEXT: s_cbranch_execz .LBB29_8
-; GFX908-NEXT: ; %bb.7:
+; GFX908-NEXT: s_cbranch_execz .LBB29_10
+; GFX908-NEXT: ; %bb.9:
; GFX908-NEXT: v_mov_b32_e32 v2, s2
; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX908-NEXT: .LBB29_8:
+; GFX908-NEXT: .LBB29_10:
; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
@@ -8222,50 +8347,62 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
;
; GFX8-LABEL: local_ds_fadd_one_as:
; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b64 s[2:3], exec
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX8-NEXT: ; implicit-def: $vgpr0
+; GFX8-NEXT: .LBB29_1: ; %ComputeLoop
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX8-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX8-NEXT: v_readfirstlane_b32 s7, v1
+; GFX8-NEXT: v_readlane_b32 s8, v2, s6
+; GFX8-NEXT: s_mov_b32 m0, s6
+; GFX8-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX8-NEXT: v_writelane_b32 v0, s7, m0
+; GFX8-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX8-NEXT: s_cbranch_scc1 .LBB29_1
+; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX8-NEXT: s_mov_b64 s[4:5], exec
-; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX8-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
+; GFX8-NEXT: v_mbcnt_hi_u32_b32 v3, exec_hi, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX8-NEXT: ; implicit-def: $vgpr2
+; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_i32 s3, s3, 4
-; GFX8-NEXT: ; implicit-def: $vgpr1
-; GFX8-NEXT: s_mov_b32 m0, -1
-; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB29_2
-; GFX8-NEXT: ; %bb.1:
-; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX8-NEXT: s_lshl_b32 s8, s3, 3
-; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX8-NEXT: v_mov_b32_e32 v2, s8
-; GFX8-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX8-NEXT: .LBB29_2:
-; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execz .LBB29_4
+; GFX8-NEXT: ; %bb.3:
+; GFX8-NEXT: s_lshl_b32 s6, s3, 3
+; GFX8-NEXT: v_mov_b32_e32 v2, s6
+; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
+; GFX8-NEXT: .LBB29_4:
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_mov_b64 s[6:7], exec
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_readfirstlane_b32 s8, v1
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_readfirstlane_b32 s8, v2
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB29_4
-; GFX8-NEXT: ; %bb.3:
+; GFX8-NEXT: s_cbranch_execz .LBB29_6
+; GFX8-NEXT: ; %bb.5:
; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX8-NEXT: s_lshl_b32 s3, s3, 4
; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX8-NEXT: v_mov_b32_e32 v2, s3
; GFX8-NEXT: ds_add_f32 v2, v1
-; GFX8-NEXT: .LBB29_4:
+; GFX8-NEXT: .LBB29_6:
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX8-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX8-NEXT: s_mov_b64 s[4:5], exec
; GFX8-NEXT: v_add_f32_e32 v2, s8, v0
; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
; GFX8-NEXT: ; implicit-def: $vgpr0
-; GFX8-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX8-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -8276,20 +8413,20 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: v_writelane_b32 v0, s8, m0
; GFX8-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX8-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX8-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX8-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX8-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX8-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr2
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execz .LBB29_8
-; GFX8-NEXT: ; %bb.7:
+; GFX8-NEXT: s_cbranch_execz .LBB29_10
+; GFX8-NEXT: ; %bb.9:
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX8-NEXT: .LBB29_8:
+; GFX8-NEXT: .LBB29_10:
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -8303,47 +8440,35 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-LABEL: local_ds_fadd_one_as:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2
-; GFX7-NEXT: s_mov_b64 s[6:7], exec
-; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0
-; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s7, v0
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: s_mov_b32 m0, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshl_b32 s4, s3, 3
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: ds_read_b32 v0, v0 offset:32
; GFX7-NEXT: s_add_i32 s3, s3, 4
-; GFX7-NEXT: ; implicit-def: $vgpr1
-; GFX7-NEXT: s_mov_b32 m0, -1
-; GFX7-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX7-NEXT: s_cbranch_execz .LBB29_4
-; GFX7-NEXT: ; %bb.1:
-; GFX7-NEXT: s_lshl_b32 s8, s3, 3
-; GFX7-NEXT: v_mov_b32_e32 v2, s8
-; GFX7-NEXT: ds_read_b32 v1, v2
-; GFX7-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s6
-; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
-; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: .LBB29_2: ; %atomicrmw.start
+; GFX7-NEXT: s_lshl_b32 s6, s3, 3
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v4, v1
-; GFX7-NEXT: v_add_f32_e32 v1, v4, v3
-; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v2, v4, v1
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: v_add_f32_e32 v0, 0x42280000, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v0, v1, v2, v0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v4
-; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX7-NEXT: s_cbranch_execnz .LBB29_2
-; GFX7-NEXT: ; %bb.3: ; %Flow18
-; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: .LBB29_4: ; %Flow19
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB29_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_mov_b64 s[6:7], exec
-; GFX7-NEXT: v_readfirstlane_b32 s8, v1
; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v1, s6, 0
; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v1, s7, v1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX7-NEXT: s_cbranch_execz .LBB29_7
-; GFX7-NEXT: ; %bb.5:
+; GFX7-NEXT: s_cbranch_execz .LBB29_5
+; GFX7-NEXT: ; %bb.3:
; GFX7-NEXT: s_lshl_b32 s3, s3, 4
; GFX7-NEXT: v_mov_b32_e32 v1, s3
; GFX7-NEXT: ds_read_b32 v3, v1
@@ -8351,7 +8476,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s3
; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: .LBB29_6: ; %atomicrmw.start2
+; GFX7-NEXT: .LBB29_4: ; %atomicrmw.start2
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_add_f32_e32 v4, v3, v2
@@ -8361,16 +8486,13 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX7-NEXT: s_cbranch_execnz .LBB29_6
-; GFX7-NEXT: .LBB29_7: ; %Flow17
+; GFX7-NEXT: s_cbranch_execnz .LBB29_4
+; GFX7-NEXT: .LBB29_5: ; %Flow17
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: ds_read_b32 v1, v2
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX7-NEXT: v_add_f32_e32 v0, s8, v0
; GFX7-NEXT: s_mov_b64 s[2:3], 0
-; GFX7-NEXT: .LBB29_8: ; %atomicrmw.start8
+; GFX7-NEXT: .LBB29_6: ; %atomicrmw.start8
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v3, v1
@@ -8380,8 +8502,8 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
; GFX7-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX7-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX7-NEXT: s_cbranch_execnz .LBB29_8
-; GFX7-NEXT: ; %bb.9: ; %atomicrmw.end7
+; GFX7-NEXT: s_cbranch_execnz .LBB29_6
+; GFX7-NEXT: ; %bb.7: ; %atomicrmw.end7
; GFX7-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX7-NEXT: s_mov_b32 s3, 0xf000
@@ -8393,47 +8515,36 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-LABEL: local_ds_fadd_one_as:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2
-; GFX6-NEXT: s_mov_b64 s[6:7], exec
-; GFX6-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0
-; GFX6-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s7, v0
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX6-NEXT: s_mov_b32 m0, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_lshl_b32 s4, s3, 3
+; GFX6-NEXT: s_add_i32 s4, s4, 32
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: ds_read_b32 v0, v0
; GFX6-NEXT: s_add_i32 s3, s3, 4
-; GFX6-NEXT: ; implicit-def: $vgpr1
-; GFX6-NEXT: s_mov_b32 m0, -1
-; GFX6-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX6-NEXT: s_cbranch_execz .LBB29_4
-; GFX6-NEXT: ; %bb.1:
-; GFX6-NEXT: s_lshl_b32 s8, s3, 3
-; GFX6-NEXT: v_mov_b32_e32 v2, s8
-; GFX6-NEXT: ds_read_b32 v1, v2
-; GFX6-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s6
-; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
-; GFX6-NEXT: s_mov_b64 s[6:7], 0
-; GFX6-NEXT: .LBB29_2: ; %atomicrmw.start
+; GFX6-NEXT: s_lshl_b32 s6, s3, 3
+; GFX6-NEXT: s_mov_b64 s[4:5], 0
+; GFX6-NEXT: v_mov_b32_e32 v1, s6
+; GFX6-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v4, v1
-; GFX6-NEXT: v_add_f32_e32 v1, v4, v3
-; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v2, v4, v1
+; GFX6-NEXT: v_mov_b32_e32 v2, v0
+; GFX6-NEXT: v_add_f32_e32 v0, 0x42280000, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v0, v1, v2, v0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v4
-; GFX6-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX6-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX6-NEXT: s_cbranch_execnz .LBB29_2
-; GFX6-NEXT: ; %bb.3: ; %Flow16
-; GFX6-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX6-NEXT: .LBB29_4: ; %Flow17
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
+; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX6-NEXT: s_cbranch_execnz .LBB29_1
+; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX6-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_mov_b64 s[6:7], exec
-; GFX6-NEXT: v_readfirstlane_b32 s8, v1
; GFX6-NEXT: v_mbcnt_lo_u32_b32_e64 v1, s6, 0
; GFX6-NEXT: v_mbcnt_hi_u32_b32_e32 v1, s7, v1
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX6-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX6-NEXT: s_cbranch_execz .LBB29_7
-; GFX6-NEXT: ; %bb.5:
+; GFX6-NEXT: s_cbranch_execz .LBB29_5
+; GFX6-NEXT: ; %bb.3:
; GFX6-NEXT: s_lshl_b32 s3, s3, 4
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: ds_read_b32 v3, v1
@@ -8441,7 +8552,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s3
; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
; GFX6-NEXT: s_mov_b64 s[6:7], 0
-; GFX6-NEXT: .LBB29_6: ; %atomicrmw.start2
+; GFX6-NEXT: .LBB29_4: ; %atomicrmw.start2
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: v_add_f32_e32 v4, v3, v2
@@ -8451,16 +8562,13 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX6-NEXT: s_cbranch_execnz .LBB29_6
-; GFX6-NEXT: .LBB29_7: ; %Flow15
+; GFX6-NEXT: s_cbranch_execnz .LBB29_4
+; GFX6-NEXT: .LBB29_5: ; %Flow15
; GFX6-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX6-NEXT: v_mov_b32_e32 v2, s2
; GFX6-NEXT: ds_read_b32 v1, v2
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX6-NEXT: v_add_f32_e32 v0, s8, v0
; GFX6-NEXT: s_mov_b64 s[2:3], 0
-; GFX6-NEXT: .LBB29_8: ; %atomicrmw.start8
+; GFX6-NEXT: .LBB29_6: ; %atomicrmw.start8
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v3, v1
@@ -8470,8 +8578,8 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
; GFX6-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX6-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX6-NEXT: s_cbranch_execnz .LBB29_8
-; GFX6-NEXT: ; %bb.9: ; %atomicrmw.end7
+; GFX6-NEXT: s_cbranch_execnz .LBB29_6
+; GFX6-NEXT: ; %bb.7: ; %atomicrmw.end7
; GFX6-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
From 1eec81a831dcf925c0c86c153e8dce019a5b5436 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 12:41:49 +0200
Subject: [PATCH 102/246] [CVP][LVI] Add support for vectors (#97428)
The core change here is to add support for converting vector constants
into constant ranges. The rest is just relaxing isIntegerTy() checks and
making sure we don't use APIs that assume scalar types.
There are a couple of places that don't support vectors yet; most
notably, the "simplest" fold (comparisons to a constant) remains
unhandled. I'll leave those to a followup.
---
llvm/lib/Analysis/LazyValueInfo.cpp | 32 +++++++++--
.../Scalar/CorrelatedValuePropagation.cpp | 53 ++++-------------
.../CorrelatedValuePropagation/icmp.ll | 4 +-
.../CorrelatedValuePropagation/vectors.ll | 57 ++++++++++++++-----
4 files changed, 83 insertions(+), 63 deletions(-)
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index d8b03eaa3928f..e9051e74b4577 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -650,7 +650,7 @@ LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
if (PT && isKnownNonZero(BBI, DL))
return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
- if (BBI->getType()->isIntegerTy()) {
+ if (BBI->getType()->isIntOrIntVectorTy()) {
if (auto *CI = dyn_cast<CastInst>(BBI))
return solveBlockValueCast(CI, BB);
@@ -836,6 +836,24 @@ void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
}
}
+static ConstantRange getConstantRangeFromFixedVector(Constant *C,
+ FixedVectorType *Ty) {
+ unsigned BW = Ty->getScalarSizeInBits();
+ ConstantRange CR = ConstantRange::getEmpty(BW);
+ for (unsigned I = 0; I < Ty->getNumElements(); ++I) {
+ Constant *Elem = C->getAggregateElement(I);
+ if (!Elem)
+ return ConstantRange::getFull(BW);
+ if (isa<PoisonValue>(Elem))
+ continue;
+ auto *CI = dyn_cast<ConstantInt>(Elem);
+ if (!CI)
+ return ConstantRange::getFull(BW);
+ CR = CR.unionWith(CI->getValue());
+ }
+ return CR;
+}
+
static ConstantRange toConstantRange(const ValueLatticeElement &Val,
Type *Ty, bool UndefAllowed = false) {
assert(Ty->isIntOrIntVectorTy() && "Must be integer type");
@@ -844,6 +862,13 @@ static ConstantRange toConstantRange(const ValueLatticeElement &Val,
unsigned BW = Ty->getScalarSizeInBits();
if (Val.isUnknown())
return ConstantRange::getEmpty(BW);
+ if (Val.isConstant() && Ty->isVectorTy()) {
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(
+ Val.getConstant()->getSplatValue(/*AllowPoison=*/true)))
+ return ConstantRange(CI->getValue());
+ if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
+ return getConstantRangeFromFixedVector(Val.getConstant(), VTy);
+ }
return ConstantRange::getFull(BW);
}
@@ -968,7 +993,7 @@ LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
return std::nullopt;
const ConstantRange &LHSRange = *LHSRes;
- const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
+ const unsigned ResultBitWidth = CI->getType()->getScalarSizeInBits();
// NOTE: We're currently limited by the set of operations that ConstantRange
// can evaluate symbolically. Enhancing that set will allow us to analyze
@@ -1108,7 +1133,7 @@ LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
const APInt &Offset,
Instruction *CxtI,
bool UseBlockValue) {
- ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
+ ConstantRange RHSRange(RHS->getType()->getScalarSizeInBits(),
/*isFullSet=*/true);
if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
RHSRange = ConstantRange(CI->getValue());
@@ -1728,7 +1753,6 @@ Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
bool UndefAllowed) {
- assert(V->getType()->isIntegerTy());
BasicBlock *BB = CxtI->getParent();
ValueLatticeElement Result =
getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 875d3ea78fae5..34304c2245e30 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -288,9 +288,8 @@ static bool processPHI(PHINode *P, LazyValueInfo *LVI, DominatorTree *DT,
}
static bool processICmp(ICmpInst *Cmp, LazyValueInfo *LVI) {
- // Only for signed relational comparisons of scalar integers.
- if (Cmp->getType()->isVectorTy() ||
- !Cmp->getOperand(0)->getType()->isIntegerTy())
+ // Only for signed relational comparisons of integers.
+ if (!Cmp->getOperand(0)->getType()->isIntOrIntVectorTy())
return false;
if (!Cmp->isSigned())
@@ -505,12 +504,8 @@ static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI);
// because it is negation-invariant.
static bool processAbsIntrinsic(IntrinsicInst *II, LazyValueInfo *LVI) {
Value *X = II->getArgOperand(0);
- Type *Ty = X->getType();
- if (!Ty->isIntegerTy())
- return false;
-
bool IsIntMinPoison = cast<ConstantInt>(II->getArgOperand(1))->isOne();
- APInt IntMin = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
+ APInt IntMin = APInt::getSignedMinValue(X->getType()->getScalarSizeInBits());
ConstantRange Range = LVI->getConstantRangeAtUse(
II->getOperandUse(0), /*UndefAllowed*/ IsIntMinPoison);
@@ -679,15 +674,13 @@ static bool processCallSite(CallBase &CB, LazyValueInfo *LVI) {
}
if (auto *WO = dyn_cast<WithOverflowInst>(&CB)) {
- if (WO->getLHS()->getType()->isIntegerTy() && willNotOverflow(WO, LVI)) {
+ if (willNotOverflow(WO, LVI))
return processOverflowIntrinsic(WO, LVI);
- }
}
if (auto *SI = dyn_cast<SaturatingInst>(&CB)) {
- if (SI->getType()->isIntegerTy() && willNotOverflow(SI, LVI)) {
+ if (willNotOverflow(SI, LVI))
return processSaturatingInst(SI, LVI);
- }
}
bool Changed = false;
@@ -761,11 +754,10 @@ static bool narrowSDivOrSRem(BinaryOperator *Instr, const ConstantRange &LCR,
const ConstantRange &RCR) {
assert(Instr->getOpcode() == Instruction::SDiv ||
Instr->getOpcode() == Instruction::SRem);
- assert(!Instr->getType()->isVectorTy());
// Find the smallest power of two bitwidth that's sufficient to hold Instr's
// operands.
- unsigned OrigWidth = Instr->getType()->getIntegerBitWidth();
+ unsigned OrigWidth = Instr->getType()->getScalarSizeInBits();
// What is the smallest bit width that can accommodate the entire value ranges
// of both of the operands?
@@ -788,7 +780,7 @@ static bool narrowSDivOrSRem(BinaryOperator *Instr, const ConstantRange &LCR,
++NumSDivSRemsNarrowed;
IRBuilder<> B{Instr};
- auto *TruncTy = Type::getIntNTy(Instr->getContext(), NewWidth);
+ auto *TruncTy = Instr->getType()->getWithNewBitWidth(NewWidth);
auto *LHS = B.CreateTruncOrBitCast(Instr->getOperand(0), TruncTy,
Instr->getName() + ".lhs.trunc");
auto *RHS = B.CreateTruncOrBitCast(Instr->getOperand(1), TruncTy,
@@ -809,7 +801,6 @@ static bool expandUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
Type *Ty = Instr->getType();
assert(Instr->getOpcode() == Instruction::UDiv ||
Instr->getOpcode() == Instruction::URem);
- assert(!Ty->isVectorTy());
bool IsRem = Instr->getOpcode() == Instruction::URem;
Value *X = Instr->getOperand(0);
@@ -892,7 +883,6 @@ static bool narrowUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
const ConstantRange &YCR) {
assert(Instr->getOpcode() == Instruction::UDiv ||
Instr->getOpcode() == Instruction::URem);
- assert(!Instr->getType()->isVectorTy());
// Find the smallest power of two bitwidth that's sufficient to hold Instr's
// operands.
@@ -905,12 +895,12 @@ static bool narrowUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
// NewWidth might be greater than OrigWidth if OrigWidth is not a power of
// two.
- if (NewWidth >= Instr->getType()->getIntegerBitWidth())
+ if (NewWidth >= Instr->getType()->getScalarSizeInBits())
return false;
++NumUDivURemsNarrowed;
IRBuilder<> B{Instr};
- auto *TruncTy = Type::getIntNTy(Instr->getContext(), NewWidth);
+ auto *TruncTy = Instr->getType()->getWithNewBitWidth(NewWidth);
auto *LHS = B.CreateTruncOrBitCast(Instr->getOperand(0), TruncTy,
Instr->getName() + ".lhs.trunc");
auto *RHS = B.CreateTruncOrBitCast(Instr->getOperand(1), TruncTy,
@@ -929,9 +919,6 @@ static bool narrowUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
assert(Instr->getOpcode() == Instruction::UDiv ||
Instr->getOpcode() == Instruction::URem);
- if (Instr->getType()->isVectorTy())
- return false;
-
ConstantRange XCR = LVI->getConstantRangeAtUse(Instr->getOperandUse(0),
/*UndefAllowed*/ false);
// Allow undef for RHS, as we can assume it is division by zero UB.
@@ -946,7 +933,6 @@ static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
const ConstantRange &RCR, LazyValueInfo *LVI) {
assert(SDI->getOpcode() == Instruction::SRem);
- assert(!SDI->getType()->isVectorTy());
if (LCR.abs().icmp(CmpInst::ICMP_ULT, RCR.abs())) {
SDI->replaceAllUsesWith(SDI->getOperand(0));
@@ -1006,7 +992,6 @@ static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
const ConstantRange &RCR, LazyValueInfo *LVI) {
assert(SDI->getOpcode() == Instruction::SDiv);
- assert(!SDI->getType()->isVectorTy());
// Check whether the division folds to a constant.
ConstantRange DivCR = LCR.sdiv(RCR);
@@ -1064,9 +1049,6 @@ static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
static bool processSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
assert(Instr->getOpcode() == Instruction::SDiv ||
Instr->getOpcode() == Instruction::SRem);
- if (Instr->getType()->isVectorTy())
- return false;
-
ConstantRange LCR =
LVI->getConstantRangeAtUse(Instr->getOperandUse(0), /*AllowUndef*/ false);
// Allow undef for RHS, as we can assume it is division by zero UB.
@@ -1085,12 +1067,9 @@ static bool processSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
}
static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) {
- if (SDI->getType()->isVectorTy())
- return false;
-
ConstantRange LRange =
LVI->getConstantRangeAtUse(SDI->getOperandUse(0), /*UndefAllowed*/ false);
- unsigned OrigWidth = SDI->getType()->getIntegerBitWidth();
+ unsigned OrigWidth = SDI->getType()->getScalarSizeInBits();
ConstantRange NegOneOrZero =
ConstantRange(APInt(OrigWidth, (uint64_t)-1, true), APInt(OrigWidth, 1));
if (NegOneOrZero.contains(LRange)) {
@@ -1117,9 +1096,6 @@ static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) {
}
static bool processSExt(SExtInst *SDI, LazyValueInfo *LVI) {
- if (SDI->getType()->isVectorTy())
- return false;
-
const Use &Base = SDI->getOperandUse(0);
if (!LVI->getConstantRangeAtUse(Base, /*UndefAllowed*/ false)
.isAllNonNegative())
@@ -1138,9 +1114,6 @@ static bool processSExt(SExtInst *SDI, LazyValueInfo *LVI) {
}
static bool processPossibleNonNeg(PossiblyNonNegInst *I, LazyValueInfo *LVI) {
- if (I->getType()->isVectorTy())
- return false;
-
if (I->hasNonNeg())
return false;
@@ -1164,9 +1137,6 @@ static bool processUIToFP(UIToFPInst *UIToFP, LazyValueInfo *LVI) {
}
static bool processSIToFP(SIToFPInst *SIToFP, LazyValueInfo *LVI) {
- if (SIToFP->getType()->isVectorTy())
- return false;
-
const Use &Base = SIToFP->getOperandUse(0);
if (!LVI->getConstantRangeAtUse(Base, /*UndefAllowed*/ false)
.isAllNonNegative())
@@ -1187,9 +1157,6 @@ static bool processSIToFP(SIToFPInst *SIToFP, LazyValueInfo *LVI) {
static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI) {
using OBO = OverflowingBinaryOperator;
- if (BinOp->getType()->isVectorTy())
- return false;
-
bool NSW = BinOp->hasNoSignedWrap();
bool NUW = BinOp->hasNoUnsignedWrap();
if (NSW && NUW)
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll b/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
index ca70713440219..200793918f0ef 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
@@ -1246,13 +1246,11 @@ define i1 @non_const_range_minmax(i8 %a, i8 %b) {
ret i1 %cmp1
}
-; FIXME: Also support vectors.
define <2 x i1> @non_const_range_minmax_vec(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @non_const_range_minmax_vec(
; CHECK-NEXT: [[A2:%.*]] = call <2 x i8> @llvm.umin.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
; CHECK-NEXT: [[B2:%.*]] = call <2 x i8> @llvm.umax.v2i8(<2 x i8> [[B:%.*]], <2 x i8> <i8 11, i8 11>)
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i8> [[A2]], [[B2]]
-; CHECK-NEXT: ret <2 x i1> [[CMP1]]
+; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
%a2 = call <2 x i8> @llvm.umin.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
%b2 = call <2 x i8> @llvm.umax.v2i8(<2 x i8> %b, <2 x i8> <i8 11, i8 11>)
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll b/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
index 9862dd56e31b2..a06fa2c106609 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=correlated-propagation < %s | FileCheck %s
+; TODO: Add support for this.
define <2 x i1> @cmp1(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i1> @cmp1(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
@@ -13,6 +14,7 @@ define <2 x i1> @cmp1(<2 x i8> %a) {
ret <2 x i1> %cmp
}
+; TODO: Add support for this.
define <2 x i1> @cmp2(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i1> @cmp2(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
@@ -29,7 +31,7 @@ define <2 x i1> @cmp_signedness(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i1> @cmp_signedness(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i16> [[ZEXT]], <i16 5, i16 5>
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i16> [[ZEXT]], <i16 5, i16 5>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -41,7 +43,7 @@ define <2 x i16> @infer_nowrap(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @infer_nowrap(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = add <2 x i16> [[ZEXT]], <i16 1, i16 1>
+; CHECK-NEXT: [[RES:%.*]] = add nuw nsw <2 x i16> [[ZEXT]], <i16 1, i16 1>
; CHECK-NEXT: ret <2 x i16> [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -53,7 +55,7 @@ define <2 x i16> @infer_nowrap_nonsplat(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @infer_nowrap_nonsplat(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = add <2 x i16> [[ZEXT]], <i16 1, i16 2>
+; CHECK-NEXT: [[RES:%.*]] = add nuw nsw <2 x i16> [[ZEXT]], <i16 1, i16 2>
; CHECK-NEXT: ret <2 x i16> [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -61,11 +63,35 @@ define <2 x i16> @infer_nowrap_nonsplat(<2 x i8> %a) {
ret <2 x i16> %res
}
+define <vscale x 2 x i16> @infer_nowrap_scalable(<vscale x 2 x i8> %a) {
+; CHECK-LABEL: define <vscale x 2 x i16> @infer_nowrap_scalable(
+; CHECK-SAME: <vscale x 2 x i8> [[A:%.*]]) {
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[A]] to <vscale x 2 x i16>
+; CHECK-NEXT: [[RES:%.*]] = add nuw nsw <vscale x 2 x i16> [[ZEXT]], shufflevector (<vscale x 2 x i16> insertelement (<vscale x 2 x i16> poison, i16 1, i64 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 2 x i16> [[RES]]
+;
+ %zext = zext <vscale x 2 x i8> %a to <vscale x 2 x i16>
+ %res = add <vscale x 2 x i16> %zext, splat (i16 1)
+ ret <vscale x 2 x i16> %res
+}
+
+define <2 x i16> @infer_nowrap_poison(<2 x i8> %a) {
+; CHECK-LABEL: define <2 x i16> @infer_nowrap_poison(
+; CHECK-SAME: <2 x i8> [[A:%.*]]) {
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
+; CHECK-NEXT: [[RES:%.*]] = add nuw nsw <2 x i16> [[ZEXT]], <i16 1, i16 poison>
+; CHECK-NEXT: ret <2 x i16> [[RES]]
+;
+ %zext = zext <2 x i8> %a to <2 x i16>
+ %res = add <2 x i16> %zext, <i16 1, i16 poison>
+ ret <2 x i16> %res
+}
+
define <2 x i16> @infer_nowrap_nonsplat_nsw_only(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @infer_nowrap_nonsplat_nsw_only(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = add <2 x i16> [[ZEXT]], <i16 1, i16 -1>
+; CHECK-NEXT: [[RES:%.*]] = add nsw <2 x i16> [[ZEXT]], <i16 1, i16 -1>
; CHECK-NEXT: ret <2 x i16> [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -77,8 +103,7 @@ define <2 x i16> @abs(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @abs(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = call <2 x i16> @llvm.abs.v2i16(<2 x i16> [[ZEXT]], i1 false)
-; CHECK-NEXT: ret <2 x i16> [[RES]]
+; CHECK-NEXT: ret <2 x i16> [[ZEXT]]
;
%zext = zext <2 x i8> %a to <2 x i16>
%res = call <2 x i16> @llvm.abs(<2 x i16> %zext, i1 false)
@@ -89,7 +114,7 @@ define <2 x i16> @saturating(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @saturating(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ZEXT]], <2 x i16> <i16 1, i16 1>)
+; CHECK-NEXT: [[RES:%.*]] = add nuw nsw <2 x i16> [[ZEXT]], <i16 1, i16 1>
; CHECK-NEXT: ret <2 x i16> [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -101,7 +126,8 @@ define {<2 x i16>, <2 x i1>} @with_overflow(<2 x i8> %a) {
; CHECK-LABEL: define { <2 x i16>, <2 x i1> } @with_overflow(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = call { <2 x i16>, <2 x i1> } @llvm.uadd.with.overflow.v2i16(<2 x i16> [[ZEXT]], <2 x i16> <i16 1, i16 1>)
+; CHECK-NEXT: [[RES1:%.*]] = add nuw nsw <2 x i16> [[ZEXT]], <i16 1, i16 1>
+; CHECK-NEXT: [[RES:%.*]] = insertvalue { <2 x i16>, <2 x i1> } { <2 x i16> poison, <2 x i1> zeroinitializer }, <2 x i16> [[RES1]], 0
; CHECK-NEXT: ret { <2 x i16>, <2 x i1> } [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -113,7 +139,9 @@ define <2 x i16> @srem1(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @srem1(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = srem <2 x i16> [[ZEXT]], <i16 42, i16 42>
+; CHECK-NEXT: [[RES1_LHS_TRUNC:%.*]] = trunc <2 x i16> [[ZEXT]] to <2 x i8>
+; CHECK-NEXT: [[RES12:%.*]] = urem <2 x i8> [[RES1_LHS_TRUNC]], <i8 42, i8 42>
+; CHECK-NEXT: [[RES:%.*]] = zext <2 x i8> [[RES12]] to <2 x i16>
; CHECK-NEXT: ret <2 x i16> [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -125,7 +153,9 @@ define <2 x i16> @srem2(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @srem2(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = sext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = srem <2 x i16> [[ZEXT]], <i16 42, i16 42>
+; CHECK-NEXT: [[RES_LHS_TRUNC:%.*]] = trunc <2 x i16> [[ZEXT]] to <2 x i8>
+; CHECK-NEXT: [[RES1:%.*]] = srem <2 x i8> [[RES_LHS_TRUNC]], <i8 42, i8 42>
+; CHECK-NEXT: [[RES:%.*]] = sext <2 x i8> [[RES1]] to <2 x i16>
; CHECK-NEXT: ret <2 x i16> [[RES]]
;
%zext = sext <2 x i8> %a to <2 x i16>
@@ -137,7 +167,7 @@ define <2 x i16> @ashr(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @ashr(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = ashr <2 x i16> [[ZEXT]], <i16 1, i16 1>
+; CHECK-NEXT: [[RES:%.*]] = lshr <2 x i16> [[ZEXT]], <i16 1, i16 1>
; CHECK-NEXT: ret <2 x i16> [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -149,7 +179,7 @@ define <2 x i32> @sext(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i32> @sext(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = sext <2 x i16> [[ZEXT]] to <2 x i32>
+; CHECK-NEXT: [[RES:%.*]] = zext nneg <2 x i16> [[ZEXT]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -161,7 +191,7 @@ define <2 x float> @sitofp(<2 x i8> %a) {
; CHECK-LABEL: define <2 x float> @sitofp(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = sitofp <2 x i16> [[ZEXT]] to <2 x float>
+; CHECK-NEXT: [[RES:%.*]] = uitofp nneg <2 x i16> [[ZEXT]] to <2 x float>
; CHECK-NEXT: ret <2 x float> [[RES]]
;
%zext = zext <2 x i8> %a to <2 x i16>
@@ -169,6 +199,7 @@ define <2 x float> @sitofp(<2 x i8> %a) {
ret <2 x float> %res
}
+; TODO: Add support for this.
define <2 x i16> @and(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @and(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
From a6d289da0cce99b28658f264b7f9496e3648232e Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 11:47:48 +0100
Subject: [PATCH 103/246] [X86] Add checkSignTestSetCCCombine - if
X86ISD::CMP/OR is testing for signbits, attempt to test for the signbit
source instead. (#97433)
There's a lot more we could do here (including the reverse fold back to X86::COND_S/NS with some other X86ISD nodes), but I wanted to address the MOVMSK issue initially.
Fixes #66191
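To make the mask adjustment concrete, here is a small hedged C++ sketch,
not the DAG combine itself (which operates on SDValues): testing the sign
bit of x << ShiftAmt is equivalent to testing bit (BitWidth - 1 - ShiftAmt)
of x. That is the BitMask.lshrInPlace(*ShiftAmt) step in
checkSignTestSetCCCombine, and the reason the movmsk test below replaces
shlb $6 / sarb $6 / cmovnsl with testb $2 / cmovel.

  #include <cassert>
  #include <cstdint>

  // Sketch only: computes which bit of the pre-shift value must be TESTed
  // once a sign-bit check peeks through an ISD::SHL, mirroring the
  // BitMask.lshrInPlace(*ShiftAmt) step in the patch.
  static uint64_t adjustedSignMask(unsigned BitWidth, unsigned ShiftAmt) {
    assert(ShiftAmt < BitWidth && "shift amount must be valid");
    uint64_t SignMask = 1ULL << (BitWidth - 1);
    return SignMask >> ShiftAmt;
  }

  int main() {
    // movmsk_slt_v2i64_1 below: an i8 sign test of (mask << 6) becomes a
    // test of bit 1 of the two-lane movmsk result, i.e. testb $2, %cl.
    assert(adjustedSignMask(8, 6) == 0x02);
    return 0;
  }

The COND_S/COND_NS to COND_NE/COND_E swap in the patch is what turns the
old cmovns into the new cmove once the TEST replaces the sign check.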
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 59 ++++++++++++++
llvm/test/CodeGen/X86/is_fpclass-fp80.ll | 43 +++++------
llvm/test/CodeGen/X86/movmsk-bittest.ll | 98 +++++++++---------------
3 files changed, 117 insertions(+), 83 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8eadf079d4f2f..de26ce2853c5b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -46433,6 +46433,62 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
return LockOp;
}
+// Check whether we're just testing the signbit, and whether we can simplify
+// this by tracking where the signbit came from.
+static SDValue checkSignTestSetCCCombine(SDValue Cmp, X86::CondCode &CC,
+ SelectionDAG &DAG) {
+ if (CC != X86::COND_S && CC != X86::COND_NS)
+ return SDValue();
+
+ if (!Cmp.hasOneUse())
+ return SDValue();
+
+ SDValue Src;
+ if (Cmp.getOpcode() == X86ISD::CMP) {
+ // CMP(X,0) -> signbit test
+ if (!isNullConstant(Cmp.getOperand(1)))
+ return SDValue();
+ Src = Cmp.getOperand(0);
+ // Peek through a SRA node as we just need the signbit.
+ // TODO: Remove one use limit once sdiv-fix regressions are fixed.
+ // TODO: Use SimplifyDemandedBits instead of just SRA?
+ if (Src.getOpcode() != ISD::SRA || !Src.hasOneUse())
+ return SDValue();
+ Src = Src.getOperand(0);
+ } else if (Cmp.getOpcode() == X86ISD::OR) {
+ // OR(X,Y) -> see if only one operand contributes to the signbit.
+ // TODO: XOR(X,Y) -> see if only one operand contributes to the signbit.
+ if (DAG.SignBitIsZero(Cmp.getOperand(0)))
+ Src = Cmp.getOperand(1);
+ else if (DAG.SignBitIsZero(Cmp.getOperand(1)))
+ Src = Cmp.getOperand(0);
+ else
+ return SDValue();
+ } else {
+ return SDValue();
+ }
+
+ // Replace with a TEST on the MSB.
+ SDLoc DL(Cmp);
+ MVT SrcVT = Src.getSimpleValueType();
+ APInt BitMask = APInt::getSignMask(SrcVT.getScalarSizeInBits());
+
+ // If Src came from a SHL (probably from an expanded SIGN_EXTEND_INREG), then
+ // peek through and adjust the TEST bit.
+ if (Src.getOpcode() == ISD::SHL) {
+ if (std::optional<uint64_t> ShiftAmt = DAG.getValidShiftAmount(Src)) {
+ Src = Src.getOperand(0);
+ BitMask.lshrInPlace(*ShiftAmt);
+ }
+ }
+
+ SDValue Mask = DAG.getNode(ISD::AND, DL, SrcVT, Src,
+ DAG.getConstant(BitMask, DL, SrcVT));
+ CC = CC == X86::COND_S ? X86::COND_NE : X86::COND_E;
+ return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Mask,
+ DAG.getConstant(0, DL, SrcVT));
+}
+
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
@@ -47072,6 +47128,9 @@ static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
return Flags;
+ if (SDValue R = checkSignTestSetCCCombine(EFLAGS, CC, DAG))
+ return R;
+
if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
return R;
diff --git a/llvm/test/CodeGen/X86/is_fpclass-fp80.ll b/llvm/test/CodeGen/X86/is_fpclass-fp80.ll
index ec2323ac2250c..7d0c5838c1554 100644
--- a/llvm/test/CodeGen/X86/is_fpclass-fp80.ll
+++ b/llvm/test/CodeGen/X86/is_fpclass-fp80.ll
@@ -363,10 +363,9 @@ define i1 @is_posnormal_f80(x86_fp80 %x) {
; CHECK-32-NEXT: pushl %esi
; CHECK-32-NEXT: .cfi_def_cfa_offset 8
; CHECK-32-NEXT: .cfi_offset %esi, -8
-; CHECK-32-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; CHECK-32-NEXT: movswl %dx, %ecx
-; CHECK-32-NEXT: sarl $15, %ecx
; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl %ecx, %edx
; CHECK-32-NEXT: andl $32767, %edx # imm = 0x7FFF
; CHECK-32-NEXT: decl %edx
; CHECK-32-NEXT: movzwl %dx, %edx
@@ -374,8 +373,8 @@ define i1 @is_posnormal_f80(x86_fp80 %x) {
; CHECK-32-NEXT: cmpl $32766, %edx # imm = 0x7FFE
; CHECK-32-NEXT: sbbl %esi, %esi
; CHECK-32-NEXT: setb %dl
-; CHECK-32-NEXT: testl %ecx, %ecx
-; CHECK-32-NEXT: setns %cl
+; CHECK-32-NEXT: testl $32768, %ecx # imm = 0x8000
+; CHECK-32-NEXT: sete %cl
; CHECK-32-NEXT: shrl $31, %eax
; CHECK-32-NEXT: andb %cl, %al
; CHECK-32-NEXT: andb %dl, %al
@@ -411,10 +410,9 @@ define i1 @is_negnormal_f80(x86_fp80 %x) {
; CHECK-32-NEXT: pushl %esi
; CHECK-32-NEXT: .cfi_def_cfa_offset 8
; CHECK-32-NEXT: .cfi_offset %esi, -8
-; CHECK-32-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; CHECK-32-NEXT: movswl %dx, %ecx
-; CHECK-32-NEXT: sarl $15, %ecx
; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl %ecx, %edx
; CHECK-32-NEXT: andl $32767, %edx # imm = 0x7FFF
; CHECK-32-NEXT: decl %edx
; CHECK-32-NEXT: movzwl %dx, %edx
@@ -422,8 +420,8 @@ define i1 @is_negnormal_f80(x86_fp80 %x) {
; CHECK-32-NEXT: cmpl $32766, %edx # imm = 0x7FFE
; CHECK-32-NEXT: sbbl %esi, %esi
; CHECK-32-NEXT: setb %dl
-; CHECK-32-NEXT: testl %ecx, %ecx
-; CHECK-32-NEXT: sets %cl
+; CHECK-32-NEXT: testl $32768, %ecx # imm = 0x8000
+; CHECK-32-NEXT: setne %cl
; CHECK-32-NEXT: shrl $31, %eax
; CHECK-32-NEXT: andb %cl, %al
; CHECK-32-NEXT: andb %dl, %al
@@ -543,24 +541,23 @@ define i1 @is_negsubnormal_f80(x86_fp80 %x) {
; CHECK-32-NEXT: .cfi_def_cfa_offset 12
; CHECK-32-NEXT: .cfi_offset %esi, -12
; CHECK-32-NEXT: .cfi_offset %edi, -8
-; CHECK-32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; CHECK-32-NEXT: movswl %cx, %eax
-; CHECK-32-NEXT: sarl $15, %eax
-; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %esi
; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT: movl %eax, %ecx
; CHECK-32-NEXT: andl $32767, %ecx # imm = 0x7FFF
-; CHECK-32-NEXT: xorl %edx, %edx
-; CHECK-32-NEXT: addl $-1, %esi
-; CHECK-32-NEXT: adcl $-1, %edi
-; CHECK-32-NEXT: adcl $-1, %ecx
+; CHECK-32-NEXT: xorl %esi, %esi
+; CHECK-32-NEXT: addl $-1, %edi
; CHECK-32-NEXT: adcl $-1, %edx
-; CHECK-32-NEXT: cmpl $-1, %esi
-; CHECK-32-NEXT: sbbl $2147483647, %edi # imm = 0x7FFFFFFF
+; CHECK-32-NEXT: adcl $-1, %ecx
+; CHECK-32-NEXT: adcl $-1, %esi
+; CHECK-32-NEXT: cmpl $-1, %edi
+; CHECK-32-NEXT: sbbl $2147483647, %edx # imm = 0x7FFFFFFF
; CHECK-32-NEXT: sbbl $0, %ecx
-; CHECK-32-NEXT: sbbl $0, %edx
+; CHECK-32-NEXT: sbbl $0, %esi
; CHECK-32-NEXT: setb %cl
-; CHECK-32-NEXT: testl %eax, %eax
-; CHECK-32-NEXT: sets %al
+; CHECK-32-NEXT: testl $32768, %eax # imm = 0x8000
+; CHECK-32-NEXT: setne %al
; CHECK-32-NEXT: andb %cl, %al
; CHECK-32-NEXT: popl %esi
; CHECK-32-NEXT: .cfi_def_cfa_offset 8
diff --git a/llvm/test/CodeGen/X86/movmsk-bittest.ll b/llvm/test/CodeGen/X86/movmsk-bittest.ll
index 7c8fe03ff4741..b67e70e71c3d5 100644
--- a/llvm/test/CodeGen/X86/movmsk-bittest.ll
+++ b/llvm/test/CodeGen/X86/movmsk-bittest.ll
@@ -37,18 +37,16 @@ define i32 @movmsk_slt_v2i64_1(<2 x i64> %v, i32 %a, i32 %b) {
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: movmskpd %xmm0, %ecx
-; SSE-NEXT: shlb $6, %cl
-; SSE-NEXT: sarb $6, %cl
-; SSE-NEXT: cmovnsl %esi, %eax
+; SSE-NEXT: testb $2, %cl
+; SSE-NEXT: cmovel %esi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmsk_slt_v2i64_1:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vmovmskpd %xmm0, %ecx
-; AVX-NEXT: shlb $6, %cl
-; AVX-NEXT: sarb $6, %cl
-; AVX-NEXT: cmovnsl %esi, %eax
+; AVX-NEXT: testb $2, %cl
+; AVX-NEXT: cmovel %esi, %eax
; AVX-NEXT: retq
%cmp = icmp slt <2 x i64> %v, zeroinitializer
%msk = bitcast <2 x i1> %cmp to i2
@@ -62,18 +60,16 @@ define i32 @movmsk_sgt_v2i64_1(<2 x i64> %v, i32 %a, i32 %b) {
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: movmskpd %xmm0, %ecx
-; SSE-NEXT: shlb $6, %cl
-; SSE-NEXT: sarb $6, %cl
-; SSE-NEXT: cmovsl %esi, %eax
+; SSE-NEXT: testb $2, %cl
+; SSE-NEXT: cmovnel %esi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmsk_sgt_v2i64_1:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vmovmskpd %xmm0, %ecx
-; AVX-NEXT: shlb $6, %cl
-; AVX-NEXT: sarb $6, %cl
-; AVX-NEXT: cmovsl %esi, %eax
+; AVX-NEXT: testb $2, %cl
+; AVX-NEXT: cmovnel %esi, %eax
; AVX-NEXT: retq
%cmp = icmp slt <2 x i64> %v, zeroinitializer
%msk = bitcast <2 x i1> %cmp to i2
@@ -111,18 +107,16 @@ define i32 @movmsk_slt_v4i32_3(<4 x i32> %v, i32 %a, i32 %b) {
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: movmskps %xmm0, %ecx
-; SSE-NEXT: shlb $4, %cl
-; SSE-NEXT: sarb $4, %cl
-; SSE-NEXT: cmovnsl %esi, %eax
+; SSE-NEXT: testb $8, %cl
+; SSE-NEXT: cmovel %esi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmsk_slt_v4i32_3:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vmovmskps %xmm0, %ecx
-; AVX-NEXT: shlb $4, %cl
-; AVX-NEXT: sarb $4, %cl
-; AVX-NEXT: cmovnsl %esi, %eax
+; AVX-NEXT: testb $8, %cl
+; AVX-NEXT: cmovel %esi, %eax
; AVX-NEXT: retq
%cmp = icmp slt <4 x i32> %v, zeroinitializer
%msk = bitcast <4 x i1> %cmp to i4
@@ -136,18 +130,16 @@ define i32 @movmsk_sgt_v4i32_3(<4 x i32> %v, i32 %a, i32 %b) {
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: movmskps %xmm0, %ecx
-; SSE-NEXT: shlb $4, %cl
-; SSE-NEXT: sarb $4, %cl
-; SSE-NEXT: cmovsl %esi, %eax
+; SSE-NEXT: testb $8, %cl
+; SSE-NEXT: cmovnel %esi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmsk_sgt_v4i32_3:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vmovmskps %xmm0, %ecx
-; AVX-NEXT: shlb $4, %cl
-; AVX-NEXT: sarb $4, %cl
-; AVX-NEXT: cmovsl %esi, %eax
+; AVX-NEXT: testb $8, %cl
+; AVX-NEXT: cmovnel %esi, %eax
; AVX-NEXT: retq
%cmp = icmp slt <4 x i32> %v, zeroinitializer
%msk = bitcast <4 x i1> %cmp to i4
@@ -256,20 +248,17 @@ define i32 @movmsk_slt_v4i64_3(<4 x i64> %v, i32 %a, i32 %b) {
; SSE-LABEL: movmsk_slt_v4i64_3:
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE-NEXT: movmskps %xmm0, %ecx
-; SSE-NEXT: shlb $4, %cl
-; SSE-NEXT: sarb $4, %cl
-; SSE-NEXT: cmovnsl %esi, %eax
+; SSE-NEXT: movmskps %xmm1, %ecx
+; SSE-NEXT: testb $8, %cl
+; SSE-NEXT: cmovel %esi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmsk_slt_v4i64_3:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vmovmskpd %ymm0, %ecx
-; AVX-NEXT: shlb $4, %cl
-; AVX-NEXT: sarb $4, %cl
-; AVX-NEXT: cmovnsl %esi, %eax
+; AVX-NEXT: testb $8, %cl
+; AVX-NEXT: cmovel %esi, %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%cmp = icmp slt <4 x i64> %v, zeroinitializer
@@ -283,20 +272,17 @@ define i32 @movmsk_sgt_v4i64_3(<4 x i64> %v, i32 %a, i32 %b) {
; SSE-LABEL: movmsk_sgt_v4i64_3:
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE-NEXT: movmskps %xmm0, %ecx
-; SSE-NEXT: shlb $4, %cl
-; SSE-NEXT: sarb $4, %cl
-; SSE-NEXT: cmovsl %esi, %eax
+; SSE-NEXT: movmskps %xmm1, %ecx
+; SSE-NEXT: testb $8, %cl
+; SSE-NEXT: cmovnel %esi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmsk_sgt_v4i64_3:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vmovmskpd %ymm0, %ecx
-; AVX-NEXT: shlb $4, %cl
-; AVX-NEXT: sarb $4, %cl
-; AVX-NEXT: cmovsl %esi, %eax
+; AVX-NEXT: testb $8, %cl
+; AVX-NEXT: cmovnel %esi, %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%cmp = icmp slt <4 x i64> %v, zeroinitializer
@@ -487,22 +473,18 @@ define i32 @movmsk_slt_v32i8_31(<32 x i8> %v, i32 %a, i32 %b) {
; SSE-LABEL: movmsk_slt_v32i8_31:
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: pmovmskb %xmm0, %ecx
-; SSE-NEXT: pmovmskb %xmm1, %edx
-; SSE-NEXT: shll $16, %edx
-; SSE-NEXT: orl %ecx, %edx
-; SSE-NEXT: cmovnsl %esi, %eax
+; SSE-NEXT: pmovmskb %xmm1, %ecx
+; SSE-NEXT: testl $32768, %ecx # imm = 0x8000
+; SSE-NEXT: cmovel %esi, %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: movmsk_slt_v32i8_31:
; AVX1: # %bb.0:
; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: vpmovmskb %xmm0, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmovmskb %xmm0, %edx
-; AVX1-NEXT: shll $16, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: cmovnsl %esi, %eax
+; AVX1-NEXT: vpmovmskb %xmm0, %ecx
+; AVX1-NEXT: testl $32768, %ecx # imm = 0x8000
+; AVX1-NEXT: cmovel %esi, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -534,22 +516,18 @@ define i32 @movmsk_sgt_v32i8_31(<32 x i8> %v, i32 %a, i32 %b) {
; SSE-LABEL: movmsk_sgt_v32i8_31:
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: pmovmskb %xmm0, %ecx
-; SSE-NEXT: pmovmskb %xmm1, %edx
-; SSE-NEXT: shll $16, %edx
-; SSE-NEXT: orl %ecx, %edx
-; SSE-NEXT: cmovsl %esi, %eax
+; SSE-NEXT: pmovmskb %xmm1, %ecx
+; SSE-NEXT: testl $32768, %ecx # imm = 0x8000
+; SSE-NEXT: cmovnel %esi, %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: movmsk_sgt_v32i8_31:
; AVX1: # %bb.0:
; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: vpmovmskb %xmm0, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmovmskb %xmm0, %edx
-; AVX1-NEXT: shll $16, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: cmovsl %esi, %eax
+; AVX1-NEXT: vpmovmskb %xmm0, %ecx
+; AVX1-NEXT: testl $32768, %ecx # imm = 0x8000
+; AVX1-NEXT: cmovnel %esi, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
From 298e292a76289d93f7c1a80c26e354830c6080e4 Mon Sep 17 00:00:00 2001
From: Tsz Chan <keithcth2001 at gmail.com>
Date: Wed, 3 Jul 2024 18:48:04 +0800
Subject: [PATCH 104/246] [IR] Add overflow check in
AllocaInst::getAllocationSize (#97170)
Fixes #91380.
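The change replaces a raw `Size *= C->getZExtValue()` with an overflow-checked multiply. A hedged standalone sketch of the idea (the patch itself uses llvm::checkedMulUnsigned from llvm/Support/CheckedArithmetic.h; this hand-rolled version is for illustration only):

    #include <cstdint>
    #include <optional>

    // Returns A * B, or std::nullopt if the product would not fit in 64
    // bits, mirroring how getAllocationSize() now reports "size unknown"
    // on overflow instead of returning a wrapped value.
    std::optional<uint64_t> checkedMulU64(uint64_t A, uint64_t B) {
      if (B != 0 && A > UINT64_MAX / B)
        return std::nullopt;
      return A * B;
    }

With the huge element count in the new test below (alloca i32, i64 9223372036854775807), the bit-size multiply overflows, so getAllocationSizeInBits now returns std::nullopt rather than a silently wrapped size.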
---
llvm/lib/IR/Instructions.cpp | 17 +++++++++++++----
llvm/unittests/IR/InstructionsTest.cpp | 3 +++
2 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 494306815d90d..7a8cf8c230498 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -37,6 +37,7 @@
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/CheckedArithmetic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
@@ -65,7 +66,11 @@ AllocaInst::getAllocationSize(const DataLayout &DL) const {
if (!C)
return std::nullopt;
assert(!Size.isScalable() && "Array elements cannot have a scalable size");
- Size *= C->getZExtValue();
+ auto CheckedProd =
+ checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
+ if (!CheckedProd)
+ return std::nullopt;
+ return TypeSize::getFixed(*CheckedProd);
}
return Size;
}
@@ -73,9 +78,13 @@ AllocaInst::getAllocationSize(const DataLayout &DL) const {
std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
std::optional<TypeSize> Size = getAllocationSize(DL);
- if (Size)
- return *Size * 8;
- return std::nullopt;
+ if (!Size)
+ return std::nullopt;
+ auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
+ static_cast<TypeSize::ScalarTy>(8));
+ if (!CheckedProd)
+ return std::nullopt;
+ return TypeSize::get(*CheckedProd, Size->isScalable());
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp
index b6044b2862920..4c1e9a9acb29a 100644
--- a/llvm/unittests/IR/InstructionsTest.cpp
+++ b/llvm/unittests/IR/InstructionsTest.cpp
@@ -1750,6 +1750,7 @@ TEST(InstructionsTest, AllocaInst) {
%F = alloca [2 x half]
%G = alloca [2 x [3 x i128]]
%H = alloca %T
+ %I = alloca i32, i64 9223372036854775807
ret void
}
)");
@@ -1766,6 +1767,7 @@ TEST(InstructionsTest, AllocaInst) {
AllocaInst &F = cast<AllocaInst>(*It++);
AllocaInst &G = cast<AllocaInst>(*It++);
AllocaInst &H = cast<AllocaInst>(*It++);
+ AllocaInst &I = cast<AllocaInst>(*It++);
EXPECT_EQ(A.getAllocationSizeInBits(DL), TypeSize::getFixed(32));
EXPECT_EQ(B.getAllocationSizeInBits(DL), TypeSize::getFixed(128));
EXPECT_FALSE(C.getAllocationSizeInBits(DL));
@@ -1774,6 +1776,7 @@ TEST(InstructionsTest, AllocaInst) {
EXPECT_EQ(F.getAllocationSizeInBits(DL), TypeSize::getFixed(32));
EXPECT_EQ(G.getAllocationSizeInBits(DL), TypeSize::getFixed(768));
EXPECT_EQ(H.getAllocationSizeInBits(DL), TypeSize::getFixed(160));
+ EXPECT_FALSE(I.getAllocationSizeInBits(DL));
}
TEST(InstructionsTest, InsertAtBegin) {
From 7de7f50fc9d76ced7a971a66abf59aab6f9e5be6 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 12:21:31 +0100
Subject: [PATCH 105/246] [InstCombine][X86] Fold
blendv(x,y,shuffle(bitcast(sext(m)))) -> select(shuffle(m),x,y) (#96882)
We already handle blendv(x,y,bitcast(sext(m))) -> select(m,x,y) cases, but this adds support for peeking through one-use shuffles as well. VectorCombine should already have canonicalized the IR to shuffle(bitcast(...)) for us.
The particular use case is where we have split generic 256/512-bit code to use target-specific blendv intrinsics (e.g. AVX1 spoofing AVX2 256-bit ops).
Fixes #58895
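The interesting step is rescaling the peeked-through shuffle mask from mask-element granularity (e.g. per-i8) down to operand-element granularity so it can be reapplied to the <N x i1> condition. A rough standalone sketch of the narrowing case (hedged: the real code calls llvm::scaleShuffleMaskElts, which also handles widening and sentinel indices; undef lanes have already been rejected by the time it runs):

    #include <optional>
    #include <vector>

    // Narrow a shuffle mask to NumDstElts wide elements. Succeeds only when
    // each group of (Mask.size() / NumDstElts) indices is an aligned,
    // contiguous run - i.e. the byte shuffle is really an element shuffle.
    std::optional<std::vector<int>> narrowMask(unsigned NumDstElts,
                                               const std::vector<int> &Mask) {
      if (NumDstElts == 0 || Mask.size() % NumDstElts != 0)
        return std::nullopt;
      unsigned Scale = Mask.size() / NumDstElts;
      std::vector<int> Scaled;
      for (unsigned I = 0; I != NumDstElts; ++I) {
        int Lead = Mask[I * Scale];
        if (Lead % (int)Scale != 0)
          return std::nullopt;                 // group not element-aligned
        for (unsigned J = 1; J != Scale; ++J)
          if (Mask[I * Scale + J] != Lead + (int)J)
            return std::nullopt;               // group not contiguous
        Scaled.push_back(Lead / (int)Scale);
      }
      return Scaled;
    }

For the v4f64 test below, the <32 x i8> half-shuffle mask [0..15] narrows to [0, 1] over the <4 x i1> compare, which is exactly the shufflevector of [[CMP]] in the updated CHECK lines.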
---
.../Target/X86/X86InstCombineIntrinsic.cpp | 36 ++-
.../PhaseOrdering/X86/blendv-select.ll | 286 ++++++++----------
.../Transforms/PhaseOrdering/X86/pr67803.ll | 37 +--
3 files changed, 175 insertions(+), 184 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index b00c8cc16fe0f..0397fb299d27b 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -2800,6 +2800,23 @@ X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
}
+ // Peek through a one-use shuffle - VectorCombine should have simplified
+ // this for cases where we're splitting wider vectors to use blendv
+ // intrinsics.
+ Value *MaskSrc = nullptr;
+ ArrayRef<int> ShuffleMask;
+ if (match(Mask, PatternMatch::m_OneUse(PatternMatch::m_Shuffle(
+ PatternMatch::m_Value(MaskSrc), PatternMatch::m_Undef(),
+ PatternMatch::m_Mask(ShuffleMask))))) {
+ // Bail if the shuffle was irregular or contains undefs.
+ int NumElts = cast<FixedVectorType>(MaskSrc->getType())->getNumElements();
+ if (NumElts < ShuffleMask.size() || !isPowerOf2_32(NumElts) ||
+ any_of(ShuffleMask,
+ [NumElts](int M) { return M < 0 || M >= NumElts; }))
+ break;
+ Mask = MaskSrc;
+ }
+
// Convert to a vector select if we can bypass casts and find a boolean
// vector condition value.
Value *BoolVec;
@@ -2809,11 +2826,26 @@ X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
BoolVec->getType()->getScalarSizeInBits() == 1) {
auto *MaskTy = cast<FixedVectorType>(Mask->getType());
auto *OpTy = cast<FixedVectorType>(II.getType());
+ unsigned NumMaskElts = MaskTy->getNumElements();
+ unsigned NumOperandElts = OpTy->getNumElements();
+
+ // If we peeked through a shuffle, reapply the shuffle to the bool vector.
+ if (MaskSrc) {
+ unsigned NumMaskSrcElts =
+ cast<FixedVectorType>(MaskSrc->getType())->getNumElements();
+ NumMaskElts = (ShuffleMask.size() * NumMaskElts) / NumMaskSrcElts;
+ // Multiple mask bits map to the same operand element - bail out.
+ if (NumMaskElts > NumOperandElts)
+ break;
+ SmallVector<int> ScaledMask;
+ if (!llvm::scaleShuffleMaskElts(NumMaskElts, ShuffleMask, ScaledMask))
+ break;
+ BoolVec = IC.Builder.CreateShuffleVector(BoolVec, ScaledMask);
+ MaskTy = FixedVectorType::get(MaskTy->getElementType(), NumMaskElts);
+ }
assert(MaskTy->getPrimitiveSizeInBits() ==
OpTy->getPrimitiveSizeInBits() &&
"Not expecting mask and operands with different sizes");
- unsigned NumMaskElts = MaskTy->getNumElements();
- unsigned NumOperandElts = OpTy->getNumElements();
if (NumMaskElts == NumOperandElts) {
return SelectInst::Create(BoolVec, Op1, Op0);
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/blendv-select.ll b/llvm/test/Transforms/PhaseOrdering/X86/blendv-select.ll
index bccd189d12a82..67c9c333987f6 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/blendv-select.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/blendv-select.ll
@@ -4,7 +4,7 @@
; RUN: opt < %s -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s
;
-; TODO: PR58895 - replace shuffled _mm_blendv_epi8+icmp with select+icmp
+; PR58895 - replace shuffled _mm_blendv_epi8+icmp with select+icmp
;
;
@@ -13,21 +13,21 @@
define <4 x double> @x86_pblendvb_v4f64_v2f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double> %d) {
; CHECK-LABEL: @x86_pblendvb_v4f64_v2f64(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <4 x double> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <4 x double> [[B:%.*]] to <32 x i8>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <4 x double> [[C:%.*]], [[D:%.*]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <4 x i64> [[SEXT]] to <32 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_LO]], <16 x i8> [[B_LO]], <16 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_HI]], <16 x i8> [[B_HI]], <16 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <16 x i8> [[SEL_LO]], <16 x i8> [[SEL_HI]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <32 x i8> [[CONCAT]] to <4 x double>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i1> [[CMP]], <4 x i1> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double> [[A:%.*]] to <4 x i64>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[TMP2]], <4 x i64> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double> [[B:%.*]] to <4 x i64>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i64> [[TMP4]], <4 x i64> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP6:%.*]] = select <2 x i1> [[TMP1]], <2 x i64> [[TMP5]], <2 x i64> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i1> [[CMP]], <4 x i1> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x double> [[A]] to <4 x i64>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i64> [[TMP8]], <4 x i64> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x double> [[B]] to <4 x i64>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i64> [[TMP10]], <4 x i64> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP12:%.*]] = select <2 x i1> [[TMP7]], <2 x i64> [[TMP11]], <2 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x i64> [[TMP6]], <2 x i64> [[TMP12]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[RES:%.*]] = bitcast <4 x i64> [[TMP13]] to <4 x double>
; CHECK-NEXT: ret <4 x double> [[RES]]
;
%a.bc = bitcast <4 x double> %a to <32 x i8>
@@ -50,21 +50,21 @@ define <4 x double> @x86_pblendvb_v4f64_v2f64(<4 x double> %a, <4 x double> %b,
define <8 x float> @x86_pblendvb_v8f32_v4f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
; CHECK-LABEL: @x86_pblendvb_v8f32_v4f32(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <8 x float> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <8 x float> [[B:%.*]] to <32 x i8>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <8 x float> [[C:%.*]], [[D:%.*]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <8 x i32> [[SEXT]] to <32 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_LO]], <16 x i8> [[B_LO]], <16 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_HI]], <16 x i8> [[B_HI]], <16 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <16 x i8> [[SEL_LO]], <16 x i8> [[SEL_HI]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <32 x i8> [[CONCAT]] to <8 x float>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i1> [[CMP]], <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x float> [[A:%.*]] to <8 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x float> [[B:%.*]] to <8 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP5]], <4 x i32> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <8 x i1> [[CMP]], <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x float> [[A]] to <8 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i32> [[TMP8]], <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x float> [[B]] to <8 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <8 x i32> [[TMP10]], <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP7]], <4 x i32> [[TMP11]], <4 x i32> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP12]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[RES:%.*]] = bitcast <8 x i32> [[TMP13]] to <8 x float>
; CHECK-NEXT: ret <8 x float> [[RES]]
;
%a.bc = bitcast <8 x float> %a to <32 x i8>
@@ -87,22 +87,9 @@ define <8 x float> @x86_pblendvb_v8f32_v4f32(<8 x float> %a, <8 x float> %b, <8
define <4 x i64> @x86_pblendvb_v4i64_v2i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; CHECK-LABEL: @x86_pblendvb_v4i64_v2i64(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <4 x i64> [[C:%.*]], [[D:%.*]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <4 x i64> [[SEXT]] to <32 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_LO]], <16 x i8> [[B_LO]], <16 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_HI]], <16 x i8> [[B_HI]], <16 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <16 x i8> [[SEL_LO]], <16 x i8> [[SEL_HI]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <32 x i8> [[CONCAT]] to <4 x i64>
-; CHECK-NEXT: ret <4 x i64> [[RES]]
+; CHECK-NEXT: [[TMP1:%.*]] = select <4 x i1> [[CMP]], <4 x i64> [[B:%.*]], <4 x i64> [[A:%.*]]
+; CHECK-NEXT: ret <4 x i64> [[TMP1]]
;
%a.bc = bitcast <4 x i64> %a to <32 x i8>
%b.bc = bitcast <4 x i64> %b to <32 x i8>
@@ -124,23 +111,23 @@ define <4 x i64> @x86_pblendvb_v4i64_v2i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>
define <4 x i64> @x86_pblendvb_v8i32_v4i32(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; CHECK-LABEL: @x86_pblendvb_v8i32_v4i32(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
; CHECK-NEXT: [[C_BC:%.*]] = bitcast <4 x i64> [[C:%.*]] to <8 x i32>
; CHECK-NEXT: [[D_BC:%.*]] = bitcast <4 x i64> [[D:%.*]] to <8 x i32>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <8 x i32> [[C_BC]], [[D_BC]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <8 x i32> [[SEXT]] to <32 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_LO]], <16 x i8> [[B_LO]], <16 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_HI]], <16 x i8> [[B_HI]], <16 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <16 x i8> [[SEL_LO]], <16 x i8> [[SEL_HI]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <32 x i8> [[CONCAT]] to <4 x i64>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i1> [[CMP]], <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[B:%.*]] to <8 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP5]], <4 x i32> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <8 x i1> [[CMP]], <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i64> [[A]] to <8 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i32> [[TMP8]], <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x i64> [[B]] to <8 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <8 x i32> [[TMP10]], <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP7]], <4 x i32> [[TMP11]], <4 x i32> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP12]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[RES:%.*]] = bitcast <8 x i32> [[TMP13]] to <4 x i64>
; CHECK-NEXT: ret <4 x i64> [[RES]]
;
%a.bc = bitcast <4 x i64> %a to <32 x i8>
@@ -165,23 +152,23 @@ define <4 x i64> @x86_pblendvb_v8i32_v4i32(<4 x i64> %a, <4 x i64> %b, <4 x i64>
define <4 x i64> @x86_pblendvb_v16i16_v8i16(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; CHECK-LABEL: @x86_pblendvb_v16i16_v8i16(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
; CHECK-NEXT: [[C_BC:%.*]] = bitcast <4 x i64> [[C:%.*]] to <16 x i16>
; CHECK-NEXT: [[D_BC:%.*]] = bitcast <4 x i64> [[D:%.*]] to <16 x i16>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <16 x i16> [[C_BC]], [[D_BC]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <16 x i16> [[SEXT]] to <32 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <32 x i8> [[SEXT_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_LO]], <16 x i8> [[B_LO]], <16 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_HI]], <16 x i8> [[B_HI]], <16 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <16 x i8> [[SEL_LO]], <16 x i8> [[SEL_HI]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <32 x i8> [[CONCAT]] to <4 x i64>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i1> [[CMP]], <16 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[TMP2]], <16 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[B:%.*]] to <16 x i16>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i16> [[TMP4]], <16 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[TMP5]], <8 x i16> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i1> [[CMP]], <16 x i1> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i64> [[A]] to <16 x i16>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP8]], <16 x i16> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x i64> [[B]] to <16 x i16>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <16 x i16> [[TMP10]], <16 x i16> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP12:%.*]] = select <8 x i1> [[TMP7]], <8 x i16> [[TMP11]], <8 x i16> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <8 x i16> [[TMP6]], <8 x i16> [[TMP12]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[RES:%.*]] = bitcast <16 x i16> [[TMP13]] to <4 x i64>
; CHECK-NEXT: ret <4 x i64> [[RES]]
;
%a.bc = bitcast <4 x i64> %a to <32 x i8>
@@ -210,17 +197,8 @@ define <4 x i64> @x86_pblendvb_v32i8_v16i8(<4 x i64> %a, <4 x i64> %b, <4 x i64>
; CHECK-NEXT: [[B_BC:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
; CHECK-NEXT: [[C_BC:%.*]] = bitcast <4 x i64> [[C:%.*]] to <32 x i8>
; CHECK-NEXT: [[D_BC:%.*]] = bitcast <4 x i64> [[D:%.*]] to <32 x i8>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <32 x i8> [[A_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <32 x i8> [[B_BC]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <32 x i8> [[C_BC]], [[D_BC]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <32 x i8> [[SEXT]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <32 x i8> [[SEXT]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_LO]], <16 x i8> [[B_LO]], <16 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A_HI]], <16 x i8> [[B_HI]], <16 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <16 x i8> [[SEL_LO]], <16 x i8> [[SEL_HI]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[CONCAT:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[B_BC]], <32 x i8> [[A_BC]]
; CHECK-NEXT: [[RES:%.*]] = bitcast <32 x i8> [[CONCAT]] to <4 x i64>
; CHECK-NEXT: ret <4 x i64> [[RES]]
;
@@ -249,21 +227,21 @@ define <4 x i64> @x86_pblendvb_v32i8_v16i8(<4 x i64> %a, <4 x i64> %b, <4 x i64>
define <8 x double> @x86_pblendvb_v8f64_v4f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d) {
; CHECK-LABEL: @x86_pblendvb_v8f64_v4f64(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <8 x double> [[A:%.*]] to <64 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <8 x double> [[B:%.*]] to <64 x i8>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <8 x double> [[C:%.*]], [[D:%.*]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <8 x i64> [[SEXT]] to <64 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_LO]], <32 x i8> [[B_LO]], <32 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_HI]], <32 x i8> [[B_HI]], <32 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <32 x i8> [[SEL_LO]], <32 x i8> [[SEL_HI]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <64 x i8> [[CONCAT]] to <8 x double>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i1> [[CMP]], <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x double> [[A:%.*]] to <8 x i64>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TMP2]], <8 x i64> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x double> [[B:%.*]] to <8 x i64>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> [[TMP4]], <8 x i64> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP1]], <4 x i64> [[TMP5]], <4 x i64> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <8 x i1> [[CMP]], <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x double> [[A]] to <8 x i64>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i64> [[TMP8]], <8 x i64> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x double> [[B]] to <8 x i64>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <8 x i64> [[TMP10]], <8 x i64> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP7]], <4 x i64> [[TMP11]], <4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <4 x i64> [[TMP6]], <4 x i64> [[TMP12]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[RES:%.*]] = bitcast <8 x i64> [[TMP13]] to <8 x double>
; CHECK-NEXT: ret <8 x double> [[RES]]
;
%a.bc = bitcast <8 x double> %a to <64 x i8>
@@ -286,21 +264,21 @@ define <8 x double> @x86_pblendvb_v8f64_v4f64(<8 x double> %a, <8 x double> %b,
define <16 x float> @x86_pblendvb_v16f32_v8f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x float> %d) {
; CHECK-LABEL: @x86_pblendvb_v16f32_v8f32(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <16 x float> [[A:%.*]] to <64 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <16 x float> [[B:%.*]] to <64 x i8>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <16 x float> [[C:%.*]], [[D:%.*]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <16 x i32> [[SEXT]] to <64 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_LO]], <32 x i8> [[B_LO]], <32 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_HI]], <32 x i8> [[B_HI]], <32 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <32 x i8> [[SEL_LO]], <32 x i8> [[SEL_HI]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <64 x i8> [[CONCAT]] to <16 x float>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i1> [[CMP]], <16 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x float> [[A:%.*]] to <16 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[TMP2]], <16 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x float> [[B:%.*]] to <16 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[TMP4]], <16 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> [[TMP5]], <8 x i32> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i1> [[CMP]], <16 x i1> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <16 x float> [[A]] to <16 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i32> [[TMP8]], <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x float> [[B]] to <16 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <16 x i32> [[TMP10]], <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP12:%.*]] = select <8 x i1> [[TMP7]], <8 x i32> [[TMP11]], <8 x i32> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <8 x i32> [[TMP6]], <8 x i32> [[TMP12]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[RES:%.*]] = bitcast <16 x i32> [[TMP13]] to <16 x float>
; CHECK-NEXT: ret <16 x float> [[RES]]
;
%a.bc = bitcast <16 x float> %a to <64 x i8>
@@ -323,22 +301,9 @@ define <16 x float> @x86_pblendvb_v16f32_v8f32(<16 x float> %a, <16 x float> %b,
define <8 x i64> @x86_pblendvb_v8i64_v4i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; CHECK-LABEL: @x86_pblendvb_v8i64_v4i64(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <8 x i64> [[A:%.*]] to <64 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <8 x i64> [[B:%.*]] to <64 x i8>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <8 x i64> [[C:%.*]], [[D:%.*]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <8 x i64> [[SEXT]] to <64 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_LO]], <32 x i8> [[B_LO]], <32 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_HI]], <32 x i8> [[B_HI]], <32 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <32 x i8> [[SEL_LO]], <32 x i8> [[SEL_HI]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <64 x i8> [[CONCAT]] to <8 x i64>
-; CHECK-NEXT: ret <8 x i64> [[RES]]
+; CHECK-NEXT: [[TMP1:%.*]] = select <8 x i1> [[CMP]], <8 x i64> [[B:%.*]], <8 x i64> [[A:%.*]]
+; CHECK-NEXT: ret <8 x i64> [[TMP1]]
;
%a.bc = bitcast <8 x i64> %a to <64 x i8>
%b.bc = bitcast <8 x i64> %b to <64 x i8>
@@ -360,23 +325,23 @@ define <8 x i64> @x86_pblendvb_v8i64_v4i64(<8 x i64> %a, <8 x i64> %b, <8 x i64>
define <8 x i64> @x86_pblendvb_v16i32_v8i32(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; CHECK-LABEL: @x86_pblendvb_v16i32_v8i32(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <8 x i64> [[A:%.*]] to <64 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <8 x i64> [[B:%.*]] to <64 x i8>
-; CHECK-NEXT: [[C_BC:%.*]] = bitcast <8 x i64> [[A]] to <16 x i32>
-; CHECK-NEXT: [[D_BC:%.*]] = bitcast <8 x i64> [[B]] to <16 x i32>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+; CHECK-NEXT: [[C_BC:%.*]] = bitcast <8 x i64> [[A:%.*]] to <16 x i32>
+; CHECK-NEXT: [[D_BC:%.*]] = bitcast <8 x i64> [[B:%.*]] to <16 x i32>
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <16 x i32> [[C_BC]], [[D_BC]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <16 x i32> [[SEXT]] to <64 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_LO]], <32 x i8> [[B_LO]], <32 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_HI]], <32 x i8> [[B_HI]], <32 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <32 x i8> [[SEL_LO]], <32 x i8> [[SEL_HI]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <64 x i8> [[CONCAT]] to <8 x i64>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i1> [[CMP]], <16 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i64> [[A]] to <16 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[TMP2]], <16 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[B]] to <16 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[TMP4]], <16 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> [[TMP5]], <8 x i32> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i1> [[CMP]], <16 x i1> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i64> [[A]] to <16 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i32> [[TMP8]], <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i64> [[B]] to <16 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <16 x i32> [[TMP10]], <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP12:%.*]] = select <8 x i1> [[TMP7]], <8 x i32> [[TMP11]], <8 x i32> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <8 x i32> [[TMP6]], <8 x i32> [[TMP12]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[RES:%.*]] = bitcast <16 x i32> [[TMP13]] to <8 x i64>
; CHECK-NEXT: ret <8 x i64> [[RES]]
;
%a.bc = bitcast <8 x i64> %a to <64 x i8>
@@ -401,23 +366,23 @@ define <8 x i64> @x86_pblendvb_v16i32_v8i32(<8 x i64> %a, <8 x i64> %b, <8 x i64
define <8 x i64> @x86_pblendvb_v32i16_v16i16(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; CHECK-LABEL: @x86_pblendvb_v32i16_v16i16(
-; CHECK-NEXT: [[A_BC:%.*]] = bitcast <8 x i64> [[A:%.*]] to <64 x i8>
-; CHECK-NEXT: [[B_BC:%.*]] = bitcast <8 x i64> [[B:%.*]] to <64 x i8>
; CHECK-NEXT: [[C_BC:%.*]] = bitcast <8 x i64> [[C:%.*]] to <32 x i16>
; CHECK-NEXT: [[D_BC:%.*]] = bitcast <8 x i64> [[D:%.*]] to <32 x i16>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <32 x i16> [[C_BC]], [[D_BC]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16>
-; CHECK-NEXT: [[SEXT_BC:%.*]] = bitcast <32 x i16> [[SEXT]] to <64 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <64 x i8> [[SEXT_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_LO]], <32 x i8> [[B_LO]], <32 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_HI]], <32 x i8> [[B_HI]], <32 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <32 x i8> [[SEL_LO]], <32 x i8> [[SEL_HI]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[RES:%.*]] = bitcast <64 x i8> [[CONCAT]] to <8 x i64>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i1> [[CMP]], <32 x i1> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i64> [[A:%.*]] to <32 x i16>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[TMP2]], <32 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[B:%.*]] to <32 x i16>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <32 x i16> [[TMP4]], <32 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[TMP1]], <16 x i16> [[TMP5]], <16 x i16> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <32 x i1> [[CMP]], <32 x i1> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i64> [[A]] to <32 x i16>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <32 x i16> [[TMP8]], <32 x i16> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i64> [[B]] to <32 x i16>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <32 x i16> [[TMP10]], <32 x i16> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP12:%.*]] = select <16 x i1> [[TMP7]], <16 x i16> [[TMP11]], <16 x i16> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <16 x i16> [[TMP6]], <16 x i16> [[TMP12]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[RES:%.*]] = bitcast <32 x i16> [[TMP13]] to <8 x i64>
; CHECK-NEXT: ret <8 x i64> [[RES]]
;
%a.bc = bitcast <8 x i64> %a to <64 x i8>
@@ -446,17 +411,8 @@ define <8 x i64> @x86_pblendvb_v64i8_v32i8(<8 x i64> %a, <8 x i64> %b, <8 x i64>
; CHECK-NEXT: [[B_BC:%.*]] = bitcast <8 x i64> [[B:%.*]] to <64 x i8>
; CHECK-NEXT: [[C_BC:%.*]] = bitcast <8 x i64> [[C:%.*]] to <64 x i8>
; CHECK-NEXT: [[D_BC:%.*]] = bitcast <8 x i64> [[D:%.*]] to <64 x i8>
-; CHECK-NEXT: [[A_LO:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[B_LO:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[A_HI:%.*]] = shufflevector <64 x i8> [[A_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[B_HI:%.*]] = shufflevector <64 x i8> [[B_BC]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <64 x i8> [[C_BC]], [[D_BC]]
-; CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8>
-; CHECK-NEXT: [[SEXT_LO:%.*]] = shufflevector <64 x i8> [[SEXT]], <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SEXT_HI:%.*]] = shufflevector <64 x i8> [[SEXT]], <64 x i8> poison, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-; CHECK-NEXT: [[SEL_LO:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_LO]], <32 x i8> [[B_LO]], <32 x i8> [[SEXT_LO]])
-; CHECK-NEXT: [[SEL_HI:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A_HI]], <32 x i8> [[B_HI]], <32 x i8> [[SEXT_HI]])
-; CHECK-NEXT: [[CONCAT:%.*]] = shufflevector <32 x i8> [[SEL_LO]], <32 x i8> [[SEL_HI]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+; CHECK-NEXT: [[CONCAT:%.*]] = select <64 x i1> [[CMP]], <64 x i8> [[B_BC]], <64 x i8> [[A_BC]]
; CHECK-NEXT: [[RES:%.*]] = bitcast <64 x i8> [[CONCAT]] to <8 x i64>
; CHECK-NEXT: ret <8 x i64> [[RES]]
;
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
index c7818e8254b2e..7b690341fb805 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
@@ -9,23 +9,26 @@ define <4 x i64> @PR67803(<4 x i64> %x, <4 x i64> %y, <4 x i64> %a, <4 x i64> %b
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i64> [[X:%.*]] to <8 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i64> [[Y:%.*]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i32> [[TMP0]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i1> [[TMP2]] to <8 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <32 x i8> [[TMP4]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <32 x i8> [[TMP6]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <32 x i8> [[TMP8]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP10:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[TMP5]], <16 x i8> [[TMP7]], <16 x i8> [[TMP9]])
-; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x i64> [[A]] to <32 x i8>
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <32 x i8> [[TMP11]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i64> [[B]] to <32 x i8>
-; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <32 x i8> [[TMP13]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP15:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <32 x i8> [[TMP15]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP17:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[TMP12]], <16 x i8> [[TMP14]], <16 x i8> [[TMP16]])
-; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <16 x i8> [[TMP10]], <16 x i8> [[TMP17]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[SHUFFLE_I23:%.*]] = bitcast <32 x i8> [[TMP18]] to <4 x i64>
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i8> [[TMP3]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <32 x i8> [[TMP5]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32>
+; CHECK-NEXT: [[TMP10:%.*]] = select <4 x i1> [[TMP7]], <4 x i32> [[TMP9]], <4 x i32> [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
+; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i64> [[A]] to <32 x i8>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <32 x i8> [[TMP12]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP14:%.*]] = bitcast <4 x i64> [[B]] to <32 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <32 x i8> [[TMP14]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
+; CHECK-NEXT: [[TMP19:%.*]] = select <4 x i1> [[TMP16]], <4 x i32> [[TMP18]], <4 x i32> [[TMP17]]
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x i32> [[TMP19]] to <16 x i8>
+; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <16 x i8> [[TMP11]], <16 x i8> [[TMP20]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[SHUFFLE_I23:%.*]] = bitcast <32 x i8> [[TMP21]] to <4 x i64>
; CHECK-NEXT: ret <4 x i64> [[SHUFFLE_I23]]
;
entry:
From bcb7c38af7de59f3b2201734ee11987839cd7bbe Mon Sep 17 00:00:00 2001
From: Aaron Ballman <aaron at aaronballman.com>
Date: Wed, 3 Jul 2024 07:23:44 -0400
Subject: [PATCH 106/246] [C23] Add INFINITY and NAN macros to <float.h>
(#96659)
This is in support of WG14 N2848, which only defines the macros if
an infinity or NaN is supported. However, because we support builtins
that can produce an infinity or a NaN, and because we have pragmas
that control infinity and NaN behavior, we always define the macros even
if the user passed -ffinite-math-only on the command line.
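A minimal usage sketch (not part of the patch; it assumes a Clang that
carries this change, invoked as `clang -std=c23 -ffreestanding` with
default math options):

    #include <float.h>

    // Both macros expand to builtin calls that are valid in constant
    // expressions, so these globals get static initializers.
    float pos_inf   = INFINITY; // expands to (__builtin_inf())
    float quiet_nan = NAN;      // expands to (__builtin_nan(""))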
---
clang/docs/ReleaseNotes.rst | 5 +++++
clang/lib/Headers/float.h | 13 +++++++++++++
clang/test/Headers/float.c | 25 +++++++++++++++++++++++++
clang/www/c_status.html | 2 +-
4 files changed, 44 insertions(+), 1 deletion(-)
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 1537eaaba0c66..f40fd1cd145bb 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -338,6 +338,11 @@ C23 Feature Support
- Properly promote bit-fields of bit-precise integer types to the field's type
rather than to ``int``. #GH87641
+- Added the ``INFINITY`` and ``NAN`` macros to Clang's ``<float.h>``
+ freestanding implementation; these macros were defined in ``<math.h>`` in C99
+ but C23 added them to ``<float.h>`` in
+ `WG14 N2848 <https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2848.pdf>`_.
+
Non-comprehensive list of changes in this release
-------------------------------------------------
diff --git a/clang/lib/Headers/float.h b/clang/lib/Headers/float.h
index 642c8f06cc938..a565a33243df1 100644
--- a/clang/lib/Headers/float.h
+++ b/clang/lib/Headers/float.h
@@ -88,6 +88,12 @@
# endif
#endif
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+# undef INFINITY
+# undef NAN
+#endif
+
/* Characteristics of floating point types, C99 5.2.4.2.2 */
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
@@ -155,6 +161,13 @@
# define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
#endif
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+ /* C23 5.2.5.3.3p29-30 */
+# define INFINITY (__builtin_inf())
+# define NAN (__builtin_nan(""))
+#endif
+
#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
# define FLT16_MANT_DIG __FLT16_MANT_DIG__
# define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
diff --git a/clang/test/Headers/float.c b/clang/test/Headers/float.c
index 70c11b0537537..b9e6e971545e5 100644
--- a/clang/test/Headers/float.c
+++ b/clang/test/Headers/float.c
@@ -1,9 +1,13 @@
// RUN: %clang_cc1 -fsyntax-only -verify -std=c89 -ffreestanding %s
// RUN: %clang_cc1 -fsyntax-only -verify -std=c99 -ffreestanding %s
// RUN: %clang_cc1 -fsyntax-only -verify -std=c11 -ffreestanding %s
+// RUN: %clang_cc1 -fsyntax-only -verify -std=c23 -ffreestanding %s
+// RUN: %clang_cc1 -fsyntax-only -verify -std=c23 -ffreestanding -ffinite-math-only %s
// RUN: %clang_cc1 -fsyntax-only -verify -xc++ -std=c++11 -ffreestanding %s
// RUN: %clang_cc1 -fsyntax-only -verify -xc++ -std=c++14 -ffreestanding %s
// RUN: %clang_cc1 -fsyntax-only -verify -xc++ -std=c++17 -ffreestanding %s
+// RUN: %clang_cc1 -fsyntax-only -verify -xc++ -std=c++23 -ffreestanding %s
+// RUN: %clang_cc1 -fsyntax-only -verify -xc++ -std=c++23 -ffreestanding -ffinite-math-only %s
// expected-no-diagnostics
/* Basic floating point conformance checks against:
@@ -207,6 +211,21 @@
#error "Mandatory macros {FLT,DBL,LDBL}_MAX_10_EXP are invalid."
#endif
+#if __STDC_VERSION__ >= 202311L || !defined(__STRICT_ANSI__)
+ #ifndef INFINITY
+ #error "Mandatory macro INFINITY is missing."
+ #endif
+ #ifndef NAN
+ #error "Mandatory macro NAN is missing."
+ #endif
+#else
+ #ifdef INFINITY
+ #error "Macro INFINITY should not be defined."
+ #endif
+ #ifdef NAN
+ #error "Macro NAN should not be defined."
+ #endif
+#endif
/* Internal consistency checks */
_Static_assert(FLT_RADIX == __FLT_RADIX__, "");
@@ -244,3 +263,9 @@ _Static_assert(LDBL_MAX_EXP == __LDBL_MAX_EXP__, "");
_Static_assert(FLT_MAX_10_EXP == __FLT_MAX_10_EXP__, "");
_Static_assert(DBL_MAX_10_EXP == __DBL_MAX_10_EXP__, "");
_Static_assert(LDBL_MAX_10_EXP == __LDBL_MAX_10_EXP__, "");
+
+#if (__STDC_VERSION__ >= 202311L || !defined(__STRICT_ANSI__)) && __FINITE_MATH_ONLY__ == 0
+// Ensure INFINITY and NAN are suitable for use in a constant expression.
+float f1 = INFINITY;
+float f2 = NAN;
+#endif
diff --git a/clang/www/c_status.html b/clang/www/c_status.html
index ccb39a15b25aa..3fb1efc1989e8 100644
--- a/clang/www/c_status.html
+++ b/clang/www/c_status.html
@@ -995,7 +995,7 @@ <h2 id="c2x">C23 implementation status</h2>
<tr>
<td>Contradiction about INFINITY macro</td>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2848.pdf">N2848</a></td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="unreleased" align="center">Clang 19</td>
</tr>
<tr>
<td>Require exact-width integer type interfaces</td>
From 5fd5b8ada70d9cbdaa8cc5bea50a6314e3140364 Mon Sep 17 00:00:00 2001
From: Sven van Haastregt <sven.vanhaastregt at arm.com>
Date: Wed, 3 Jul 2024 13:24:22 +0200
Subject: [PATCH 107/246] [OpenCL] Emit opencl.cxx.version metadata for C++
(#92140)
Currently there is no way to tell whether an IR module was generated
using `-cl-std=cl3.0` or `-cl-std=clc++2021`, i.e., whether the origin
was an OpenCL C or C++ for OpenCL source.
Add new `opencl.cxx.version` named metadata when compiling C++. Keep the
`opencl.ocl.version` metadata to convey the compatible OpenCL C version.
Fixes https://github.com/llvm/llvm-project/issues/91912
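For illustration, a self-contained C++ sketch (not the patch itself) of
the metadata shape this produces; the numeric encodings passed in main()
(300 for OpenCL 3.0, 202100 for clc++2021) are assumptions inferred from
the test expectations below:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    // Emit a named MD node holding {major, minor} as two i32 constants.
    static void emitVersion(llvm::Module &M, llvm::StringRef Name,
                            int Version) {
      llvm::LLVMContext &Ctx = M.getContext();
      llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
      llvm::Metadata *Elts[] = {
          llvm::ConstantAsMetadata::get(
              llvm::ConstantInt::get(I32, Version / 100)),
          llvm::ConstantAsMetadata::get(
              llvm::ConstantInt::get(I32, (Version % 100) / 10))};
      M.getOrInsertNamedMetadata(Name)->addOperand(
          llvm::MDNode::get(Ctx, Elts));
    }

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("demo", Ctx);
      emitVersion(M, "opencl.ocl.version", 300);    // !{i32 3, i32 0}
      emitVersion(M, "opencl.cxx.version", 202100); // !{i32 2021, i32 0}
      M.print(llvm::outs(), nullptr);
    }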
---
clang/lib/CodeGen/CodeGenModule.cpp | 30 ++++++++++++++---------
clang/test/CodeGenOpenCLCXX/version.clcpp | 21 ++++++++++++++++
2 files changed, 40 insertions(+), 11 deletions(-)
create mode 100644 clang/test/CodeGenOpenCLCXX/version.clcpp
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 652f519d82488..99e986d371cac 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -1399,17 +1399,25 @@ void CodeGenModule::Release() {
void CodeGenModule::EmitOpenCLMetadata() {
// SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
// opencl.ocl.version named metadata node.
- // C++ for OpenCL has a distinct mapping for versions compatibile with OpenCL.
- auto Version = LangOpts.getOpenCLCompatibleVersion();
- llvm::Metadata *OCLVerElts[] = {
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, Version / 100)),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, (Version % 100) / 10))};
- llvm::NamedMDNode *OCLVerMD =
- TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
- llvm::LLVMContext &Ctx = TheModule.getContext();
- OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
+ // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL.
+ auto CLVersion = LangOpts.getOpenCLCompatibleVersion();
+
+ auto EmitVersion = [this](StringRef MDName, int Version) {
+ llvm::Metadata *OCLVerElts[] = {
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(Int32Ty, Version / 100)),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(Int32Ty, (Version % 100) / 10))};
+ llvm::NamedMDNode *OCLVerMD = TheModule.getOrInsertNamedMetadata(MDName);
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
+ };
+
+ EmitVersion("opencl.ocl.version", CLVersion);
+ if (LangOpts.OpenCLCPlusPlus) {
+ // In addition to the OpenCL compatible version, emit the C++ version.
+ EmitVersion("opencl.cxx.version", LangOpts.OpenCLCPlusPlusVersion);
+ }
}
void CodeGenModule::EmitBackendOptionsMetadata(
diff --git a/clang/test/CodeGenOpenCLCXX/version.clcpp b/clang/test/CodeGenOpenCLCXX/version.clcpp
new file mode 100644
index 0000000000000..2f2aa022afab2
--- /dev/null
+++ b/clang/test/CodeGenOpenCLCXX/version.clcpp
@@ -0,0 +1,21 @@
+// RUN: %clang_cc1 %s -triple "spir64-unknown-unknown" -emit-llvm -o - -cl-std=CL2.0 | FileCheck %s --check-prefix=CHECK-NO-CXX
+
+// RUN: %clang_cc1 %s -triple "spir64-unknown-unknown" -emit-llvm -o - -cl-std=clc++1.0 | FileCheck %s --check-prefix=CHECK-CXX100
+// RUN: %clang_cc1 %s -triple "spir64-unknown-unknown" -emit-llvm -o - -cl-std=clc++2021 | FileCheck %s --check-prefix=CHECK-CXX2021
+
+// This test checks that opencl.cxx.version metadata is emitted accordingly.
+// It avoids any C++ features to enable checking that the metadata is not emitted in non-C++ mode.
+
+kernel void foo() {}
+
+// CHECK-NO-CXX-NOT: opencl.cxx.version
+
+// CHECK-CXX100-DAG: !opencl.ocl.version = !{[[OCL:![0-9]+]]}
+// CHECK-CXX100-DAG: !opencl.cxx.version = !{[[CXX:![0-9]+]]}
+// CHECK-CXX100-DAG: [[OCL]] = !{i32 2, i32 0}
+// CHECK-CXX100-DAG: [[CXX]] = !{i32 1, i32 0}
+
+// CHECK-CXX2021-DAG: !opencl.ocl.version = !{[[OCL:![0-9]+]]}
+// CHECK-CXX2021-DAG: !opencl.cxx.version = !{[[CXX:![0-9]+]]}
+// CHECK-CXX2021-DAG: [[OCL]] = !{i32 3, i32 0}
+// CHECK-CXX2021-DAG: [[CXX]] = !{i32 2021, i32 0}
From d37e7ec2c5c3ecfd6a4612c1798d62d343ceb254 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.ritter at amd.com>
Date: Wed, 3 Jul 2024 13:37:38 +0200
Subject: [PATCH 108/246] [LowerMemIntrinsics] Respect the volatile argument of
llvm.memmove (#97545)
So far, we ignored whether a memmove intrinsic is volatile when lowering it
to loops in the IR. This change generates volatile loads and stores in
this case (similar to how memcpy is handled) and adds tests for volatile
memmoves and memcpys.
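For context, a self-contained C++ sketch (not the patch) of the
IRBuilder calls involved: forwarding the intrinsic's volatility as the
isVolatile argument of CreateAlignedLoad/CreateAlignedStore is what
yields the `load volatile`/`store volatile` instructions checked below.

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("demo", Ctx);
      auto *PtrTy = llvm::PointerType::get(Ctx, /*AddressSpace=*/0);
      auto *FnTy = llvm::FunctionType::get(
          llvm::Type::getVoidTy(Ctx), {PtrTy, PtrTy}, /*isVarArg=*/false);
      auto *F = llvm::Function::Create(
          FnTy, llvm::Function::ExternalLinkage, "copy_one_byte", M);
      llvm::IRBuilder<> B(llvm::BasicBlock::Create(Ctx, "entry", F));
      // Copy one byte from arg1 to arg0; both accesses marked volatile.
      llvm::Value *Elt =
          B.CreateAlignedLoad(B.getInt8Ty(), F->getArg(1), llvm::Align(1),
                              /*isVolatile=*/true, "element");
      B.CreateAlignedStore(Elt, F->getArg(0), llvm::Align(1),
                           /*isVolatile=*/true);
      B.CreateRetVoid();
      M.print(llvm::outs(), nullptr);
    }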
---
.../Transforms/Utils/LowerMemIntrinsics.cpp | 11 ++--
.../CodeGen/AMDGPU/lower-mem-intrinsics.ll | 61 +++++++++++++++++++
2 files changed, 67 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index d84e9f094e03a..d2814f07530d8 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -422,10 +422,10 @@ static void createMemMoveLoop(Instruction *InsertBefore, Value *SrcAddr,
LoopPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_ptr");
Value *Element = LoopBuilder.CreateAlignedLoad(
EltTy, LoopBuilder.CreateInBoundsGEP(EltTy, SrcAddr, IndexPtr),
- PartSrcAlign, "element");
+ PartSrcAlign, SrcIsVolatile, "element");
LoopBuilder.CreateAlignedStore(
Element, LoopBuilder.CreateInBoundsGEP(EltTy, DstAddr, IndexPtr),
- PartDstAlign);
+ PartDstAlign, DstIsVolatile);
LoopBuilder.CreateCondBr(
LoopBuilder.CreateICmpEQ(IndexPtr, ConstantInt::get(TypeOfCopyLen, 0)),
ExitBB, LoopBB);
@@ -440,10 +440,11 @@ static void createMemMoveLoop(Instruction *InsertBefore, Value *SrcAddr,
IRBuilder<> FwdLoopBuilder(FwdLoopBB);
PHINode *FwdCopyPhi = FwdLoopBuilder.CreatePHI(TypeOfCopyLen, 0, "index_ptr");
Value *SrcGEP = FwdLoopBuilder.CreateInBoundsGEP(EltTy, SrcAddr, FwdCopyPhi);
- Value *FwdElement =
- FwdLoopBuilder.CreateAlignedLoad(EltTy, SrcGEP, PartSrcAlign, "element");
+ Value *FwdElement = FwdLoopBuilder.CreateAlignedLoad(
+ EltTy, SrcGEP, PartSrcAlign, SrcIsVolatile, "element");
Value *DstGEP = FwdLoopBuilder.CreateInBoundsGEP(EltTy, DstAddr, FwdCopyPhi);
- FwdLoopBuilder.CreateAlignedStore(FwdElement, DstGEP, PartDstAlign);
+ FwdLoopBuilder.CreateAlignedStore(FwdElement, DstGEP, PartDstAlign,
+ DstIsVolatile);
Value *FwdIndexPtr = FwdLoopBuilder.CreateAdd(
FwdCopyPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_increment");
FwdLoopBuilder.CreateCondBr(FwdLoopBuilder.CreateICmpEQ(FwdIndexPtr, CopyLen),
diff --git a/llvm/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll
index 1c4e4b8602ff8..d53db69f9f2e0 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll
@@ -1776,6 +1776,67 @@ entry:
ret void
}
+define amdgpu_kernel void @memmove_volatile(ptr addrspace(1) %dst, ptr addrspace(1) %src) #0 {
+; MAX1024-LABEL: @memmove_volatile(
+; MAX1024-NEXT: call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) [[DST:%.*]], ptr addrspace(1) [[SRC:%.*]], i64 64, i1 true)
+; MAX1024-NEXT: ret void
+;
+; ALL-LABEL: @memmove_volatile(
+; ALL-NEXT: [[COMPARE_SRC_DST:%.*]] = icmp ult ptr addrspace(1) [[SRC:%.*]], [[DST:%.*]]
+; ALL-NEXT: [[COMPARE_N_TO_0:%.*]] = icmp eq i64 64, 0
+; ALL-NEXT: br i1 [[COMPARE_SRC_DST]], label [[COPY_BACKWARDS:%.*]], label [[COPY_FORWARD:%.*]]
+; ALL: copy_backwards:
+; ALL-NEXT: br i1 [[COMPARE_N_TO_0]], label [[MEMMOVE_DONE:%.*]], label [[COPY_BACKWARDS_LOOP:%.*]]
+; ALL: copy_backwards_loop:
+; ALL-NEXT: [[TMP1:%.*]] = phi i64 [ [[INDEX_PTR:%.*]], [[COPY_BACKWARDS_LOOP]] ], [ 64, [[COPY_BACKWARDS]] ]
+; ALL-NEXT: [[INDEX_PTR]] = sub i64 [[TMP1]], 1
+; ALL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[SRC]], i64 [[INDEX_PTR]]
+; ALL-NEXT: [[ELEMENT:%.*]] = load volatile i8, ptr addrspace(1) [[TMP2]], align 1
+; ALL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[DST]], i64 [[INDEX_PTR]]
+; ALL-NEXT: store volatile i8 [[ELEMENT]], ptr addrspace(1) [[TMP3]], align 1
+; ALL-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_PTR]], 0
+; ALL-NEXT: br i1 [[TMP4]], label [[MEMMOVE_DONE]], label [[COPY_BACKWARDS_LOOP]]
+; ALL: copy_forward:
+; ALL-NEXT: br i1 [[COMPARE_N_TO_0]], label [[MEMMOVE_DONE]], label [[COPY_FORWARD_LOOP:%.*]]
+; ALL: copy_forward_loop:
+; ALL-NEXT: [[INDEX_PTR1:%.*]] = phi i64 [ [[INDEX_INCREMENT:%.*]], [[COPY_FORWARD_LOOP]] ], [ 0, [[COPY_FORWARD]] ]
+; ALL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[SRC]], i64 [[INDEX_PTR1]]
+; ALL-NEXT: [[ELEMENT2:%.*]] = load volatile i8, ptr addrspace(1) [[TMP5]], align 1
+; ALL-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[DST]], i64 [[INDEX_PTR1]]
+; ALL-NEXT: store volatile i8 [[ELEMENT2]], ptr addrspace(1) [[TMP6]], align 1
+; ALL-NEXT: [[INDEX_INCREMENT]] = add i64 [[INDEX_PTR1]], 1
+; ALL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_INCREMENT]], 64
+; ALL-NEXT: br i1 [[TMP7]], label [[MEMMOVE_DONE]], label [[COPY_FORWARD_LOOP]]
+; ALL: memmove_done:
+; ALL-NEXT: ret void
+;
+ call void @llvm.memmove.p1.p1.i64(ptr addrspace(1) %dst, ptr addrspace(1) %src, i64 64, i1 true)
+ ret void
+}
+
+define amdgpu_kernel void @memcpy_volatile(ptr addrspace(1) %dst, ptr addrspace(1) %src) #0 {
+; MAX1024-LABEL: @memcpy_volatile(
+; MAX1024-NEXT: call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) [[DST:%.*]], ptr addrspace(1) [[SRC:%.*]], i64 64, i1 true)
+; MAX1024-NEXT: ret void
+;
+; ALL-LABEL: @memcpy_volatile(
+; ALL-NEXT: br label [[LOAD_STORE_LOOP:%.*]]
+; ALL: load-store-loop:
+; ALL-NEXT: [[LOOP_INDEX:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP4:%.*]], [[LOAD_STORE_LOOP]] ]
+; ALL-NEXT: [[TMP1:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[SRC:%.*]], i64 [[LOOP_INDEX]]
+; ALL-NEXT: [[TMP2:%.*]] = load volatile <4 x i32>, ptr addrspace(1) [[TMP1]], align 1
+; ALL-NEXT: [[TMP3:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[DST:%.*]], i64 [[LOOP_INDEX]]
+; ALL-NEXT: store volatile <4 x i32> [[TMP2]], ptr addrspace(1) [[TMP3]], align 1
+; ALL-NEXT: [[TMP4]] = add i64 [[LOOP_INDEX]], 1
+; ALL-NEXT: [[TMP5:%.*]] = icmp ult i64 [[TMP4]], 4
+; ALL-NEXT: br i1 [[TMP5]], label [[LOAD_STORE_LOOP]], label [[MEMCPY_SPLIT:%.*]]
+; ALL: memcpy-split:
+; ALL-NEXT: ret void
+;
+ call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) %dst, ptr addrspace(1) %src, i64 64, i1 true)
+ ret void
+}
+
declare i64 @llvm.umin.i64(i64, i64)
attributes #0 = { nounwind }
From 82f9a5ba965dc1a40fe955b8205dd863bf6385cf Mon Sep 17 00:00:00 2001
From: Youngsuk Kim <youngsuk.kim at hpe.com>
Date: Wed, 3 Jul 2024 05:34:28 -0500
Subject: [PATCH 109/246] [llvm] Avoid 'raw_string_ostream::str' (NFC)
Since `raw_string_ostream` doesn't own the string buffer, it is
desirable (in terms of memory safety) for users to directly reference
the string buffer rather than use `raw_string_ostream::str()`.
This works towards the TODO comment to remove `raw_string_ostream::str()`.
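A minimal sketch of the pattern being applied (illustration, not code
from this patch): raw_string_ostream is unbuffered and writes straight
through to the std::string it wraps, so the buffer can be read directly
wherever str() was used.

    #include "llvm/Support/raw_ostream.h"
    #include <string>

    int main() {
      std::string Buf;
      llvm::raw_string_ostream OS(Buf);
      OS << "count = " << 42;
      // Before: llvm::outs() << OS.str();  After: read Buf directly.
      llvm::outs() << Buf << '\n';
    }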
---
llvm/lib/MC/MCParser/AsmParser.cpp | 2 +-
llvm/lib/MC/MCParser/MasmParser.cpp | 2 +-
llvm/lib/Support/JSON.cpp | 2 +-
llvm/lib/Support/LockFileManager.cpp | 4 ++--
llvm/lib/TableGen/Main.cpp | 4 ++--
llvm/lib/TableGen/SetTheory.cpp | 2 +-
6 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp
index 72cf59f7f583b..13879220a25e7 100644
--- a/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -6238,7 +6238,7 @@ bool AsmParser::parseMSInlineAsm(
if (AsmStart != AsmEnd)
OS << StringRef(AsmStart, AsmEnd - AsmStart);
- AsmString = OS.str();
+ AsmString = AsmStringIR;
return false;
}
diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp
index 67f35409244f0..653cc64b4c36a 100644
--- a/llvm/lib/MC/MCParser/MasmParser.cpp
+++ b/llvm/lib/MC/MCParser/MasmParser.cpp
@@ -2843,7 +2843,7 @@ bool MasmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
raw_string_ostream LocalName(Name);
LocalName << "??"
<< format_hex_no_prefix(LocalCounter++, 4, /*Upper=*/true);
- LocalSymbols.insert({Local, LocalName.str()});
+ LocalSymbols.insert({Local, Name});
Name.clear();
}
diff --git a/llvm/lib/Support/JSON.cpp b/llvm/lib/Support/JSON.cpp
index 17779b58f81b7..a5c617bb4a076 100644
--- a/llvm/lib/Support/JSON.cpp
+++ b/llvm/lib/Support/JSON.cpp
@@ -239,7 +239,7 @@ Error Path::Root::getError() const {
OS << '[' << S.index() << ']';
}
}
- return createStringError(llvm::inconvertibleErrorCode(), OS.str());
+ return createStringError(llvm::inconvertibleErrorCode(), S);
}
std::vector<const Object::value_type *> sortedElements(const Object &O) {
diff --git a/llvm/lib/Support/LockFileManager.cpp b/llvm/lib/Support/LockFileManager.cpp
index 3169aa25ec0d9..ea040ccf22b99 100644
--- a/llvm/lib/Support/LockFileManager.cpp
+++ b/llvm/lib/Support/LockFileManager.cpp
@@ -228,7 +228,7 @@ LockFileManager::LockFileManager(StringRef FileName)
std::string S("failed to create link ");
raw_string_ostream OSS(S);
OSS << LockFileName.str() << " to " << UniqueLockFileName.str();
- setError(EC, OSS.str());
+ setError(EC, S);
return;
}
@@ -274,7 +274,7 @@ std::string LockFileManager::getErrorMessage() const {
raw_string_ostream OSS(Str);
if (!ErrCodeMsg.empty())
OSS << ": " << ErrCodeMsg;
- return OSS.str();
+ return Str;
}
return "";
}
diff --git a/llvm/lib/TableGen/Main.cpp b/llvm/lib/TableGen/Main.cpp
index c6e2e6c89fd2a..841fa7c3f3690 100644
--- a/llvm/lib/TableGen/Main.cpp
+++ b/llvm/lib/TableGen/Main.cpp
@@ -159,7 +159,7 @@ int llvm::TableGenMain(const char *argv0,
// aren't any.
if (auto ExistingOrErr =
MemoryBuffer::getFile(OutputFilename, /*IsText=*/true))
- if (std::move(ExistingOrErr.get())->getBuffer() == Out.str())
+ if (std::move(ExistingOrErr.get())->getBuffer() == OutString)
WriteFile = false;
}
if (WriteFile) {
@@ -168,7 +168,7 @@ int llvm::TableGenMain(const char *argv0,
if (EC)
return reportError(argv0, "error opening " + OutputFilename + ": " +
EC.message() + "\n");
- OutFile.os() << Out.str();
+ OutFile.os() << OutString;
if (ErrorsPrinted == 0)
OutFile.keep();
}
diff --git a/llvm/lib/TableGen/SetTheory.cpp b/llvm/lib/TableGen/SetTheory.cpp
index 3db46aae6d967..f4e3e3d4ce473 100644
--- a/llvm/lib/TableGen/SetTheory.cpp
+++ b/llvm/lib/TableGen/SetTheory.cpp
@@ -221,7 +221,7 @@ struct SequenceOp : public SetTheory::Operator {
std::string Name;
raw_string_ostream OS(Name);
OS << format(Format.c_str(), unsigned(From));
- Record *Rec = Records.getDef(OS.str());
+ Record *Rec = Records.getDef(Name);
if (!Rec)
PrintFatalError(Loc, "No def named '" + Name + "': " +
Expr->getAsString());
From 18eef6802c36fc1744834fa761831aa805714d9e Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 13:02:25 +0100
Subject: [PATCH 110/246] Fix signed/unsigned integer comparison warning. NFC.
---
llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index 0397fb299d27b..305a998a0e05b 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -2810,7 +2810,7 @@ X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
PatternMatch::m_Mask(ShuffleMask))))) {
// Bail if the shuffle was irregular or contains undefs.
int NumElts = cast<FixedVectorType>(MaskSrc->getType())->getNumElements();
- if (NumElts < ShuffleMask.size() || !isPowerOf2_32(NumElts) ||
+ if (NumElts < (int)ShuffleMask.size() || !isPowerOf2_32(NumElts) ||
any_of(ShuffleMask,
[NumElts](int M) { return M < 0 || M >= NumElts; }))
break;
From c1af97db1e3846db1188149afe86cee6585dfc9a Mon Sep 17 00:00:00 2001
From: Timm Bäder <tbaeder at redhat.com>
Date: Wed, 3 Jul 2024 11:29:07 +0200
Subject: [PATCH 111/246] [clang][Interp] Diagnose comparisons against
one-past-end pointers
---
clang/lib/AST/Interp/Interp.h | 15 +++++++++++++++
clang/lib/AST/Interp/Pointer.cpp | 7 ++++---
clang/lib/AST/Interp/Pointer.h | 10 +++++++++-
clang/test/AST/Interp/literals.cpp | 10 ++++++++++
4 files changed, 38 insertions(+), 4 deletions(-)
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index 328db219ca628..5d8362b4fa881 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -922,6 +922,7 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
return true;
}
+ // Reject comparisons to weak pointers.
for (const auto &P : {LHS, RHS}) {
if (P.isZero())
continue;
@@ -934,6 +935,20 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
}
if (!Pointer::hasSameBase(LHS, RHS)) {
+ if (LHS.isOnePastEnd() && !RHS.isOnePastEnd() && !RHS.isZero() &&
+ RHS.getOffset() == 0) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end)
+ << LHS.toDiagnosticString(S.getCtx());
+ return false;
+ } else if (RHS.isOnePastEnd() && !LHS.isOnePastEnd() && !LHS.isZero() &&
+ LHS.getOffset() == 0) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end)
+ << RHS.toDiagnosticString(S.getCtx());
+ return false;
+ }
+
S.Stk.push<BoolT>(BoolT::from(Fn(ComparisonCategoryResult::Unordered)));
return true;
} else {
diff --git a/clang/lib/AST/Interp/Pointer.cpp b/clang/lib/AST/Interp/Pointer.cpp
index 4070d0c54225d..d6603f91fb127 100644
--- a/clang/lib/AST/Interp/Pointer.cpp
+++ b/clang/lib/AST/Interp/Pointer.cpp
@@ -143,7 +143,7 @@ APValue Pointer::toAPValue() const {
if (isDummy() || isUnknownSizeArray() || Desc->asExpr())
return APValue(Base, CharUnits::Zero(), Path,
- /*IsOnePastEnd=*/false, /*IsNullPtr=*/false);
+ /*IsOnePastEnd=*/isOnePastEnd(), /*IsNullPtr=*/false);
// TODO: compute the offset into the object.
CharUnits Offset = CharUnits::Zero();
@@ -181,7 +181,8 @@ APValue Pointer::toAPValue() const {
// Just invert the order of the elements.
std::reverse(Path.begin(), Path.end());
- return APValue(Base, Offset, Path, /*IsOnePastEnd=*/false, /*IsNullPtr=*/false);
+ return APValue(Base, Offset, Path, /*IsOnePastEnd=*/isOnePastEnd(),
+ /*IsNullPtr=*/false);
}
void Pointer::print(llvm::raw_ostream &OS) const {
@@ -348,7 +349,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
// Invalid pointers.
if (Ptr.isDummy() || !Ptr.isLive() || !Ptr.isBlockPointer() ||
- (!Ptr.isUnknownSizeArray() && Ptr.isOnePastEnd()))
+ Ptr.isPastEnd())
return false;
// Primitive values.
diff --git a/clang/lib/AST/Interp/Pointer.h b/clang/lib/AST/Interp/Pointer.h
index 5faec75cc3ec5..4f277eb7d9e58 100644
--- a/clang/lib/AST/Interp/Pointer.h
+++ b/clang/lib/AST/Interp/Pointer.h
@@ -556,10 +556,18 @@ class Pointer {
if (isUnknownSizeArray())
return false;
- return isElementPastEnd() ||
+ return isElementPastEnd() || isPastEnd() ||
(getSize() == getOffset() && !isZeroSizeArray());
}
+ /// Checks if the pointer points past the end of the object.
+ bool isPastEnd() const {
+ if (isIntegralPointer())
+ return false;
+
+ return !isZero() && Offset > PointeeStorage.BS.Pointee->getSize();
+ }
+
/// Checks if the pointer is an out-of-bounds element pointer.
bool isElementPastEnd() const { return Offset == PastEndMark; }
diff --git a/clang/test/AST/Interp/literals.cpp b/clang/test/AST/Interp/literals.cpp
index f70ca79e216da..630d9b53cca25 100644
--- a/clang/test/AST/Interp/literals.cpp
+++ b/clang/test/AST/Interp/literals.cpp
@@ -1266,3 +1266,13 @@ static_assert(ReturnInStmtExpr() == 1, ""); // both-error {{not an integral cons
// both-note {{in call to}}
#endif
+
+namespace ComparisonAgainstOnePastEnd {
+ int a, b;
+ static_assert(&a + 1 == &b, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{comparison against pointer '&a + 1' that points past the end of a complete object has unspecified value}}
+ static_assert(&a == &b + 1, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{comparison against pointer '&b + 1' that points past the end of a complete object has unspecified value}}
+
+ static_assert(&a + 1 == &b + 1, ""); // both-error {{static assertion failed}}
+};
From 52cc7c003a2851e66586dc15c6dc813355e147c6 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 13:07:30 +0100
Subject: [PATCH 112/246] [InstCombine][X86] Add tests for special case,
constant folding and demanded elts handling for PMULH style intrinsics
---
.../Transforms/InstCombine/X86/x86-pmulh.ll | 198 ++++++++++++++++++
.../Transforms/InstCombine/X86/x86-pmulhrs.ll | 198 ++++++++++++++++++
.../Transforms/InstCombine/X86/x86-pmulhu.ll | 198 ++++++++++++++++++
3 files changed, 594 insertions(+)
create mode 100644 llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
create mode 100644 llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
create mode 100644 llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
new file mode 100644
index 0000000000000..56eecd869817e
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -mtriple=x86_64-unknown-unknown -S | FileCheck %s
+
+;
+; UNDEF Elts
+;
+
+define <8 x i16> @undef_pmulh_128(<8 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> undef)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> undef)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @undef_pmulh_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> undef, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> undef, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @undef_pmulh_256(<16 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> undef)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> undef)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @undef_pmulh_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> undef, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> undef, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @undef_pmulh_512(<32 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[A0:%.*]], <32 x i16> undef)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> %a0, <32 x i16> undef)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @undef_pmulh_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> undef, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> undef, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
+;
+; Zero Elts
+;
+
+define <8 x i16> @zero_pmulh_128(<8 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> zeroinitializer)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> zeroinitializer)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @zero_pmulh_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> zeroinitializer, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> zeroinitializer, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @zero_pmulh_256(<16 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> zeroinitializer)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> zeroinitializer)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @zero_pmulh_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> zeroinitializer, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> zeroinitializer, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @zero_pmulh_512(<32 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[A0:%.*]], <32 x i16> zeroinitializer)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> %a0, <32 x i16> zeroinitializer)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> zeroinitializer, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> zeroinitializer, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
+;
+; Constant Folding
+;
+
+define <8 x i16> @fold_pmulh_128() {
+; CHECK-LABEL: @fold_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @fold_pmulh_256() {
+; CHECK-LABEL: @fold_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @fold_pmulh_512() {
+; CHECK-LABEL: @fold_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
+ ret <32 x i16> %1
+}
+
+;
+; Demanded Elts
+;
+
+define <8 x i16> @elts_pmulh_128(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[A1:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: ret <8 x i16> [[TMP4]]
+;
+ %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %a1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ %3 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %1, <8 x i16> %2)
+ %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32> zeroinitializer
+ ret <8 x i16> %4
+}
+
+define <16 x i16> @elts_pmulh_256(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[A1:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[TMP1]], <16 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[TMP3]], <16 x i16> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: ret <16 x i16> [[TMP4]]
+;
+ %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = shufflevector <16 x i16> %a1, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %3 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %1, <16 x i16> %2)
+ %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32> zeroinitializer
+ ret <16 x i16> %4
+}
+
+define <32 x i16> @elts_pmulh_512(<32 x i16> %a0, <32 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[A1:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[TMP1]], <32 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[TMP3]], <32 x i16> poison, <32 x i32> zeroinitializer
+; CHECK-NEXT: ret <32 x i16> [[TMP4]]
+;
+ %1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %2 = shufflevector <32 x i16> %a1, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %3 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> %1, <32 x i16> %2)
+ %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> zeroinitializer
+ ret <32 x i16> %4
+}
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
new file mode 100644
index 0000000000000..a343fa266e794
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -mtriple=x86_64-unknown-unknown -S | FileCheck %s
+
+;
+; UNDEF Elts
+;
+
+define <8 x i16> @undef_pmulh_128(<8 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> [[A0:%.*]], <8 x i16> undef)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> undef)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @undef_pmulh_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> undef, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> undef, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @undef_pmulh_256(<16 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> undef)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> undef)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @undef_pmulh_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> undef, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> undef, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @undef_pmulh_512(<32 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[A0:%.*]], <32 x i16> undef)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> %a0, <32 x i16> undef)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @undef_pmulh_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulh_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> undef, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> undef, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
+;
+; Zero Elts
+;
+
+define <8 x i16> @zero_pmulh_128(<8 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> [[A0:%.*]], <8 x i16> zeroinitializer)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> zeroinitializer)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @zero_pmulh_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> zeroinitializer, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> zeroinitializer, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @zero_pmulh_256(<16 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> zeroinitializer)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> zeroinitializer)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @zero_pmulh_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> zeroinitializer, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> zeroinitializer, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @zero_pmulh_512(<32 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[A0:%.*]], <32 x i16> zeroinitializer)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> %a0, <32 x i16> zeroinitializer)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulh_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> zeroinitializer, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> zeroinitializer, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
+;
+; Constant Folding
+;
+
+define <8 x i16> @fold_pmulh_128() {
+; CHECK-LABEL: @fold_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @fold_pmulh_256() {
+; CHECK-LABEL: @fold_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @fold_pmulh_512() {
+; CHECK-LABEL: @fold_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
+ ret <32 x i16> %1
+}
+
+;
+; Demanded Elts
+;
+
+define <8 x i16> @elts_pmulh_128(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[A1:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: ret <8 x i16> [[TMP4]]
+;
+ %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %a1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ %3 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %1, <8 x i16> %2)
+ %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32> zeroinitializer
+ ret <8 x i16> %4
+}
+
+define <16 x i16> @elts_pmulh_256(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[A1:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[TMP1]], <16 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[TMP3]], <16 x i16> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: ret <16 x i16> [[TMP4]]
+;
+ %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = shufflevector <16 x i16> %a1, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %3 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %1, <16 x i16> %2)
+ %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32> zeroinitializer
+ ret <16 x i16> %4
+}
+
+define <32 x i16> @elts_pmulh_512(<32 x i16> %a0, <32 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[A1:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[TMP1]], <32 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[TMP3]], <32 x i16> poison, <32 x i32> zeroinitializer
+; CHECK-NEXT: ret <32 x i16> [[TMP4]]
+;
+ %1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %2 = shufflevector <32 x i16> %a1, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %3 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> %1, <32 x i16> %2)
+ %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> zeroinitializer
+ ret <32 x i16> %4
+}
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
new file mode 100644
index 0000000000000..0ac4fb81a7754
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -mtriple=x86_64-unknown-unknown -S | FileCheck %s
+
+;
+; UNDEF Elts
+;
+
+define <8 x i16> @undef_pmulhu_128(<8 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulhu_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> undef)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> undef)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @undef_pmulhu_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulhu_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> undef, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> undef, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @undef_pmulhu_256(<16 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulhu_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> undef)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> undef)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @undef_pmulhu_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulhu_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> undef, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> undef, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @undef_pmulhu_512(<32 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulhu_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[A0:%.*]], <32 x i16> undef)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> %a0, <32 x i16> undef)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @undef_pmulhu_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @undef_pmulhu_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> undef, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> undef, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
+;
+; Zero Elts
+;
+
+define <8 x i16> @zero_pmulhu_128(<8 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulhu_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> zeroinitializer)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> zeroinitializer)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @zero_pmulhu_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulhu_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> zeroinitializer, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> zeroinitializer, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @zero_pmulhu_256(<16 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulhu_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> zeroinitializer)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> zeroinitializer)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @zero_pmulhu_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulhu_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> zeroinitializer, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> zeroinitializer, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @zero_pmulhu_512(<32 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulhu_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[A0:%.*]], <32 x i16> zeroinitializer)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> %a0, <32 x i16> zeroinitializer)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @zero_pmulhu_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @zero_pmulhu_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> zeroinitializer, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> zeroinitializer, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
+;
+; Constant Folding
+;
+
+define <8 x i16> @fold_pmulhu_128() {
+; CHECK-LABEL: @fold_pmulhu_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @fold_pmulhu_256() {
+; CHECK-LABEL: @fold_pmulhu_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @fold_pmulhu_512() {
+; CHECK-LABEL: @fold_pmulhu_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
+ ret <32 x i16> %1
+}
+
+;
+; Demanded Elts
+;
+
+define <8 x i16> @elts_pmulhu_128(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulhu_128(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[A1:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: ret <8 x i16> [[TMP4]]
+;
+ %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %a1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ %3 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %1, <8 x i16> %2)
+ %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32> zeroinitializer
+ ret <8 x i16> %4
+}
+
+define <16 x i16> @elts_pmulhu_256(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulhu_256(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[A1:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[TMP1]], <16 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[TMP3]], <16 x i16> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: ret <16 x i16> [[TMP4]]
+;
+ %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = shufflevector <16 x i16> %a1, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %3 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %1, <16 x i16> %2)
+ %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32> zeroinitializer
+ ret <16 x i16> %4
+}
+
+define <32 x i16> @elts_pmulhu_512(<32 x i16> %a0, <32 x i16> %a1) {
+; CHECK-LABEL: @elts_pmulhu_512(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[A1:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[TMP1]], <32 x i16> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[TMP3]], <32 x i16> poison, <32 x i32> zeroinitializer
+; CHECK-NEXT: ret <32 x i16> [[TMP4]]
+;
+ %1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %2 = shufflevector <32 x i16> %a1, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %3 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> %1, <32 x i16> %2)
+ %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> zeroinitializer
+ ret <32 x i16> %4
+}
From fb7f65ba468f462103599e762c86f49b420cd984 Mon Sep 17 00:00:00 2001
From: Koakuma <koachan at protonmail.com>
Date: Wed, 3 Jul 2024 19:14:39 +0700
Subject: [PATCH 113/246] [SPARC][IAS] Emit the correct ELF machine type
(#96583)
Emit the correct machine type when writing out ELF objects.
This patch is modeled on GCC's behavior:
- `-m32` emits an object of type EM_SPARC;
- `-m32 -mcpu=v9` emits EM_SPARC32PLUS (however, see below); and
- `-m64` emits EM_SPARCV9.
Note that GCC does not guarantee emission of EM_SPARC32PLUS objects,
since GNU as doesn't support user control of the emitted machine type.
It always autodetects the type from the instruction mix:
- if the object contains any V9 instruction, it emits EM_SPARC32PLUS; and
- it emits EM_SPARC otherwise.
For LLVM we choose deterministic behavior instead for simplicity.
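In rough C++ terms, the selection rule boils down to the following
sketch; the helper name is illustrative only (the patch itself inlines
this as a ternary in SparcELFObjectWriter's constructor, shown in the
diff below), and it assumes the ELF constants from
llvm/BinaryFormat/ELF.h:

  // Hypothetical helper summarizing the machine-type choice; the actual
  // patch expresses this as a ternary in SparcELFObjectWriter.
  static unsigned selectSparcELFMachine(bool Is64Bit, bool HasV9) {
    if (Is64Bit)
      return ELF::EM_SPARCV9;          // 64-bit (sparcv9) triples
    return HasV9 ? ELF::EM_SPARC32PLUS // 32-bit with V9 features enabled
                 : ELF::EM_SPARC;      // plain 32-bit SPARC
  }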
---
.../Sparc/MCTargetDesc/SparcAsmBackend.cpp | 17 +++++++++--------
.../Sparc/MCTargetDesc/SparcELFObjectWriter.cpp | 14 ++++++++------
.../Sparc/MCTargetDesc/SparcMCTargetDesc.h | 4 ++--
llvm/test/MC/Sparc/elf-sparc-machine-type.s | 12 ++++++++++++
4 files changed, 31 insertions(+), 16 deletions(-)
create mode 100644 llvm/test/MC/Sparc/elf-sparc-machine-type.s
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
index 81c943ca47bf8..29282582b82bd 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -131,15 +131,16 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
namespace {
class SparcAsmBackend : public MCAsmBackend {
protected:
- const Target &TheTarget;
bool Is64Bit;
+ bool HasV9;
public:
- SparcAsmBackend(const Target &T)
- : MCAsmBackend(StringRef(T.getName()) == "sparcel"
+ SparcAsmBackend(const MCSubtargetInfo &STI)
+ : MCAsmBackend(STI.getTargetTriple().isLittleEndian()
? llvm::endianness::little
: llvm::endianness::big),
- TheTarget(T), Is64Bit(StringRef(TheTarget.getName()) == "sparcv9") {}
+ Is64Bit(STI.getTargetTriple().isArch64Bit()),
+ HasV9(STI.hasFeature(Sparc::FeatureV9)) {}
unsigned getNumFixupKinds() const override {
return Sparc::NumTargetFixupKinds;
@@ -330,8 +331,8 @@ namespace {
class ELFSparcAsmBackend : public SparcAsmBackend {
Triple::OSType OSType;
public:
- ELFSparcAsmBackend(const Target &T, Triple::OSType OSType) :
- SparcAsmBackend(T), OSType(OSType) { }
+ ELFSparcAsmBackend(const MCSubtargetInfo &STI, Triple::OSType OSType)
+ : SparcAsmBackend(STI), OSType(OSType) {}
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
@@ -358,7 +359,7 @@ namespace {
std::unique_ptr<MCObjectTargetWriter>
createObjectTargetWriter() const override {
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType);
- return createSparcELFObjectWriter(Is64Bit, OSABI);
+ return createSparcELFObjectWriter(Is64Bit, HasV9, OSABI);
}
};
@@ -368,5 +369,5 @@ MCAsmBackend *llvm::createSparcAsmBackend(const Target &T,
const MCSubtargetInfo &STI,
const MCRegisterInfo &MRI,
const MCTargetOptions &Options) {
- return new ELFSparcAsmBackend(T, STI.getTargetTriple().getOS());
+ return new ELFSparcAsmBackend(STI, STI.getTargetTriple().getOS());
}
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
index f17d3e997452d..bfd71af736231 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
@@ -21,10 +21,12 @@ using namespace llvm;
namespace {
class SparcELFObjectWriter : public MCELFObjectTargetWriter {
public:
- SparcELFObjectWriter(bool Is64Bit, uint8_t OSABI)
- : MCELFObjectTargetWriter(Is64Bit, OSABI,
- Is64Bit ? ELF::EM_SPARCV9 : ELF::EM_SPARC,
- /*HasRelocationAddend*/ true) {}
+ SparcELFObjectWriter(bool Is64Bit, bool HasV9, uint8_t OSABI)
+ : MCELFObjectTargetWriter(
+ Is64Bit, OSABI,
+ Is64Bit ? ELF::EM_SPARCV9
+ : (HasV9 ? ELF::EM_SPARC32PLUS : ELF::EM_SPARC),
+ /*HasRelocationAddend*/ true) {}
~SparcELFObjectWriter() override = default;
@@ -146,6 +148,6 @@ bool SparcELFObjectWriter::needsRelocateWithSymbol(const MCValue &,
}
std::unique_ptr<MCObjectTargetWriter>
-llvm::createSparcELFObjectWriter(bool Is64Bit, uint8_t OSABI) {
- return std::make_unique<SparcELFObjectWriter>(Is64Bit, OSABI);
+llvm::createSparcELFObjectWriter(bool Is64Bit, bool HasV9, uint8_t OSABI) {
+ return std::make_unique<SparcELFObjectWriter>(Is64Bit, HasV9, OSABI);
}
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
index a2a9f7474c3f9..63419663b722c 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
@@ -34,8 +34,8 @@ MCCodeEmitter *createSparcMCCodeEmitter(const MCInstrInfo &MCII,
MCAsmBackend *createSparcAsmBackend(const Target &T, const MCSubtargetInfo &STI,
const MCRegisterInfo &MRI,
const MCTargetOptions &Options);
-std::unique_ptr<MCObjectTargetWriter> createSparcELFObjectWriter(bool Is64Bit,
- uint8_t OSABI);
+std::unique_ptr<MCObjectTargetWriter>
+createSparcELFObjectWriter(bool Is64Bit, bool HasV9, uint8_t OSABI);
// Defines symbolic names for Sparc v9 ASI tag names.
namespace SparcASITag {
diff --git a/llvm/test/MC/Sparc/elf-sparc-machine-type.s b/llvm/test/MC/Sparc/elf-sparc-machine-type.s
new file mode 100644
index 0000000000000..630812394560c
--- /dev/null
+++ b/llvm/test/MC/Sparc/elf-sparc-machine-type.s
@@ -0,0 +1,12 @@
+## Emit the correct machine type depending on the triple and CPU options.
+## - `-triple sparc` emits an object of type EM_SPARC;
+## - `-triple sparc -mcpu=v9` emits EM_SPARC32PLUS; and
+## - `-triple sparcv9` emits EM_SPARCV9.
+
+# RUN: llvm-mc -filetype=obj -triple sparc %s -o - | llvm-readobj -h - | FileCheck --check-prefixes=SPARC %s
+# RUN: llvm-mc -filetype=obj -triple sparc -mcpu=v9 %s -o - | llvm-readobj -h - | FileCheck --check-prefixes=SPARC32PLUS %s
+# RUN: llvm-mc -filetype=obj -triple sparcv9 %s -o - | llvm-readobj -h - | FileCheck --check-prefixes=SPARCV9 %s
+
+# SPARC: Machine: EM_SPARC (0x2)
+# SPARC32PLUS: Machine: EM_SPARC32PLUS (0x12)
+# SPARCV9: Machine: EM_SPARCV9 (0x2B)
>From 7c96469ea86a8d41aaa1bdb51b14894b0fbf16c0 Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n at gmail.com>
Date: Sat, 16 Mar 2024 01:51:43 -0500
Subject: [PATCH 114/246] [ValueTracking] Extend LHS/RHS with matching operand
to work without constants.
Previously we only handled the `L0 == R0` case if both `L1` and `R1`
were constant.
We can get more out of the analysis using general constant ranges
instead.
For example, `X u> Y` implies `X != 0`.
In general, any strict comparison on `X` implies that `X` is not equal
to the boundary value for that sign, and constant ranges with/without
known sign bits can be useful in deducing implications.
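As a condensed sketch of the core check this adds (taken from the
isImpliedCondCommonOperandWithCR change in the diff below, with the
callers and range computation omitted; assumes llvm/IR/ConstantRange.h
and llvm/IR/InstrTypes.h):

  // Given that "icmp LPred X, LCR" is true, decide whether
  // "icmp RPred X, RCR" must be true, must be false, or is unknown.
  static std::optional<bool>
  isImpliedCondCommonOperandWithCR(CmpInst::Predicate LPred,
                                   const ConstantRange &LCR,
                                   CmpInst::Predicate RPred,
                                   const ConstantRange &RCR) {
    // All X values for which the LHS compare can hold.
    ConstantRange DomCR = ConstantRange::makeAllowedICmpRegion(LPred, LCR);
    if (DomCR.icmp(RPred, RCR))
      return true;  // Every such X also satisfies the RHS.
    if (DomCR.icmp(CmpInst::getInversePredicate(RPred), RCR))
      return false; // No such X can satisfy the RHS.
    return std::nullopt;
  }

For the example above: with LPred = ICMP_UGT and LCR the full range of
Y, the allowed region for X excludes 0, so the ICMP_NE check against
the singleton {0} succeeds and `X u> Y` implies `X != 0`.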
Closes #85557
---
llvm/lib/Analysis/ValueTracking.cpp | 52 +++++++++++++------
.../icmp-select-implies-common-op.ll | 20 +++----
.../Transforms/InstCombine/range-check.ll | 12 +----
.../Transforms/LoopVectorize/X86/pr23997.ll | 7 ++-
llvm/test/Transforms/NewGVN/pr35125.ll | 9 ++--
5 files changed, 53 insertions(+), 47 deletions(-)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 6434de3e463c9..7660009b088d0 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -8857,20 +8857,20 @@ isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
return std::nullopt;
}
-/// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
-/// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false.
+/// Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
+/// Return false if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
-static std::optional<bool> isImpliedCondCommonOperandWithConstants(
- CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred,
- const APInt &RC) {
- ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC);
- ConstantRange CR = ConstantRange::makeExactICmpRegion(RPred, RC);
- ConstantRange Intersection = DomCR.intersectWith(CR);
- ConstantRange Difference = DomCR.difference(CR);
- if (Intersection.isEmptySet())
- return false;
- if (Difference.isEmptySet())
+static std::optional<bool> isImpliedCondCommonOperandWithCR(
+ CmpInst::Predicate LPred, const ConstantRange &LCR,
+ CmpInst::Predicate RPred, const ConstantRange &RCR) {
+ ConstantRange DomCR = ConstantRange::makeAllowedICmpRegion(LPred, LCR);
+ // If every value allowed by lhs also satisfies rhs, lhs implies rhs
+ if (DomCR.icmp(RPred, RCR))
return true;
+
+ // If there is no overlap, lhs implies not rhs
+ if (DomCR.icmp(CmpInst::getInversePredicate(RPred), RCR))
+ return false;
return std::nullopt;
}
@@ -8910,11 +8910,29 @@ static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
}
}
- // Can we infer anything when the 0-operands match and the 1-operands are
- // constants (not necessarily matching)?
- const APInt *LC, *RC;
- if (L0 == R0 && match(L1, m_APInt(LC)) && match(R1, m_APInt(RC)))
- return isImpliedCondCommonOperandWithConstants(LPred, *LC, RPred, *RC);
+ // See if we can infer anything if operand-0 matches and we have at least one
+ // constant.
+ const APInt *Unused;
+ if (L0 == R0 && (match(L1, m_APInt(Unused)) || match(R1, m_APInt(Unused)))) {
+ // Potential TODO: We could also further use the constant range of L0/R0 to
+ // further constraint the constant ranges. At the moment this leads to
+ // several regressions related to not transforming `multi_use(A + C0) eq/ne
+ // C1` (see discussion: D58633).
+ ConstantRange LCR = computeConstantRange(
+ L1, ICmpInst::isSigned(LPred), /* UseInstrInfo=*/true, /*AC=*/nullptr,
+ /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1);
+ ConstantRange RCR = computeConstantRange(
+ R1, ICmpInst::isSigned(RPred), /* UseInstrInfo=*/true, /*AC=*/nullptr,
+ /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1);
+ // Even if L1/R1 are not both constant, we can still sometimes deduce a
+ // relationship from a single constant. For example, X u> Y implies X != 0.
+ if (auto R = isImpliedCondCommonOperandWithCR(LPred, LCR, RPred, RCR))
+ return R;
+ // If both L1/R1 were exact constant ranges and we didn't get anything
+ // here, we won't be able to deduce this.
+ if (match(L1, m_APInt(Unused)) && match(R1, m_APInt(Unused)))
+ return std::nullopt;
+ }
// Can we infer anything when the two compares have matching operands?
if (L0 == R0 && L1 == R1)
diff --git a/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll b/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
index bacdb54f787d6..8d393a7ae28c9 100644
--- a/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
@@ -29,10 +29,10 @@ define i1 @sgt_3_impliesT_sgt_2(i8 %x, i8 %y) {
define i1 @sgt_x_impliesF_eq_smin_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @sgt_x_impliesF_eq_smin_todo(
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 -128, i8 [[Y:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL]], [[X]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i8 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL:%.*]], [[X]]
+; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
+; CHECK-NEXT: ret i1 [[CMP3]]
;
%cmp = icmp sgt i8 %x, %z
%sel = select i1 %cmp, i8 -128, i8 %y
@@ -43,9 +43,9 @@ define i1 @sgt_x_impliesF_eq_smin_todo(i8 %x, i8 %y, i8 %z) {
define i1 @slt_x_impliesT_ne_smin_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @slt_x_impliesT_ne_smin_todo(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 127, i8 [[Y:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL]], [[X]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL:%.*]], [[X]]
+; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP2]]
+; CHECK-NEXT: ret i1 [[CMP3]]
;
%cmp = icmp slt i8 %x, %z
%sel = select i1 %cmp, i8 127, i8 %y
@@ -56,9 +56,9 @@ define i1 @slt_x_impliesT_ne_smin_todo(i8 %x, i8 %y, i8 %z) {
define i1 @ult_x_impliesT_eq_umax_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @ult_x_impliesT_eq_umax_todo(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[Z:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 -1, i8 [[Y:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL]], [[X]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL:%.*]], [[X]]
+; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP2]]
+; CHECK-NEXT: ret i1 [[CMP3]]
;
%cmp = icmp ugt i8 %z, %x
%sel = select i1 %cmp, i8 255, i8 %y
diff --git a/llvm/test/Transforms/InstCombine/range-check.ll b/llvm/test/Transforms/InstCombine/range-check.ll
index 0d138b6ba7e79..210e57c1d1fe4 100644
--- a/llvm/test/Transforms/InstCombine/range-check.ll
+++ b/llvm/test/Transforms/InstCombine/range-check.ll
@@ -340,11 +340,7 @@ define i1 @negative4_logical(i32 %x, i32 %n) {
define i1 @negative5(i32 %x, i32 %n) {
; CHECK-LABEL: @negative5(
-; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]]
-; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], -1
-; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
%nn = and i32 %n, 2147483647
%a = icmp slt i32 %x, %nn
@@ -355,11 +351,7 @@ define i1 @negative5(i32 %x, i32 %n) {
define i1 @negative5_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @negative5_logical(
-; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]]
-; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], -1
-; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
%nn = and i32 %n, 2147483647
%a = icmp slt i32 %x, %nn
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
index 3d7153e66fc66..8800fa26f067c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
@@ -12,8 +12,7 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs
; CHECK: preheader:
; CHECK-NEXT: [[DOT10:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP0:%.*]], i64 16
; CHECK-NEXT: [[DOT12:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP1:%.*]], i64 16
-; CHECK-NEXT: [[UMAX2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP2:%.*]], i64 1)
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 16
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2:%.*]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 3
@@ -25,7 +24,7 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[UMAX2]], -16
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], -16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -49,7 +48,7 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX2]], [[N_VEC]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[TMP2]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[PREHEADER]] ], [ 0, [[VECTOR_MEMCHECK]] ]
diff --git a/llvm/test/Transforms/NewGVN/pr35125.ll b/llvm/test/Transforms/NewGVN/pr35125.ll
index 9a96594e3446d..6724538a5a7f2 100644
--- a/llvm/test/Transforms/NewGVN/pr35125.ll
+++ b/llvm/test/Transforms/NewGVN/pr35125.ll
@@ -18,15 +18,12 @@ define i32 @main() #0 {
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[STOREMERGE]], [[PHIOFOPS]]
; CHECK-NEXT: br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END6:%.*]]
; CHECK: if.then3:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[STOREMERGE]], -1
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[LOR_RHS:%.*]], label [[LOR_END:%.*]]
+; CHECK-NEXT: br i1 false, label [[LOR_RHS:%.*]], label [[LOR_END:%.*]]
; CHECK: lor.rhs:
-; CHECK-NEXT: [[TOBOOL5:%.*]] = icmp ne i32 [[TMP0]], 0
-; CHECK-NEXT: [[PHITMP:%.*]] = zext i1 [[TOBOOL5]] to i32
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[LOR_END]]
; CHECK: lor.end:
-; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ 1, [[IF_THEN3]] ], [ [[PHITMP]], [[LOR_RHS]] ]
-; CHECK-NEXT: store i32 [[TMP1]], ptr @a, align 4
+; CHECK-NEXT: store i32 1, ptr @a, align 4
; CHECK-NEXT: br label [[IF_END6]]
; CHECK: if.end6:
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @a, align 4
>From 488fdb7d1245af5651ba77630f81412648ba7ae3 Mon Sep 17 00:00:00 2001
From: serge-sans-paille <sguelton at mozilla.com>
Date: Wed, 3 Jul 2024 14:22:38 +0200
Subject: [PATCH 115/246] Revert "[llvm-readobj][ELF] Test multivalued rpath
entries and alter the output for readobj to emphasize the single valued
nature of NEEDED, SONAME, USED etc. (#96562)"
Reverted due to an output change that breaks a downstream project; see
https://github.com/llvm/llvm-project/pull/96562#issuecomment-2204938038
This reverts commit 161e1689ba98fabba71cac21f536708c78e9d7b0.
---
lld/test/ELF/as-needed-no-reloc.s | 2 +-
lld/test/ELF/as-needed.s | 12 +--
lld/test/ELF/auxiliary.s | 4 +-
lld/test/ELF/dynamic-reloc.s | 2 +-
lld/test/ELF/filter.s | 4 +-
lld/test/ELF/gc-sections-shared.s | 2 +-
lld/test/ELF/no-soname.s | 8 +-
lld/test/ELF/partition-synthetic-sections.s | 8 +-
lld/test/ELF/push-state.s | 2 +-
lld/test/ELF/shared-ppc64.s | 2 +-
lld/test/ELF/shared.s | 2 +-
lld/test/ELF/soname.s | 2 +-
lld/test/ELF/soname2.s | 2 +-
lld/test/ELF/wrap-drop-shared-original.s | 4 +-
llvm/test/tools/llvm-ifs/write-stub.test | 2 +-
.../llvm-readobj/ELF/dynamic-malformed.test | 54 ++++++-------
.../tools/llvm-readobj/ELF/dynamic-tags.test | 78 +++++++++----------
.../test/tools/llvm-readobj/ELF/loadname.test | 4 +-
llvm/tools/llvm-readobj/ELFDumper.cpp | 17 ++--
19 files changed, 105 insertions(+), 106 deletions(-)
diff --git a/lld/test/ELF/as-needed-no-reloc.s b/lld/test/ELF/as-needed-no-reloc.s
index 07b291a4940ef..6cedeec3155d6 100644
--- a/lld/test/ELF/as-needed-no-reloc.s
+++ b/lld/test/ELF/as-needed-no-reloc.s
@@ -8,7 +8,7 @@
# There must be a NEEDED entry for each undefined
-# CHECK: (NEEDED) Shared library: {{.*}}as-needed-no-reloc{{.*}}2.so
+# CHECK: (NEEDED) Shared library: [{{.*}}as-needed-no-reloc{{.*}}2.so]
# CHECK: UND bar
.globl _start
diff --git a/lld/test/ELF/as-needed.s b/lld/test/ELF/as-needed.s
index d115c672dda8c..1623f324892b4 100644
--- a/lld/test/ELF/as-needed.s
+++ b/lld/test/ELF/as-needed.s
@@ -32,13 +32,13 @@
// RUN: ld.lld %t.o %t.script -o %t2
// RUN: llvm-readobj --dynamic-table %t2 | FileCheck -check-prefix=CHECK2 %s
-// CHECK: NEEDED Shared library: shared1
-// CHECK: NEEDED Shared library: shared2
-// CHECK: NEEDED Shared library: shared3
+// CHECK: NEEDED Shared library: [shared1]
+// CHECK: NEEDED Shared library: [shared2]
+// CHECK: NEEDED Shared library: [shared3]
-// CHECK2: NEEDED Shared library: shared1
-// CHECK2-NOT: NEEDED Shared library: shared2
-// CHECK2-NOT: NEEDED Shared library: shared3
+// CHECK2: NEEDED Shared library: [shared1]
+// CHECK2-NOT: NEEDED Shared library: [shared2]
+// CHECK2-NOT: NEEDED Shared library: [shared3]
.global _start
_start:
diff --git a/lld/test/ELF/auxiliary.s b/lld/test/ELF/auxiliary.s
index dd978b0c423a5..5a74060cc851d 100644
--- a/lld/test/ELF/auxiliary.s
+++ b/lld/test/ELF/auxiliary.s
@@ -7,8 +7,8 @@
# CHECK: DynamicSection [
# CHECK-NEXT: Tag Type Name/Value
-# CHECK-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: aaa
-# CHECK-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: bbb
+# CHECK-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: [aaa]
+# CHECK-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: [bbb]
# RUN: not ld.lld %t.o -f aaa --auxiliary bbb -o /dev/null 2>&1 \
# RUN: | FileCheck -check-prefix=ERR %s
diff --git a/lld/test/ELF/dynamic-reloc.s b/lld/test/ELF/dynamic-reloc.s
index 6dae02cae125a..6d98db0c2468e 100644
--- a/lld/test/ELF/dynamic-reloc.s
+++ b/lld/test/ELF/dynamic-reloc.s
@@ -32,7 +32,7 @@
// CHECK: DynamicSection [
// CHECK-NEXT: Tag Type Name/Value
-// CHECK-NEXT: 0x0000000000000001 NEEDED Shared library: so
+// CHECK-NEXT: 0x0000000000000001 NEEDED Shared library: [so]
// CHECK-NEXT: 0x0000000000000015 DEBUG 0x0
// CHECK-NEXT: 0x0000000000000017 JMPREL
// CHECK-NEXT: 0x0000000000000002 PLTRELSZ 24 (bytes)
diff --git a/lld/test/ELF/filter.s b/lld/test/ELF/filter.s
index 3f34febacf659..2b07c01013dfa 100644
--- a/lld/test/ELF/filter.s
+++ b/lld/test/ELF/filter.s
@@ -13,8 +13,8 @@
# CHECK: DynamicSection [
# CHECK-NEXT: Tag Type Name/Value
-# CHECK-NEXT: 0x000000007FFFFFFF FILTER Filter library: foo.so
-# CHECK-NEXT: 0x000000007FFFFFFF FILTER Filter library: boo.so
+# CHECK-NEXT: 0x000000007FFFFFFF FILTER Filter library: [foo.so]
+# CHECK-NEXT: 0x000000007FFFFFFF FILTER Filter library: [boo.so]
# RUN: not ld.lld %t.o -F x -o /dev/null 2>&1 | FileCheck -check-prefix=ERR %s
# ERR: -F may not be used without -shared
diff --git a/lld/test/ELF/gc-sections-shared.s b/lld/test/ELF/gc-sections-shared.s
index 9174bdc6f2a8f..d32c15d4187fe 100644
--- a/lld/test/ELF/gc-sections-shared.s
+++ b/lld/test/ELF/gc-sections-shared.s
@@ -15,7 +15,7 @@
# this case is checked with symbol qux and %t4.so.
# CHECK-NOT: NEEDED
-# CHECK: (NEEDED) Shared library: {{.*}}3.so
+# CHECK: (NEEDED) Shared library: [{{.*}}3.so]
# CHECK-NOT: NEEDED
# CHECK-DAG: FUNC WEAK DEFAULT UND qux
diff --git a/lld/test/ELF/no-soname.s b/lld/test/ELF/no-soname.s
index f86b74eb15c12..69b17cd6cc1c8 100644
--- a/lld/test/ELF/no-soname.s
+++ b/lld/test/ELF/no-soname.s
@@ -6,26 +6,26 @@
// RUN: ld.lld %t.o %t.dir/no-soname/libfoo.so -o %t
// RUN: llvm-readobj --dynamic-table %t | FileCheck %s
-// CHECK: 0x0000000000000001 NEEDED Shared library: {{.*}}/no-soname/libfoo.so
+// CHECK: 0x0000000000000001 NEEDED Shared library: [{{.*}}/no-soname/libfoo.so]
// CHECK-NOT: NEEDED
// RUN: ld.lld %t.o %t.dir/no-soname/../no-soname/libfoo.so -o %t
// RUN: llvm-readobj --dynamic-table %t | FileCheck %s --check-prefix=CHECK2
-// CHECK2: 0x0000000000000001 NEEDED Shared library: {{.*}}/no-soname/../no-soname/libfoo.so
+// CHECK2: 0x0000000000000001 NEEDED Shared library: [{{.*}}/no-soname/../no-soname/libfoo.so]
// CHECK2-NOT: NEEDED
// RUN: ld.lld %t.o -L%t.dir/no-soname/../no-soname -lfoo -o %t
// RUN: llvm-readobj --dynamic-table %t | FileCheck %s --check-prefix=CHECK3
-// CHECK3: 0x0000000000000001 NEEDED Shared library: libfoo.so
+// CHECK3: 0x0000000000000001 NEEDED Shared library: [libfoo.so]
// CHECK3-NOT: NEEDED
// RUN: ld.lld %t.o -shared -soname libbar.so -o %t.dir/no-soname/libbar.so
// RUN: ld.lld %t.o %t.dir/no-soname/libbar.so -o %t
// RUN: llvm-readobj --dynamic-table %t | FileCheck %s --check-prefix=CHECK4
-// CHECK4: 0x0000000000000001 NEEDED Shared library: libbar.so
+// CHECK4: 0x0000000000000001 NEEDED Shared library: [libbar.so]
// CHECK4-NOT: NEEDED
.global _start
diff --git a/lld/test/ELF/partition-synthetic-sections.s b/lld/test/ELF/partition-synthetic-sections.s
index 0ae11fed1395a..d38597856e165 100644
--- a/lld/test/ELF/partition-synthetic-sections.s
+++ b/lld/test/ELF/partition-synthetic-sections.s
@@ -74,10 +74,10 @@
// CHECK: Dynamic section
// CHECK-NEXT: Tag
-// CHECK-NEXT: 0x0000000000000001 (NEEDED) Shared library: verneed1.so.0
-// PART0-NEXT: 0x000000000000000e (SONAME) Library soname: main.so
-// PART1-NEXT: 0x0000000000000001 (NEEDED) Shared library: main.so
-// PART1-NEXT: 0x000000000000000e (SONAME) Library soname: part1
+// CHECK-NEXT: 0x0000000000000001 (NEEDED) Shared library: [verneed1.so.0]
+// PART0-NEXT: 0x000000000000000e (SONAME) Library soname: [main.so]
+// PART1-NEXT: 0x0000000000000001 (NEEDED) Shared library: [main.so]
+// PART1-NEXT: 0x000000000000000e (SONAME) Library soname: [part1]
// CHECK-NEXT: 0x0000000000000007 (RELA) 0x[[RELA_DYN_ADDR]]
// CHECK-NEXT: 0x0000000000000008 (RELASZ)
// CHECK-NEXT: 0x0000000000000009 (RELAENT) 24 (bytes)
diff --git a/lld/test/ELF/push-state.s b/lld/test/ELF/push-state.s
index 038382d2f6a94..6477d9d9150b6 100644
--- a/lld/test/ELF/push-state.s
+++ b/lld/test/ELF/push-state.s
@@ -24,7 +24,7 @@
// RUN: ld.lld -o %t.exe -push-state -as-needed -pop-state %t.so %t1.o
// RUN: llvm-readobj --dynamic-table %t.exe | FileCheck -check-prefix=NO-AS-NEEDED %s
-// NO-AS-NEEDED: NEEDED Shared library: libfoo
+// NO-AS-NEEDED: NEEDED Shared library: [libfoo]
// RUN: mkdir -p %t.dir
diff --git a/lld/test/ELF/shared-ppc64.s b/lld/test/ELF/shared-ppc64.s
index 2258db2cc4592..00a27ad0d004e 100644
--- a/lld/test/ELF/shared-ppc64.s
+++ b/lld/test/ELF/shared-ppc64.s
@@ -28,7 +28,7 @@
// CHECK: DynamicSection [
// CHECK-NEXT: Tag Type Name/Value
// CHECK-NEXT: 0x000000000000001D RUNPATH Library runpath: [foo:bar]
-// CHECK-NEXT: 0x0000000000000001 NEEDED Shared library: {{.*}}2.so
+// CHECK-NEXT: 0x0000000000000001 NEEDED Shared library: [{{.*}}2.so]
// CHECK-NEXT: 0x0000000000000015 DEBUG 0x0
// CHECK-NEXT: 0x0000000000000007 RELA [[RELADDR]]
// CHECK-NEXT: 0x0000000000000008 RELASZ [[RELSIZE]] (bytes)
diff --git a/lld/test/ELF/shared.s b/lld/test/ELF/shared.s
index d8a690d3fd27d..800570e63b953 100644
--- a/lld/test/ELF/shared.s
+++ b/lld/test/ELF/shared.s
@@ -157,7 +157,7 @@
// CHECK: DynamicSection [
// CHECK-NEXT: Tag Type Name/Value
// CHECK-NEXT: 0x0000001D RUNPATH Library runpath: [foo:bar]
-// CHECK-NEXT: 0x00000001 NEEDED Shared library: {{.*}}2.so
+// CHECK-NEXT: 0x00000001 NEEDED Shared library: [{{.*}}2.so]
// CHECK-NEXT: 0x00000015 DEBUG 0x0
// CHECK-NEXT: 0x00000011 REL [[RELADDR]]
// CHECK-NEXT: 0x00000012 RELSZ [[RELSIZE]] (bytes)
diff --git a/lld/test/ELF/soname.s b/lld/test/ELF/soname.s
index 904b506f4b2c3..25c969dab7457 100644
--- a/lld/test/ELF/soname.s
+++ b/lld/test/ELF/soname.s
@@ -5,7 +5,7 @@
// RUN: ld.lld %t.o %t.so %t2.so -o %t
// RUN: llvm-readobj --dynamic-table %t | FileCheck %s
-// CHECK: 0x0000000000000001 NEEDED Shared library: bar
+// CHECK: 0x0000000000000001 NEEDED Shared library: [bar]
// CHECK-NOT: NEEDED
.global _start
diff --git a/lld/test/ELF/soname2.s b/lld/test/ELF/soname2.s
index 6866f9802c198..67a9c24af5cdd 100644
--- a/lld/test/ELF/soname2.s
+++ b/lld/test/ELF/soname2.s
@@ -3,7 +3,7 @@
// RUN: ld.lld %t.o -shared -soname=foo.so -o %t
// RUN: llvm-readobj --dynamic-table %t | FileCheck %s
-// CHECK: 0x000000000000000E SONAME Library soname: foo.so
+// CHECK: 0x000000000000000E SONAME Library soname: [foo.so]
.global _start
_start:
diff --git a/lld/test/ELF/wrap-drop-shared-original.s b/lld/test/ELF/wrap-drop-shared-original.s
index 59fef1493a567..f3784aa972796 100644
--- a/lld/test/ELF/wrap-drop-shared-original.s
+++ b/lld/test/ELF/wrap-drop-shared-original.s
@@ -20,13 +20,13 @@
# RUN: llvm-readelf --dynamic --dyn-syms %t/libref-with-original-and-wrapped.so | \
# RUN: FileCheck --check-prefix=ORIGINAL-AND-WRAPPED %s
-# ORIGINAL-NOT: (NEEDED) Shared library: liboriginal.so
+# ORIGINAL-NOT: (NEEDED) Shared library: [liboriginal.so]
# ORIGINAL: Symbol table '.dynsym' contains 3 entries:
# ORIGINAL: NOTYPE LOCAL DEFAULT UND
# ORIGINAL-NEXT: NOTYPE GLOBAL DEFAULT UND __wrap_foo
# ORIGINAL-NEXT: NOTYPE GLOBAL DEFAULT 6 ref
-# ORIGINAL-AND-WRAPPED: (NEEDED) Shared library: liboriginal-and-wrapped.so
+# ORIGINAL-AND-WRAPPED: (NEEDED) Shared library: [liboriginal-and-wrapped.so]
# ORIGINAL-AND-WRAPPED: Symbol table '.dynsym' contains 3 entries:
# ORIGINAL-AND-WRAPPED: NOTYPE LOCAL DEFAULT UND
# ORIGINAL-AND-WRAPPED-NEXT: NOTYPE GLOBAL DEFAULT UND __wrap_foo
diff --git a/llvm/test/tools/llvm-ifs/write-stub.test b/llvm/test/tools/llvm-ifs/write-stub.test
index e9f2d4f147b2c..44c194b77abc7 100644
--- a/llvm/test/tools/llvm-ifs/write-stub.test
+++ b/llvm/test/tools/llvm-ifs/write-stub.test
@@ -151,7 +151,7 @@ Symbols:
# CHECK-NEXT: 0x[[DYNTABZ]]0000006 SYMTAB
# CHECK-NEXT: 0x[[DYNTABZ]]0000005 STRTAB
# CHECK-NEXT: 0x[[DYNTABZ]]000000A STRSZ
-# CHECK-NEXT: 0x[[DYNTABZ]]0000001 NEEDED Shared library: libc.so.6
+# CHECK-NEXT: 0x[[DYNTABZ]]0000001 NEEDED Shared library: [libc.so.6]
# CHECK-NEXT: 0x[[DYNTABZ]]0000000 NULL
# CHECK-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/dynamic-malformed.test b/llvm/test/tools/llvm-readobj/ELF/dynamic-malformed.test
index 05be15775e9e7..d160ea87208c3 100644
--- a/llvm/test/tools/llvm-readobj/ELF/dynamic-malformed.test
+++ b/llvm/test/tools/llvm-readobj/ELF/dynamic-malformed.test
@@ -100,15 +100,15 @@ ProgramHeaders:
# BAD-STRING-LLVM-NEXT: 0x0000000000000005 STRTAB 0x1000
# BAD-STRING-LLVM-NEXT: 0x000000000000000A STRSZ 1 (bytes)
# BAD-STRING-LLVM-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb2: it goes past the end of the table (0xb1)
-# BAD-STRING-LLVM-NEXT: 0x0000000000000001 NEEDED Shared library: <?>
+# BAD-STRING-LLVM-NEXT: 0x0000000000000001 NEEDED Shared library: [<?>]
# BAD-STRING-LLVM-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb3: it goes past the end of the table (0xb1)
-# BAD-STRING-LLVM-NEXT: 0x000000007FFFFFFF FILTER Filter library: <?>
+# BAD-STRING-LLVM-NEXT: 0x000000007FFFFFFF FILTER Filter library: [<?>]
# BAD-STRING-LLVM-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb4: it goes past the end of the table (0xb1)
-# BAD-STRING-LLVM-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: <?>
+# BAD-STRING-LLVM-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: [<?>]
# BAD-STRING-LLVM-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb5: it goes past the end of the table (0xb1)
-# BAD-STRING-LLVM-NEXT: 0x000000007FFFFFFE USED Not needed object: <?>
+# BAD-STRING-LLVM-NEXT: 0x000000007FFFFFFE USED Not needed object: [<?>]
## Note: there is no "string table at offset 0xb0..." warning here, because it was printed earlier.
-# BAD-STRING-LLVM-NEXT: 0x000000000000000E SONAME Library soname: <?>
+# BAD-STRING-LLVM-NEXT: 0x000000000000000E SONAME Library soname: [<?>]
# BAD-STRING-LLVM-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb7: it goes past the end of the table (0xb1)
# BAD-STRING-LLVM-NEXT: 0x000000000000000F RPATH Library rpath: [<?>]
# BAD-STRING-LLVM-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb8: it goes past the end of the table (0xb1)
@@ -121,15 +121,15 @@ ProgramHeaders:
# BAD-STRING-GNU-NEXT: 0x0000000000000005 (STRTAB) 0x1000
# BAD-STRING-GNU-NEXT: 0x000000000000000a (STRSZ) 1 (bytes)
# BAD-STRING-GNU-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb2: it goes past the end of the table (0xb1)
-# BAD-STRING-GNU-NEXT: 0x0000000000000001 (NEEDED) Shared library: <?>
+# BAD-STRING-GNU-NEXT: 0x0000000000000001 (NEEDED) Shared library: [<?>]
# BAD-STRING-GNU-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb3: it goes past the end of the table (0xb1)
-# BAD-STRING-GNU-NEXT: 0x000000007fffffff (FILTER) Filter library: <?>
+# BAD-STRING-GNU-NEXT: 0x000000007fffffff (FILTER) Filter library: [<?>]
# BAD-STRING-GNU-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb4: it goes past the end of the table (0xb1)
-# BAD-STRING-GNU-NEXT: 0x000000007ffffffd (AUXILIARY) Auxiliary library: <?>
+# BAD-STRING-GNU-NEXT: 0x000000007ffffffd (AUXILIARY) Auxiliary library: [<?>]
# BAD-STRING-GNU-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb5: it goes past the end of the table (0xb1)
-# BAD-STRING-GNU-NEXT: 0x000000007ffffffe (USED) Not needed object: <?>
+# BAD-STRING-GNU-NEXT: 0x000000007ffffffe (USED) Not needed object: [<?>]
# BAD-STRING-GNU-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb6: it goes past the end of the table (0xb1)
-# BAD-STRING-GNU-NEXT: 0x000000000000000e (SONAME) Library soname: <?>
+# BAD-STRING-GNU-NEXT: 0x000000000000000e (SONAME) Library soname: [<?>]
# BAD-STRING-GNU-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb7: it goes past the end of the table (0xb1)
# BAD-STRING-GNU-NEXT: 0x000000000000000f (RPATH) Library rpath: [<?>]
# BAD-STRING-GNU-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb8: it goes past the end of the table (0xb1)
@@ -199,8 +199,8 @@ ProgramHeaders:
# BAD-STRTAB-ERR2: warning: '[[FILE]]': unable to parse DT_STRTAB: virtual address is not in any segment: 0x2000000
# BAD-STRTAB-LLVM: LoadName: <Not found>
# BAD-STRTAB: warning: '[[FILE]]': string table was not found
-# BAD-STRTAB-LLVM-NEXT: 0x0000000000000001 NEEDED Shared library: <?>
-# BAD-STRTAB-GNU-NEXT: 0x0000000000000001 (NEEDED) Shared library: <?>
+# BAD-STRTAB-LLVM-NEXT: 0x0000000000000001 NEEDED Shared library: [<?>]
+# BAD-STRTAB-GNU-NEXT: 0x0000000000000001 (NEEDED) Shared library: [<?>]
# BAD-STRTAB: NeededLibraries [
# BAD-STRTAB: <?>
# BAD-STRTAB: ]
@@ -313,15 +313,15 @@ ProgramHeaders:
# NOT-TERMINATED-LLVM: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb4: the string table is not null-terminated
# NOT-TERMINATED: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb0: the string table is not null-terminated
-# NOT-TERMINATED-NEXT: {{[(]?}}NEEDED{{[)]?}} Shared library: <?>
+# NOT-TERMINATED-NEXT: {{[(]?}}NEEDED{{[)]?}} Shared library: [<?>]
# NOT-TERMINATED-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb1: the string table is not null-terminated
-# NOT-TERMINATED-NEXT: {{[(]?}}FILTER{{[)]?}} Filter library: <?>
+# NOT-TERMINATED-NEXT: {{[(]?}}FILTER{{[)]?}} Filter library: [<?>]
# NOT-TERMINATED-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb2: the string table is not null-terminated
-# NOT-TERMINATED-NEXT: {{[(]?}}AUXILIARY{{[)]?}} Auxiliary library: <?>
+# NOT-TERMINATED-NEXT: {{[(]?}}AUXILIARY{{[)]?}} Auxiliary library: [<?>]
# NOT-TERMINATED-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb3: the string table is not null-terminated
-# NOT-TERMINATED-NEXT: {{[(]?}}USED{{[)]?}} Not needed object: <?>
+# NOT-TERMINATED-NEXT: {{[(]?}}USED{{[)]?}} Not needed object: [<?>]
# NOT-TERMINATED-GNU-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb4: the string table is not null-terminated
-# NOT-TERMINATED-NEXT: {{[(]?}}SONAME{{[)]?}} Library soname: <?>
+# NOT-TERMINATED-NEXT: {{[(]?}}SONAME{{[)]?}} Library soname: [<?>]
# NOT-TERMINATED-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb5: the string table is not null-terminated
# NOT-TERMINATED-NEXT: {{[(]?}}RPATH{{[)]?}} Library rpath: [<?>]
# NOT-TERMINATED-GREQ-NEXT: warning: '[[FILE]]': string table at offset 0xb0: unable to read the string at 0xb6: the string table is not null-terminated
@@ -390,11 +390,11 @@ ProgramHeaders:
## as normal. Since the file ends with a zero byte, strings are dumped, but if it didn't,
## we'd get <?> printed instead. The important bit is that we don't get the past the end warning.
-# BEFORE-THE-EOF: {{[(]?}}NEEDED{{[)]?}} Shared library: test.soabc
-# BEFORE-THE-EOF-NEXT: {{[(]?}}FILTER{{[)]?}} Filter library: est.soabc
-# BEFORE-THE-EOF-NEXT: {{[(]?}}AUXILIARY{{[)]?}} Auxiliary library: st.soabc
-# BEFORE-THE-EOF-NEXT: {{[(]?}}USED{{[)]?}} Not needed object: t.soabc
-# BEFORE-THE-EOF-NEXT: {{[(]?}}SONAME{{[)]?}} Library soname: .soabc
+# BEFORE-THE-EOF: {{[(]?}}NEEDED{{[)]?}} Shared library: [test.soabc]
+# BEFORE-THE-EOF-NEXT: {{[(]?}}FILTER{{[)]?}} Filter library: [est.soabc]
+# BEFORE-THE-EOF-NEXT: {{[(]?}}AUXILIARY{{[)]?}} Auxiliary library: [st.soabc]
+# BEFORE-THE-EOF-NEXT: {{[(]?}}USED{{[)]?}} Not needed object: [t.soabc]
+# BEFORE-THE-EOF-NEXT: {{[(]?}}SONAME{{[)]?}} Library soname: [.soabc]
# BEFORE-THE-EOF-NEXT: {{[(]?}}RPATH{{[)]?}} Library rpath: [soabc]
# BEFORE-THE-EOF-NEXT: {{[(]?}}RUNPATH{{[)]?}} Library runpath: [oabc]
# BEFORE-THE-EOF-NEXT: {{[(]?}}NULL{{[)]?}} 0x0
@@ -406,11 +406,11 @@ ProgramHeaders:
# PAST-THE-EOF: warning: '[[FILE]]': the dynamic string table at 0xb0 goes past the end of the file (0x2c0) with DT_STRSZ = 0x211
# PAST-THE-EOF: warning: '[[FILE]]': string table was not found
-# PAST-THE-EOF: {{[(]?}}NEEDED{{[)]?}} Shared library: <?>
-# PAST-THE-EOF-NEXT: {{[(]?}}FILTER{{[)]?}} Filter library: <?>
-# PAST-THE-EOF-NEXT: {{[(]?}}AUXILIARY{{[)]?}} Auxiliary library: <?>
-# PAST-THE-EOF-NEXT: {{[(]?}}USED{{[)]?}} Not needed object: <?>
-# PAST-THE-EOF-NEXT: {{[(]?}}SONAME{{[)]?}} Library soname: <?>
+# PAST-THE-EOF: {{[(]?}}NEEDED{{[)]?}} Shared library: [<?>]
+# PAST-THE-EOF-NEXT: {{[(]?}}FILTER{{[)]?}} Filter library: [<?>]
+# PAST-THE-EOF-NEXT: {{[(]?}}AUXILIARY{{[)]?}} Auxiliary library: [<?>]
+# PAST-THE-EOF-NEXT: {{[(]?}}USED{{[)]?}} Not needed object: [<?>]
+# PAST-THE-EOF-NEXT: {{[(]?}}SONAME{{[)]?}} Library soname: [<?>]
# PAST-THE-EOF-NEXT: {{[(]?}}RPATH{{[)]?}} Library rpath: [<?>]
# PAST-THE-EOF-NEXT: {{[(]?}}RUNPATH{{[)]?}} Library runpath: [<?>]
# PAST-THE-EOF-NEXT: {{[(]?}}NULL{{[)]?}} 0x0
diff --git a/llvm/test/tools/llvm-readobj/ELF/dynamic-tags.test b/llvm/test/tools/llvm-readobj/ELF/dynamic-tags.test
index e0f293ae5273f..dec2353fa74dd 100644
--- a/llvm/test/tools/llvm-readobj/ELF/dynamic-tags.test
+++ b/llvm/test/tools/llvm-readobj/ELF/dynamic-tags.test
@@ -13,7 +13,7 @@
# LLVM64:DynamicSection [ (62 entries)
# LLVM64-NEXT: Tag Type Name/Value
-# LLVM64-NEXT: 0x0000000000000001 NEEDED Shared library: D
+# LLVM64-NEXT: 0x0000000000000001 NEEDED Shared library: [D]
# LLVM64-NEXT: 0x0000000000000002 PLTRELSZ 16 (bytes)
# LLVM64-NEXT: 0x0000000000000003 PLTGOT 0x1000
# LLVM64-NEXT: 0x0000000000000004 HASH 0x1000
@@ -26,8 +26,8 @@
# LLVM64-NEXT: 0x000000000000000B SYMENT 2439 (bytes)
# LLVM64-NEXT: 0x000000000000000C INIT 0x1000
# LLVM64-NEXT: 0x000000000000000D FINI 0x1000
-# LLVM64-NEXT: 0x000000000000000E SONAME Library soname: U
-# LLVM64-NEXT: 0x000000000000000F RPATH Library rpath: [x:w:U]
+# LLVM64-NEXT: 0x000000000000000E SONAME Library soname: [U]
+# LLVM64-NEXT: 0x000000000000000F RPATH Library rpath: [f]
# LLVM64-NEXT: 0x0000000000000010 SYMBOLIC 0x1234567890ABCDEF
# LLVM64-NEXT: 0x0000000000000011 REL 0x1000
# LLVM64-NEXT: 0x0000000000000012 RELSZ 16 (bytes)
@@ -68,9 +68,9 @@
# LLVM64-NEXT: 0x000000006FFFFFFD VERDEFNUM 0
# LLVM64-NEXT: 0x000000006FFFFFFE VERNEED 0x1000
# LLVM64-NEXT: 0x000000006FFFFFFF VERNEEDNUM 0
-# LLVM64-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: D
-# LLVM64-NEXT: 0x000000007FFFFFFE USED Not needed object: U
-# LLVM64-NEXT: 0x000000007FFFFFFF FILTER Filter library: U
+# LLVM64-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: [D]
+# LLVM64-NEXT: 0x000000007FFFFFFE USED Not needed object: [U]
+# LLVM64-NEXT: 0x000000007FFFFFFF FILTER Filter library: [U]
# LLVM64-NEXT: 0x0000000012345678 <unknown:>0x12345678 0x8765432187654321
# LLVM64-NEXT: 0x000000006ABCDEF0 <unknown:>0x6abcdef0 0x9988776655443322
# LLVM64-NEXT: 0x0000000076543210 <unknown:>0x76543210 0x5555666677778888
@@ -79,7 +79,7 @@
# GNU64:Dynamic section at offset {{.*}} contains 62 entries:
# GNU64-NEXT: Tag Type Name/Value
-# GNU64-NEXT: 0x0000000000000001 (NEEDED) Shared library: D
+# GNU64-NEXT: 0x0000000000000001 (NEEDED) Shared library: [D]
# GNU64-NEXT: 0x0000000000000002 (PLTRELSZ) 16 (bytes)
# GNU64-NEXT: 0x0000000000000003 (PLTGOT) 0x1000
# GNU64-NEXT: 0x0000000000000004 (HASH) 0x1000
@@ -92,8 +92,8 @@
# GNU64-NEXT: 0x000000000000000b (SYMENT) 2439 (bytes)
# GNU64-NEXT: 0x000000000000000c (INIT) 0x1000
# GNU64-NEXT: 0x000000000000000d (FINI) 0x1000
-# GNU64-NEXT: 0x000000000000000e (SONAME) Library soname: U
-# GNU64-NEXT: 0x000000000000000f (RPATH) Library rpath: [x:w:U]
+# GNU64-NEXT: 0x000000000000000e (SONAME) Library soname: [U]
+# GNU64-NEXT: 0x000000000000000f (RPATH) Library rpath: [f]
# GNU64-NEXT: 0x0000000000000010 (SYMBOLIC) 0x1234567890abcdef
# GNU64-NEXT: 0x0000000000000011 (REL) 0x1000
# GNU64-NEXT: 0x0000000000000012 (RELSZ) 16 (bytes)
@@ -134,9 +134,9 @@
# GNU64-NEXT: 0x000000006ffffffd (VERDEFNUM) 0
# GNU64-NEXT: 0x000000006ffffffe (VERNEED) 0x1000
# GNU64-NEXT: 0x000000006fffffff (VERNEEDNUM) 0
-# GNU64-NEXT: 0x000000007ffffffd (AUXILIARY) Auxiliary library: D
-# GNU64-NEXT: 0x000000007ffffffe (USED) Not needed object: U
-# GNU64-NEXT: 0x000000007fffffff (FILTER) Filter library: U
+# GNU64-NEXT: 0x000000007ffffffd (AUXILIARY) Auxiliary library: [D]
+# GNU64-NEXT: 0x000000007ffffffe (USED) Not needed object: [U]
+# GNU64-NEXT: 0x000000007fffffff (FILTER) Filter library: [U]
# GNU64-NEXT: 0x0000000012345678 (<unknown:>0x12345678) 0x8765432187654321
# GNU64-NEXT: 0x000000006abcdef0 (<unknown:>0x6abcdef0) 0x9988776655443322
# GNU64-NEXT: 0x0000000076543210 (<unknown:>0x76543210) 0x5555666677778888
@@ -218,11 +218,9 @@
# JSON64-NEXT: {
# JSON64-NEXT: "Tag": 15,
# JSON64-NEXT: "Type": "RPATH",
-# JSON64-NEXT: "Value": 9,
+# JSON64-NEXT: "Value": 5,
# JSON64-NEXT: "Path": [
-# JSON64-NEXT: "x",
-# JSON64-NEXT: "w",
-# JSON64-NEXT: "U"
+# JSON64-NEXT: "f"
# JSON64-NEXT: ]
# JSON64-NEXT: },
# JSON64-NEXT: {
@@ -515,7 +513,7 @@ Sections:
Type: SHT_STRTAB
Address: 0x1000
Size: 0x10
- Content: "004400550066007700783A773A5500"
+ Content: "004400550066007700"
- Name: .dynamic
Type: SHT_DYNAMIC
Address: 0x1010
@@ -549,7 +547,7 @@ Sections:
- Tag: DT_SONAME
Value: 0x3
- Tag: DT_RPATH
- Value: 0x9
+ Value: 0x5
- Tag: DT_SYMBOLIC
Value: [[SYMBOLIC=0x1234567890abcdef]]
- Tag: DT_REL
@@ -670,7 +668,7 @@ ProgramHeaders:
# LLVM32:DynamicSection [ (62 entries)
# LLVM32-NEXT: Tag Type Name/Value
-# LLVM32-NEXT: 0x00000001 NEEDED Shared library: D
+# LLVM32-NEXT: 0x00000001 NEEDED Shared library: [D]
# LLVM32-NEXT: 0x00000002 PLTRELSZ 16 (bytes)
# LLVM32-NEXT: 0x00000003 PLTGOT 0x1000
# LLVM32-NEXT: 0x00000004 HASH 0x1000
@@ -683,8 +681,8 @@ ProgramHeaders:
# LLVM32-NEXT: 0x0000000B SYMENT 2439 (bytes)
# LLVM32-NEXT: 0x0000000C INIT 0x1000
# LLVM32-NEXT: 0x0000000D FINI 0x1000
-# LLVM32-NEXT: 0x0000000E SONAME Library soname: U
-# LLVM32-NEXT: 0x0000000F RPATH Library rpath: [x:w:U]
+# LLVM32-NEXT: 0x0000000E SONAME Library soname: [U]
+# LLVM32-NEXT: 0x0000000F RPATH Library rpath: [f]
# LLVM32-NEXT: 0x00000010 SYMBOLIC 0x12345678
# LLVM32-NEXT: 0x00000011 REL 0x1000
# LLVM32-NEXT: 0x00000012 RELSZ 16 (bytes)
@@ -725,9 +723,9 @@ ProgramHeaders:
# LLVM32-NEXT: 0x6FFFFFFD VERDEFNUM 0
# LLVM32-NEXT: 0x6FFFFFFE VERNEED 0x1000
# LLVM32-NEXT: 0x6FFFFFFF VERNEEDNUM 0
-# LLVM32-NEXT: 0x7FFFFFFD AUXILIARY Auxiliary library: D
-# LLVM32-NEXT: 0x7FFFFFFE USED Not needed object: U
-# LLVM32-NEXT: 0x7FFFFFFF FILTER Filter library: U
+# LLVM32-NEXT: 0x7FFFFFFD AUXILIARY Auxiliary library: [D]
+# LLVM32-NEXT: 0x7FFFFFFE USED Not needed object: [U]
+# LLVM32-NEXT: 0x7FFFFFFF FILTER Filter library: [U]
# LLVM32-NEXT: 0x12345678 <unknown:>0x12345678 0x87654321
# LLVM32-NEXT: 0x6ABCDEF0 <unknown:>0x6abcdef0 0x99887766
# LLVM32-NEXT: 0x76543210 <unknown:>0x76543210 0x55556666
@@ -736,7 +734,7 @@ ProgramHeaders:
# GNU32:Dynamic section at offset 0x84 contains 62 entries:
# GNU32-NEXT: Tag Type Name/Value
-# GNU32-NEXT: 0x00000001 (NEEDED) Shared library: D
+# GNU32-NEXT: 0x00000001 (NEEDED) Shared library: [D]
# GNU32-NEXT: 0x00000002 (PLTRELSZ) 16 (bytes)
# GNU32-NEXT: 0x00000003 (PLTGOT) 0x1000
# GNU32-NEXT: 0x00000004 (HASH) 0x1000
@@ -749,8 +747,8 @@ ProgramHeaders:
# GNU32-NEXT: 0x0000000b (SYMENT) 2439 (bytes)
# GNU32-NEXT: 0x0000000c (INIT) 0x1000
# GNU32-NEXT: 0x0000000d (FINI) 0x1000
-# GNU32-NEXT: 0x0000000e (SONAME) Library soname: U
-# GNU32-NEXT: 0x0000000f (RPATH) Library rpath: [x:w:U]
+# GNU32-NEXT: 0x0000000e (SONAME) Library soname: [U]
+# GNU32-NEXT: 0x0000000f (RPATH) Library rpath: [f]
# GNU32-NEXT: 0x00000010 (SYMBOLIC) 0x12345678
# GNU32-NEXT: 0x00000011 (REL) 0x1000
# GNU32-NEXT: 0x00000012 (RELSZ) 16 (bytes)
@@ -791,9 +789,9 @@ ProgramHeaders:
# GNU32-NEXT: 0x6ffffffd (VERDEFNUM) 0
# GNU32-NEXT: 0x6ffffffe (VERNEED) 0x1000
# GNU32-NEXT: 0x6fffffff (VERNEEDNUM) 0
-# GNU32-NEXT: 0x7ffffffd (AUXILIARY) Auxiliary library: D
-# GNU32-NEXT: 0x7ffffffe (USED) Not needed object: U
-# GNU32-NEXT: 0x7fffffff (FILTER) Filter library: U
+# GNU32-NEXT: 0x7ffffffd (AUXILIARY) Auxiliary library: [D]
+# GNU32-NEXT: 0x7ffffffe (USED) Not needed object: [U]
+# GNU32-NEXT: 0x7fffffff (FILTER) Filter library: [U]
# GNU32-NEXT: 0x12345678 (<unknown:>0x12345678) 0x87654321
# GNU32-NEXT: 0x6abcdef0 (<unknown:>0x6abcdef0) 0x99887766
# GNU32-NEXT: 0x76543210 (<unknown:>0x76543210) 0x55556666
@@ -868,7 +866,7 @@ Sections:
# PHENTSIZE-LLVM: DynamicSection [ (62 entries)
# PHENTSIZE-LLVM-NEXT: Tag Type Name/Value
-# PHENTSIZE-LLVM-NEXT: 0x0000000000000001 NEEDED Shared library: <?>
+# PHENTSIZE-LLVM-NEXT: 0x0000000000000001 NEEDED Shared library: [<?>]
# PHENTSIZE-LLVM-NEXT: 0x0000000000000002 PLTRELSZ 16 (bytes)
# PHENTSIZE-LLVM-NEXT: 0x0000000000000003 PLTGOT 0x1000
# PHENTSIZE-LLVM-NEXT: 0x0000000000000004 HASH 0x1000
@@ -881,7 +879,7 @@ Sections:
# PHENTSIZE-LLVM-NEXT: 0x000000000000000B SYMENT 2439 (bytes)
# PHENTSIZE-LLVM-NEXT: 0x000000000000000C INIT 0x1000
# PHENTSIZE-LLVM-NEXT: 0x000000000000000D FINI 0x1000
-# PHENTSIZE-LLVM-NEXT: 0x000000000000000E SONAME Library soname: <?>
+# PHENTSIZE-LLVM-NEXT: 0x000000000000000E SONAME Library soname: [<?>]
# PHENTSIZE-LLVM-NEXT: 0x000000000000000F RPATH Library rpath: [<?>]
# PHENTSIZE-LLVM-NEXT: 0x0000000000000010 SYMBOLIC 0x1234567890ABCDEF
# PHENTSIZE-LLVM-NEXT: 0x0000000000000011 REL 0x1000
@@ -923,9 +921,9 @@ Sections:
# PHENTSIZE-LLVM-NEXT: 0x000000006FFFFFFD VERDEFNUM 0
# PHENTSIZE-LLVM-NEXT: 0x000000006FFFFFFE VERNEED 0x1000
# PHENTSIZE-LLVM-NEXT: 0x000000006FFFFFFF VERNEEDNUM 0
-# PHENTSIZE-LLVM-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: <?>
-# PHENTSIZE-LLVM-NEXT: 0x000000007FFFFFFE USED Not needed object: <?>
-# PHENTSIZE-LLVM-NEXT: 0x000000007FFFFFFF FILTER Filter library: <?>
+# PHENTSIZE-LLVM-NEXT: 0x000000007FFFFFFD AUXILIARY Auxiliary library: [<?>]
+# PHENTSIZE-LLVM-NEXT: 0x000000007FFFFFFE USED Not needed object: [<?>]
+# PHENTSIZE-LLVM-NEXT: 0x000000007FFFFFFF FILTER Filter library: [<?>]
# PHENTSIZE-LLVM-NEXT: 0x0000000012345678 <unknown:>0x12345678 0x8765432187654321
# PHENTSIZE-LLVM-NEXT: 0x000000006ABCDEF0 <unknown:>0x6abcdef0 0x9988776655443322
# PHENTSIZE-LLVM-NEXT: 0x0000000076543210 <unknown:>0x76543210 0x5555666677778888
@@ -935,7 +933,7 @@ Sections:
# PHENTSIZE-GNU: Dynamic section at offset 0xc0 contains 62 entries:
# PHENTSIZE-GNU-NEXT: Tag Type Name/Value
# PHENTSIZE-GNU-NEXT: warning: '[[FILE]]': string table was not found
-# PHENTSIZE-GNU-NEXT: 0x0000000000000001 (NEEDED) Shared library: <?>
+# PHENTSIZE-GNU-NEXT: 0x0000000000000001 (NEEDED) Shared library: [<?>]
# PHENTSIZE-GNU-NEXT: 0x0000000000000002 (PLTRELSZ) 16 (bytes)
# PHENTSIZE-GNU-NEXT: 0x0000000000000003 (PLTGOT) 0x1000
# PHENTSIZE-GNU-NEXT: 0x0000000000000004 (HASH) 0x1000
@@ -948,7 +946,7 @@ Sections:
# PHENTSIZE-GNU-NEXT: 0x000000000000000b (SYMENT) 2439 (bytes)
# PHENTSIZE-GNU-NEXT: 0x000000000000000c (INIT) 0x1000
# PHENTSIZE-GNU-NEXT: 0x000000000000000d (FINI) 0x1000
-# PHENTSIZE-GNU-NEXT: 0x000000000000000e (SONAME) Library soname: <?>
+# PHENTSIZE-GNU-NEXT: 0x000000000000000e (SONAME) Library soname: [<?>]
# PHENTSIZE-GNU-NEXT: 0x000000000000000f (RPATH) Library rpath: [<?>]
# PHENTSIZE-GNU-NEXT: 0x0000000000000010 (SYMBOLIC) 0x1234567890abcdef
# PHENTSIZE-GNU-NEXT: 0x0000000000000011 (REL) 0x1000
@@ -990,9 +988,9 @@ Sections:
# PHENTSIZE-GNU-NEXT: 0x000000006ffffffd (VERDEFNUM) 0
# PHENTSIZE-GNU-NEXT: 0x000000006ffffffe (VERNEED) 0x1000
# PHENTSIZE-GNU-NEXT: 0x000000006fffffff (VERNEEDNUM) 0
-# PHENTSIZE-GNU-NEXT: 0x000000007ffffffd (AUXILIARY) Auxiliary library: <?>
-# PHENTSIZE-GNU-NEXT: 0x000000007ffffffe (USED) Not needed object: <?>
-# PHENTSIZE-GNU-NEXT: 0x000000007fffffff (FILTER) Filter library: <?>
+# PHENTSIZE-GNU-NEXT: 0x000000007ffffffd (AUXILIARY) Auxiliary library: [<?>]
+# PHENTSIZE-GNU-NEXT: 0x000000007ffffffe (USED) Not needed object: [<?>]
+# PHENTSIZE-GNU-NEXT: 0x000000007fffffff (FILTER) Filter library: [<?>]
# PHENTSIZE-GNU-NEXT: 0x0000000012345678 (<unknown:>0x12345678) 0x8765432187654321
# PHENTSIZE-GNU-NEXT: 0x000000006abcdef0 (<unknown:>0x6abcdef0) 0x9988776655443322
# PHENTSIZE-GNU-NEXT: 0x0000000076543210 (<unknown:>0x76543210) 0x5555666677778888
diff --git a/llvm/test/tools/llvm-readobj/ELF/loadname.test b/llvm/test/tools/llvm-readobj/ELF/loadname.test
index 359b9ca762334..18db00845e00b 100644
--- a/llvm/test/tools/llvm-readobj/ELF/loadname.test
+++ b/llvm/test/tools/llvm-readobj/ELF/loadname.test
@@ -15,7 +15,7 @@
# GNU-NEXT: Tag Type Name/Value
# GNU-NEXT: 0x0000000000000005 (STRTAB) 0x0
# GNU-NEXT: 0x000000000000000a (STRSZ) 8 (bytes)
-# GNU-NEXT: 0x000000000000000e (SONAME) Library soname: test.so
+# GNU-NEXT: 0x000000000000000e (SONAME) Library soname: [test.so]
# GNU-NEXT: 0x0000000000000000 (NULL) 0x0
!ELF
@@ -62,4 +62,4 @@ ProgramHeaders:
# BROKEN-OFFSET: warning: '[[FILE]]': unable to parse DT_STRTAB: can't map virtual address 0xfffe to the segment with index 1: the segment ends at 0x10077, which is greater than the file size (0x228)
# BROKEN-OFFSET: warning: '[[FILE]]': string table was not found
# BROKEN-OFFSET-LLVM: LoadName: <?>
-# BROKEN-OFFSET-GNU: 0x000000000000000e (SONAME) Library soname: <?>
+# BROKEN-OFFSET-GNU: 0x000000000000000e (SONAME) Library soname: [<?>]
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index 1b6600637800e..7596d90b4fcd2 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -2323,12 +2323,6 @@ std::string ELFDumper<ELFT>::getDynamicEntry(uint64_t Type,
return OS.str();
};
- const std::map<uint64_t, const char *> TagNames = {
- {DT_NEEDED, "Shared library"}, {DT_SONAME, "Library soname"},
- {DT_AUXILIARY, "Auxiliary library"}, {DT_USED, "Not needed object"},
- {DT_FILTER, "Filter library"}, {DT_RPATH, "Library rpath"},
- {DT_RUNPATH, "Library runpath"},
- };
// Handle custom printing of architecture specific tags
switch (Obj.getHeader().e_machine) {
case EM_AARCH64:
@@ -2489,11 +2483,18 @@ std::string ELFDumper<ELFT>::getDynamicEntry(uint64_t Type,
case DT_AUXILIARY:
case DT_USED:
case DT_FILTER:
- return (Twine(TagNames.at(Type)) + ": " + getDynamicString(Value)).str();
case DT_RPATH:
- case DT_RUNPATH:
+ case DT_RUNPATH: {
+ const std::map<uint64_t, const char *> TagNames = {
+ {DT_NEEDED, "Shared library"}, {DT_SONAME, "Library soname"},
+ {DT_AUXILIARY, "Auxiliary library"}, {DT_USED, "Not needed object"},
+ {DT_FILTER, "Filter library"}, {DT_RPATH, "Library rpath"},
+ {DT_RUNPATH, "Library runpath"},
+ };
+
return (Twine(TagNames.at(Type)) + ": [" + getDynamicString(Value) + "]")
.str();
+ }
case DT_FLAGS:
return FormatFlags(Value, ArrayRef(ElfDynamicDTFlags));
case DT_FLAGS_1:
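For reference, the net effect of the hunk above: the TagNames map moves into the only case that uses it, and every string-valued dynamic tag now prints its value in square brackets, the style DT_RPATH and DT_RUNPATH already used. A minimal sketch of that output convention, with stand-in tag values and a stubbed string lookup rather than the real ELFDumper code:

```
// Illustration of the unified "Name: [value]" shape; not ELFDumper itself.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main() {
  // Stand-ins for the DT_* constants and the TagNames map in the hunk above.
  const std::map<uint64_t, const char *> TagNames = {
      {0x01, "Shared library"}, {0x0e, "Library soname"},
      {0x0f, "Library rpath"},  {0x1d, "Library runpath"},
  };
  // What the dumper falls back to when the string table is unreadable.
  auto getDynamicString = [](uint64_t) -> std::string { return "<?>"; };
  for (const auto &[Tag, Name] : TagNames)
    std::cout << Name << ": [" << getDynamicString(Tag) << "]\n";
}
```

The brackets make an unreadable or empty string visually unambiguous, which is exactly what the updated FileCheck lines (`[<?>]`, `[test.so]`) assert.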
From e8c94149d3ca12d4d02fb8de89981c68ffa278f3 Mon Sep 17 00:00:00 2001
From: Alexandre Ganea <aganea at havenstudios.com>
Date: Wed, 3 Jul 2024 08:26:23 -0400
Subject: [PATCH 116/246] [llvm-config] Quote and escape paths if necessary
(#97305)
If any path printed by `llvm-config` contains quotes, spaces, backslashes,
or dollar-sign characters, that path is now quoted and the offending
characters are escaped.
Following discussion in https://github.com/llvm/llvm-project/pull/76304
Fixes https://github.com/llvm/llvm-project/issues/28117
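A minimal sketch of the escaping rule this describes, assuming it amounts to: print plain paths verbatim, otherwise wrap the path in double quotes and backslash-escape quotes, backslashes, and dollar signs. `quoteIfNeeded` is a hypothetical stand-in; the actual tool delegates to `llvm::sys::printArg`, as the diff below shows.

```
// Hypothetical helper mirroring the described quoting behavior.
#include <iostream>
#include <string>

static std::string quoteIfNeeded(const std::string &Path) {
  if (Path.find_first_of(" \"\\$") == std::string::npos)
    return Path; // Nothing special in the path: print it verbatim.
  std::string Out = "\"";
  for (char C : Path) {
    if (C == '"' || C == '\\' || C == '$')
      Out += '\\'; // Escape the character inside the quotes.
    Out += C;
  }
  Out += '"';
  return Out;
}

int main() {
  std::cout << quoteIfNeeded("/usr/lib/llvm/bin") << '\n'; // unchanged
  std::cout << quoteIfNeeded("/opt/my llvm/bin") << '\n';  // quoted
}
```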
---
llvm/docs/ReleaseNotes.rst | 4 ++
llvm/test/tools/llvm-config/paths.test | 8 +--
llvm/tools/llvm-config/llvm-config.cpp | 70 ++++++++++++++++++--------
3 files changed, 56 insertions(+), 26 deletions(-)
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 4fe7ebd0fd84d..da416ffa678c3 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -369,6 +369,10 @@ Changes to the LLVM tools
jumping in reverse direction with shift+L/R/B). (`#95662
<https://github.com/llvm/llvm-project/pull/95662>`).
+* llvm-config now quotes and escapes paths emitted on stdout, to account for
+ spaces or other special characters in paths.
+ (`#97305 <https://github.com/llvm/llvm-project/pull/97305>`).
+
Changes to LLDB
---------------------------------
diff --git a/llvm/test/tools/llvm-config/paths.test b/llvm/test/tools/llvm-config/paths.test
index 419f155ae1f83..6fa43a1561521 100644
--- a/llvm/test/tools/llvm-config/paths.test
+++ b/llvm/test/tools/llvm-config/paths.test
@@ -1,21 +1,21 @@
# Check directory options for obvious issues.
RUN: llvm-config --bindir 2>&1 | FileCheck --check-prefix=CHECK-BINDIR %s
-CHECK-BINDIR: {{.*}}{{/|\\}}bin
+CHECK-BINDIR: {{.*}}{{/|\\\\}}bin
CHECK-BINDIR-NOT: error:
CHECK-BINDIR-NOT: warning
RUN: llvm-config --includedir 2>&1 | FileCheck --check-prefix=CHECK-INCLUDEDIR %s
-CHECK-INCLUDEDIR: {{.*}}{{/|\\}}include
+CHECK-INCLUDEDIR: {{.*}}{{/|\\\\}}include
CHECK-INCLUDEDIR-NOT: error:
CHECK-INCLUDEDIR-NOT: warning
RUN: llvm-config --libdir 2>&1 | FileCheck --check-prefix=CHECK-LIBDIR %s
-CHECK-LIBDIR: {{.*}}{{/|\\}}lib{{.*}}
+CHECK-LIBDIR: {{.*}}{{/|\\\\}}lib{{.*}}
CHECK-LIBDIR-NOT: error:
CHECK-LIBDIR-NOT: warning
RUN: llvm-config --cmakedir 2>&1 | FileCheck --check-prefix=CHECK-CMAKEDIR %s
-CHECK-CMAKEDIR: {{.*}}{{/|\\}}cmake{{/|\\}}llvm
+CHECK-CMAKEDIR: {{.*}}{{/|\\\\}}cmake{{/|\\\\}}llvm
CHECK-CMAKEDIR-NOT: error:
CHECK-CMAKEDIR-NOT: warning
diff --git a/llvm/tools/llvm-config/llvm-config.cpp b/llvm/tools/llvm-config/llvm-config.cpp
index d5b76b1bb6c16..db92d462807d0 100644
--- a/llvm/tools/llvm-config/llvm-config.cpp
+++ b/llvm/tools/llvm-config/llvm-config.cpp
@@ -24,6 +24,7 @@
#include "llvm/Config/config.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
@@ -326,7 +327,7 @@ int main(int argc, char **argv) {
// information.
std::string ActivePrefix, ActiveBinDir, ActiveIncludeDir, ActiveLibDir,
ActiveCMakeDir;
- std::string ActiveIncludeOption;
+ std::vector<std::string> ActiveIncludeOptions;
if (IsInDevelopmentTree) {
ActiveIncludeDir = std::string(LLVM_SRC_ROOT) + "/include";
ActivePrefix = CurrentExecPrefix;
@@ -352,8 +353,8 @@ int main(int argc, char **argv) {
}
// We need to include files from both the source and object trees.
- ActiveIncludeOption =
- ("-I" + ActiveIncludeDir + " " + "-I" + ActiveObjRoot + "/include");
+ ActiveIncludeOptions.push_back(ActiveIncludeDir);
+ ActiveIncludeOptions.push_back(ActiveObjRoot + "/include");
} else {
ActivePrefix = CurrentExecPrefix;
{
@@ -372,7 +373,7 @@ int main(int argc, char **argv) {
sys::fs::make_absolute(ActivePrefix, Path);
ActiveCMakeDir = std::string(Path);
}
- ActiveIncludeOption = "-I" + ActiveIncludeDir;
+ ActiveIncludeOptions.push_back(ActiveIncludeDir);
}
/// We only use `shared library` mode in cases where the static library form
@@ -401,8 +402,8 @@ int main(int argc, char **argv) {
std::replace(ActiveBinDir.begin(), ActiveBinDir.end(), '/', '\\');
std::replace(ActiveLibDir.begin(), ActiveLibDir.end(), '/', '\\');
std::replace(ActiveCMakeDir.begin(), ActiveCMakeDir.end(), '/', '\\');
- std::replace(ActiveIncludeOption.begin(), ActiveIncludeOption.end(), '/',
- '\\');
+ for (auto &Include : ActiveIncludeOptions)
+ std::replace(Include.begin(), Include.end(), '/', '\\');
}
SharedDir = ActiveBinDir;
StaticDir = ActiveLibDir;
@@ -504,6 +505,20 @@ int main(int argc, char **argv) {
};
raw_ostream &OS = outs();
+
+ // Render include paths and associated flags
+ auto RenderFlags = [&](StringRef Flags) {
+ bool First = true;
+ for (auto &Include : ActiveIncludeOptions) {
+ if (!First)
+ OS << ' ';
+ OS << "-I";
+ sys::printArg(OS, Include, /*Quote=*/true);
+ First = false;
+ }
+ OS << ' ' << Flags << '\n';
+ };
+
for (int i = 1; i != argc; ++i) {
StringRef Arg = argv[i];
@@ -512,24 +527,30 @@ int main(int argc, char **argv) {
if (Arg == "--version") {
OS << PACKAGE_VERSION << '\n';
} else if (Arg == "--prefix") {
- OS << ActivePrefix << '\n';
+ sys::printArg(OS, ActivePrefix, /*Quote=*/true);
+ OS << '\n';
} else if (Arg == "--bindir") {
- OS << ActiveBinDir << '\n';
+ sys::printArg(OS, ActiveBinDir, /*Quote=*/true);
+ OS << '\n';
} else if (Arg == "--includedir") {
- OS << ActiveIncludeDir << '\n';
+ sys::printArg(OS, ActiveIncludeDir, /*Quote=*/true);
+ OS << '\n';
} else if (Arg == "--libdir") {
- OS << ActiveLibDir << '\n';
+ sys::printArg(OS, ActiveLibDir, /*Quote=*/true);
+ OS << '\n';
} else if (Arg == "--cmakedir") {
- OS << ActiveCMakeDir << '\n';
+ sys::printArg(OS, ActiveCMakeDir, /*Quote=*/true);
+ OS << '\n';
} else if (Arg == "--cppflags") {
- OS << ActiveIncludeOption << ' ' << LLVM_CPPFLAGS << '\n';
+ RenderFlags(LLVM_CPPFLAGS);
} else if (Arg == "--cflags") {
- OS << ActiveIncludeOption << ' ' << LLVM_CFLAGS << '\n';
+ RenderFlags(LLVM_CFLAGS);
} else if (Arg == "--cxxflags") {
- OS << ActiveIncludeOption << ' ' << LLVM_CXXFLAGS << '\n';
+ RenderFlags(LLVM_CXXFLAGS);
} else if (Arg == "--ldflags") {
- OS << ((HostTriple.isWindowsMSVCEnvironment()) ? "-LIBPATH:" : "-L")
- << ActiveLibDir << ' ' << LLVM_LDFLAGS << '\n';
+ OS << ((HostTriple.isWindowsMSVCEnvironment()) ? "-LIBPATH:" : "-L");
+ sys::printArg(OS, ActiveLibDir, /*Quote=*/true);
+ OS << ' ' << LLVM_LDFLAGS << '\n';
} else if (Arg == "--system-libs") {
PrintSystemLibs = true;
} else if (Arg == "--libs") {
@@ -590,7 +611,8 @@ int main(int argc, char **argv) {
} else if (Arg == "--shared-mode") {
PrintSharedMode = true;
} else if (Arg == "--obj-root") {
- OS << ActivePrefix << '\n';
+ sys::printArg(OS, ActivePrefix, /*Quote=*/true);
+ OS << '\n';
} else if (Arg == "--ignore-libllvm") {
LinkDyLib = false;
LinkMode = BuiltSharedLibs ? LinkModeShared : LinkModeAuto;
@@ -695,26 +717,30 @@ int main(int argc, char **argv) {
auto PrintForLib = [&](const StringRef &Lib) {
const bool Shared = LinkMode == LinkModeShared;
+ std::string LibFileName;
if (PrintLibNames) {
- OS << GetComponentLibraryFileName(Lib, Shared);
+ LibFileName = GetComponentLibraryFileName(Lib, Shared);
} else if (PrintLibFiles) {
- OS << GetComponentLibraryPath(Lib, Shared);
+ LibFileName = GetComponentLibraryPath(Lib, Shared);
} else if (PrintLibs) {
// On Windows, output full path to library without parameters.
// Elsewhere, if this is a typical library name, include it using -l.
if (HostTriple.isWindowsMSVCEnvironment()) {
- OS << GetComponentLibraryPath(Lib, Shared);
+ LibFileName = GetComponentLibraryPath(Lib, Shared);
} else {
+ OS << "-l";
StringRef LibName;
if (GetComponentLibraryNameSlice(Lib, LibName)) {
// Extract library name (remove prefix and suffix).
- OS << "-l" << LibName;
+ LibFileName = LibName;
} else {
// Lib is already a library name without prefix and suffix.
- OS << "-l" << Lib;
+ LibFileName = Lib;
}
}
}
+ if (!LibFileName.empty())
+ sys::printArg(OS, LibFileName, /*Quote=*/true);
};
if (LinkMode == LinkModeShared && LinkDyLib) {
From 08888d0e6b2e02f1f4f34e2bf71a5d4fe8e0e039 Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Wed, 3 Jul 2024 13:26:11 +0100
Subject: [PATCH 117/246] [llvm][Docs] Add release note for lldb's support for
register enums
---
llvm/docs/ReleaseNotes.rst | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index da416ffa678c3..c8946682ca4e8 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -379,6 +379,17 @@ Changes to LLDB
* Register field information is now provided on AArch64 FreeBSD for live
processes and core files (previously only provided on AArch64 Linux).
+* Register field information can now include enums to represent field
+ values. Enums have been added for ``fpcr.RMode`` and ``mte_ctrl.TCF``
+ for AArch64 targets::
+
+ (lldb) register read fpcr
+ fpcr = 0x00000000
+ = (AHP = 0, DN = 0, FZ = 0, RMode = RN, <...>)
+
+ If you need to know the values of the enum, these can be found in
+ the output of ``register info`` for the same register.
+
Changes to BOLT
---------------------------------
* Now supports ``--match-profile-with-function-hash`` to match profiled and
From d5c9ffd545ebf171346ac69b15fafeee469f0b3c Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Wed, 3 Jul 2024 20:32:46 +0800
Subject: [PATCH 118/246] [SDAG] Intersect poison-generating flags after CSE
(#97434)
This patch fixes a miscompilation that occurs when `N` is CSE'd into `Existing`:
```
Existing: t5: i32 = sub nuw Constant:i32<0>, t3
N: t30: i32 = sub Constant:i32<0>, t3
```
Fixes https://github.com/llvm/llvm-project/issues/96366.
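The fix is the single `intersectFlagsWith` call in the hunk below: when a modified node collides with an existing entry in the CSE maps, the survivor may keep only the poison-generating flags common to both computations, since after `ReplaceAllUsesWith` it serves every user. A minimal sketch of that invariant, with illustrative bitmask names rather than the real `SDNodeFlags` API:

```
// Why CSE must intersect flags: above, Existing carries nuw but N does not,
// so keeping nuw on the survivor would wrongly promise no unsigned wrap to
// the users of N.
#include <cassert>
#include <cstdint>

enum : uint8_t { NoFlags = 0, NUW = 1 << 0, NSW = 1 << 1 };

// The surviving node keeps only the guarantees both duplicates made.
inline uint8_t intersectFlags(uint8_t Existing, uint8_t N) {
  return Existing & N;
}

int main() {
  // Existing: t5 = sub nuw 0, t3    N: t30 = sub 0, t3
  assert(intersectFlags(NUW, NoFlags) == NoFlags); // nuw must be dropped
  assert(intersectFlags(NUW | NSW, NUW) == NUW);   // shared flags survive
}
```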
---
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 1 +
llvm/test/CodeGen/AArch64/pr96366.ll | 25 ++++++++++++++++
.../CodeGen/AMDGPU/dagcombine-fma-crash.ll | 2 +-
llvm/test/CodeGen/RISCV/pr96366.ll | 29 +++++++++++++++++++
4 files changed, 56 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AArch64/pr96366.ll
create mode 100644 llvm/test/CodeGen/RISCV/pr96366.ll
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index bc16f885f6a04..96242305e9eab 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1239,6 +1239,7 @@ SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
// If there was already an existing matching node, use ReplaceAllUsesWith
// to replace the dead one with the existing one. This can cause
// recursive merging of other unrelated nodes down the line.
+ Existing->intersectFlagsWith(N->getFlags());
ReplaceAllUsesWith(N, Existing);
// N is now dead. Inform the listeners and delete it.
diff --git a/llvm/test/CodeGen/AArch64/pr96366.ll b/llvm/test/CodeGen/AArch64/pr96366.ll
new file mode 100644
index 0000000000000..0a5d87c7f9bbf
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr96366.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+declare void @use(i32)
+
+define i32 @f(i32 %x) nounwind {
+; CHECK-LABEL: f:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov w19, w0
+; CHECK-NEXT: neg w0, w0
+; CHECK-NEXT: bl use
+; CHECK-NEXT: mov w8, #4 // =0x4
+; CHECK-NEXT: sub w0, w8, w19
+; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %sub1 = sub nuw i32 0, %x
+ call void @use(i32 %sub1)
+ %sub2 = sub i32 1, %x
+ %sub3 = sub i32 3, %x
+ %mul = mul i32 %x, 1
+ %add1 = add i32 %sub2, %mul
+ %add2 = add i32 %add1, %sub3
+ ret i32 %add2
+}
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-fma-crash.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-fma-crash.ll
index 8b1a6878136a9..ddb635cabbab1 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-fma-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-fma-crash.ll
@@ -90,7 +90,7 @@ define float @test2(float %arg, float %arg1) {
; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 1120534528
; CHECK-NEXT: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = nsz contract reassoc nofpexcept V_FMAC_F32_e64 0, [[COPY]], 0, killed [[S_MOV_B32_]], 0, [[V_MOV_B32_e32_]], 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
- ; CHECK-NEXT: [[V_FMAC_F32_e64_1:%[0-9]+]]:vgpr_32 = nsz contract reassoc nofpexcept V_FMAC_F32_e64 0, [[COPY1]], 0, killed [[S_MOV_B32_1]], 0, [[V_FMAC_F32_e64_]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_FMAC_F32_e64_1:%[0-9]+]]:vgpr_32 = nsz contract nofpexcept V_FMAC_F32_e64 0, [[COPY1]], 0, killed [[S_MOV_B32_1]], 0, [[V_FMAC_F32_e64_]], 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nsz contract reassoc nofpexcept V_ADD_F32_e64 0, [[V_FMAC_F32_e64_1]], 0, [[V_MOV_B32_e32_]], 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[V_RCP_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_RCP_F32_e64 0, [[V_FMAC_F32_e64_1]], 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[V_RCP_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_RCP_F32_e64 0, killed [[V_ADD_F32_e64_]], 0, 0, implicit $mode, implicit $exec
diff --git a/llvm/test/CodeGen/RISCV/pr96366.ll b/llvm/test/CodeGen/RISCV/pr96366.ll
new file mode 100644
index 0000000000000..8c6fd5bfb6cb0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/pr96366.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv64 | FileCheck %s
+
+declare void @use(i32)
+
+define i32 @f(i32 %x) nounwind {
+; CHECK-LABEL: f:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: mv s0, a0
+; CHECK-NEXT: negw a0, a0
+; CHECK-NEXT: call use
+; CHECK-NEXT: li a0, 4
+; CHECK-NEXT: subw a0, a0, s0
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %sub1 = sub nuw i32 0, %x
+ call void @use(i32 %sub1)
+ %sub2 = sub i32 1, %x
+ %sub3 = sub i32 3, %x
+ %mul = mul i32 %x, 1
+ %add1 = add i32 %sub2, %mul
+ %add2 = add i32 %add1, %sub3
+ ret i32 %add2
+}
From 3e7ddcc3dcbe9b1e82473c0591af8b5fa24cbe7f Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Wed, 3 Jul 2024 13:37:02 +0100
Subject: [PATCH 119/246] [llvm][Docs] Fix some incorrect RST syntax in the
tools release notes
---
llvm/docs/ReleaseNotes.rst | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index c8946682ca4e8..07c531934eed6 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -354,24 +354,24 @@ Changes to the LLVM tools
would continue. Additionally, it can now continue when it encounters
instructions which lack scheduling information. The behaviour can be
controlled by the newly introduced
- `--skip-unsupported-instructions=<none|lack-sched|parse-failure|any>`, as
- documented in `--help` output and the command guide. (`#90474
- <https://github.com/llvm/llvm-project/pull/90474>`)
+ ``--skip-unsupported-instructions=<none|lack-sched|parse-failure|any>``, as
+ documented in ``--help`` output and the command guide. (`#90474
+ <https://github.com/llvm/llvm-project/pull/90474>`_)
* llvm-readobj's LLVM output format for ELF core files has been changed.
Similarly, the JSON format has been fixed for this case. The NT_FILE note
now has a map for the mapped files. (`#92835
- <https://github.com/llvm/llvm-project/pull/92835>`).
+ <https://github.com/llvm/llvm-project/pull/92835>`_).
* llvm-cov now generates HTML report with JavaScript code to allow simple
jumping between uncovered parts (lines/regions/branches) of code
using buttons on top-right corner of the page or using keys (L/R/B or
jumping in reverse direction with shift+L/R/B). (`#95662
- <https://github.com/llvm/llvm-project/pull/95662>`).
+ <https://github.com/llvm/llvm-project/pull/95662>`_).
* llvm-config now quotes and escapes paths emitted on stdout, to account for
  spaces or other special characters in paths.
- (`#97305 <https://github.com/llvm/llvm-project/pull/97305>`).
+ (`#97305 <https://github.com/llvm/llvm-project/pull/97305>`_).
Changes to LLDB
---------------------------------
From c2072d993a443f08ab1bac8a3d5575e1a48663c7 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 14:40:09 +0200
Subject: [PATCH 120/246] [CVP] Support vectors for `and` elision
---
.../Scalar/CorrelatedValuePropagation.cpp | 10 +++++-----
.../CorrelatedValuePropagation/vectors.ll | 15 ++++++++++++---
2 files changed, 17 insertions(+), 8 deletions(-)
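The transform in question elides `and x, mask` when value-range information proves every possible value of `x` already fits inside the mask; the patch drops the scalar-only bailout and matches splat vector masks via `m_LowBitMask`. A small sketch of the elision condition, mirroring the `getUnsignedMax().ule(*RHS)` check in the diff below (helper names are illustrative, not the LVI API):

```
// The and is a no-op when the operand's unsigned maximum fits in the mask.
#include <cassert>
#include <cstdint>

static bool andIsRedundant(uint64_t UnsignedMax, uint64_t LowBitMask) {
  return UnsignedMax <= LowBitMask;
}

int main() {
  // zext i8 -> i16 gives a range of [0, 255]; masking with 0xff changes
  // nothing, so the and can be replaced by its left operand.
  assert(andIsRedundant(/*UnsignedMax=*/255, /*LowBitMask=*/0xff));
  // A value that may reach 511 really is truncated by 0xff: keep the and.
  assert(!andIsRedundant(/*UnsignedMax=*/511, /*LowBitMask=*/0xff));
}
```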
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 34304c2245e30..2bfae0ec28e17 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -33,6 +33,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
@@ -1189,21 +1190,20 @@ static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI) {
}
static bool processAnd(BinaryOperator *BinOp, LazyValueInfo *LVI) {
- if (BinOp->getType()->isVectorTy())
- return false;
+ using namespace llvm::PatternMatch;
// Pattern match (and lhs, C) where C includes a superset of bits which might
// be set in lhs. This is a common truncation idiom created by instcombine.
const Use &LHS = BinOp->getOperandUse(0);
- ConstantInt *RHS = dyn_cast<ConstantInt>(BinOp->getOperand(1));
- if (!RHS || !RHS->getValue().isMask())
+ const APInt *RHS;
+ if (!match(BinOp->getOperand(1), m_LowBitMask(RHS)))
return false;
// We can only replace the AND with LHS based on range info if the range does
// not include undef.
ConstantRange LRange =
LVI->getConstantRangeAtUse(LHS, /*UndefAllowed=*/false);
- if (!LRange.getUnsignedMax().ule(RHS->getValue()))
+ if (!LRange.getUnsignedMax().ule(*RHS))
return false;
BinOp->replaceAllUsesWith(LHS);
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll b/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
index a06fa2c106609..0024b0a5c75c9 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
@@ -199,15 +199,24 @@ define <2 x float> @sitofp(<2 x i8> %a) {
ret <2 x float> %res
}
-; TODO: Add support for this.
define <2 x i16> @and(<2 x i8> %a) {
; CHECK-LABEL: define <2 x i16> @and(
; CHECK-SAME: <2 x i8> [[A:%.*]]) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT: [[RES:%.*]] = and <2 x i16> [[ZEXT]], <i16 255, i16 255>
-; CHECK-NEXT: ret <2 x i16> [[RES]]
+; CHECK-NEXT: ret <2 x i16> [[ZEXT]]
;
%zext = zext <2 x i8> %a to <2 x i16>
%res = and <2 x i16> %zext, splat (i16 u0xff)
ret <2 x i16> %res
}
+
+define <2 x i16> @and_with_poison(<2 x i8> %a) {
+; CHECK-LABEL: define <2 x i16> @and_with_poison(
+; CHECK-SAME: <2 x i8> [[A:%.*]]) {
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i8> [[A]] to <2 x i16>
+; CHECK-NEXT: ret <2 x i16> [[ZEXT]]
+;
+ %zext = zext <2 x i8> %a to <2 x i16>
+ %res = and <2 x i16> %zext, <i16 u0xff, i16 poison>
+ ret <2 x i16> %res
+}
From c20695a44817d52abda3dee495ef2a172ca315fa Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 13:14:21 +0100
Subject: [PATCH 121/246] [InstCombine][X86]
simplifyDemandedVectorEltsIntrinsic - add handling for PMULH/PMULHU/PMULHRS
intrinsics
---
.../Target/X86/X86InstCombineIntrinsic.cpp | 16 +++++++++++++
.../Transforms/InstCombine/X86/x86-pmulh.ll | 24 +++++++------------
.../Transforms/InstCombine/X86/x86-pmulhrs.ll | 24 +++++++------------
.../Transforms/InstCombine/X86/x86-pmulhu.ll | 24 +++++++------------
4 files changed, 43 insertions(+), 45 deletions(-)
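The tests below all end by splatting lane 0 of the multiply result, so only lane 0 is demanded, and the demanded-elements walk can strip the operand shuffles: these intrinsics are lane-wise, so result lane i depends only on operand lane i. A quick illustration of that property for unsigned 16-bit high multiply (plain C++, not the InstCombine machinery):

```
// Permuting lanes 1..7 of an operand cannot change lane 0 of a lane-wise op.
#include <algorithm>
#include <cassert>
#include <cstdint>

static uint16_t mulhu16(uint16_t A, uint16_t B) {
  return static_cast<uint16_t>((static_cast<uint32_t>(A) * B) >> 16);
}

int main() {
  uint16_t A[8] = {40000, 1, 2, 3, 4, 5, 6, 7};
  uint16_t Shuffled[8];
  std::copy(A, A + 8, Shuffled);
  std::reverse(Shuffled + 1, Shuffled + 8); // shuffle every lane but lane 0
  // When only lane 0 is demanded, the shuffle is dead and can be removed.
  assert(mulhu16(A[0], 50000) == mulhu16(Shuffled[0], 50000));
}
```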
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index 305a998a0e05b..fcf7ad73074c4 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -3189,6 +3189,21 @@ std::optional<Value *> X86TTIImpl::simplifyDemandedVectorEltsIntrinsic(
break;
}
+ case Intrinsic::x86_sse2_pmulh_w:
+ case Intrinsic::x86_avx2_pmulh_w:
+ case Intrinsic::x86_avx512_pmulh_w_512:
+ case Intrinsic::x86_sse2_pmulhu_w:
+ case Intrinsic::x86_avx2_pmulhu_w:
+ case Intrinsic::x86_avx512_pmulhu_w_512:
+ case Intrinsic::x86_ssse3_pmul_hr_sw_128:
+ case Intrinsic::x86_avx2_pmul_hr_sw:
+ case Intrinsic::x86_avx512_pmul_hr_sw_512: {
+ simplifyAndSetOp(&II, 0, DemandedElts, UndefElts);
+ simplifyAndSetOp(&II, 1, DemandedElts, UndefElts2);
+ // NOTE: mulh(undef,undef) != undef.
+ break;
+ }
+
case Intrinsic::x86_sse2_packssdw_128:
case Intrinsic::x86_sse2_packsswb_128:
case Intrinsic::x86_sse2_packuswb_128:
@@ -3255,6 +3270,7 @@ std::optional<Value *> X86TTIImpl::simplifyDemandedVectorEltsIntrinsic(
APInt Op1UndefElts(InnerVWidth, 0);
simplifyAndSetOp(&II, 0, OpDemandedElts, Op0UndefElts);
simplifyAndSetOp(&II, 1, OpDemandedElts, Op1UndefElts);
+ // NOTE: madd(undef,undef) != undef.
break;
}
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
index 56eecd869817e..130dd7b33d8a4 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
@@ -154,11 +154,9 @@ define <32 x i16> @fold_pmulh_512() {
define <8 x i16> @elts_pmulh_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: @elts_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[A1:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
-; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT: ret <8 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: ret <8 x i16> [[TMP2]]
;
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
%2 = shufflevector <8 x i16> %a1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -169,11 +167,9 @@ define <8 x i16> @elts_pmulh_128(<8 x i16> %a0, <8 x i16> %a1) {
define <16 x i16> @elts_pmulh_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: @elts_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[A1:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[TMP1]], <16 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[TMP3]], <16 x i16> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: ret <16 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: ret <16 x i16> [[TMP2]]
;
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = shufflevector <16 x i16> %a1, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -184,11 +180,9 @@ define <16 x i16> @elts_pmulh_256(<16 x i16> %a0, <16 x i16> %a1) {
define <32 x i16> @elts_pmulh_512(<32 x i16> %a0, <32 x i16> %a1) {
; CHECK-LABEL: @elts_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[A1:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[TMP1]], <32 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[TMP3]], <32 x i16> poison, <32 x i32> zeroinitializer
-; CHECK-NEXT: ret <32 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[A0:%.*]], <32 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> poison, <32 x i32> zeroinitializer
+; CHECK-NEXT: ret <32 x i16> [[TMP2]]
;
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%2 = shufflevector <32 x i16> %a1, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
index a343fa266e794..d568a1c703526 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
@@ -154,11 +154,9 @@ define <32 x i16> @fold_pmulh_512() {
define <8 x i16> @elts_pmulh_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: @elts_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[A1:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
-; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT: ret <8 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: ret <8 x i16> [[TMP2]]
;
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
%2 = shufflevector <8 x i16> %a1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -169,11 +167,9 @@ define <8 x i16> @elts_pmulh_128(<8 x i16> %a0, <8 x i16> %a1) {
define <16 x i16> @elts_pmulh_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: @elts_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[A1:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[TMP1]], <16 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[TMP3]], <16 x i16> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: ret <16 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: ret <16 x i16> [[TMP2]]
;
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = shufflevector <16 x i16> %a1, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -184,11 +180,9 @@ define <16 x i16> @elts_pmulh_256(<16 x i16> %a0, <16 x i16> %a1) {
define <32 x i16> @elts_pmulh_512(<32 x i16> %a0, <32 x i16> %a1) {
; CHECK-LABEL: @elts_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[A1:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[TMP1]], <32 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[TMP3]], <32 x i16> poison, <32 x i32> zeroinitializer
-; CHECK-NEXT: ret <32 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[A0:%.*]], <32 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> poison, <32 x i32> zeroinitializer
+; CHECK-NEXT: ret <32 x i16> [[TMP2]]
;
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%2 = shufflevector <32 x i16> %a1, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
index 0ac4fb81a7754..5eed7328136de 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
@@ -154,11 +154,9 @@ define <32 x i16> @fold_pmulhu_512() {
define <8 x i16> @elts_pmulhu_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: @elts_pmulhu_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[A1:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
-; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT: ret <8 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: ret <8 x i16> [[TMP2]]
;
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
%2 = shufflevector <8 x i16> %a1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -169,11 +167,9 @@ define <8 x i16> @elts_pmulhu_128(<8 x i16> %a0, <8 x i16> %a1) {
define <16 x i16> @elts_pmulhu_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: @elts_pmulhu_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[A1:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[TMP1]], <16 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[TMP3]], <16 x i16> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: ret <16 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: ret <16 x i16> [[TMP2]]
;
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = shufflevector <16 x i16> %a1, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -184,11 +180,9 @@ define <16 x i16> @elts_pmulhu_256(<16 x i16> %a0, <16 x i16> %a1) {
define <32 x i16> @elts_pmulhu_512(<32 x i16> %a0, <32 x i16> %a1) {
; CHECK-LABEL: @elts_pmulhu_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[A1:%.*]], <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[TMP1]], <32 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[TMP3]], <32 x i16> poison, <32 x i32> zeroinitializer
-; CHECK-NEXT: ret <32 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[A0:%.*]], <32 x i16> [[A1:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> poison, <32 x i32> zeroinitializer
+; CHECK-NEXT: ret <32 x i16> [[TMP2]]
;
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%2 = shufflevector <32 x i16> %a1, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
From 4ee4bc3cf259419c3d87711df1f96742a0914a0c Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 13:44:39 +0100
Subject: [PATCH 122/246] [InstCombine][X86] Add zero/undef arg handling for
PMULH/PMULHU/PMULHRS intrinsics
---
.../Target/X86/X86InstCombineIntrinsic.cpp | 35 ++++++++++++++++++
.../Transforms/InstCombine/X86/x86-pmulh.ll | 36 +++++++------------
.../Transforms/InstCombine/X86/x86-pmulhrs.ll | 36 +++++++------------
.../Transforms/InstCombine/X86/x86-pmulhu.ll | 36 +++++++------------
4 files changed, 71 insertions(+), 72 deletions(-)
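The subtlety, called out in the comment added below, is that a pmulh with an undef operand folds to zero rather than undef: choosing 0 for the undef lane forces the high half of the product to 0 whatever the other operand holds, so zero is always a sound refinement, while undef would overclaim (the result is fully constrained when the other lane is 0). A quick exhaustive check of that fact for unsigned 16-bit lanes, as illustrative C++ rather than the InstCombine code:

```
#include <cassert>
#include <cstdint>

static uint16_t mulhu16(uint16_t A, uint16_t B) {
  return static_cast<uint16_t>((static_cast<uint32_t>(A) * B) >> 16);
}

int main() {
  // Picking 0 for the undef operand yields 0 for every value of the other
  // lane, so folding mulh(undef, x) to zeroinitializer is always justified.
  for (uint32_t X = 0; X <= 0xffff; ++X)
    assert(mulhu16(0, static_cast<uint16_t>(X)) == 0);
}
```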
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index fcf7ad73074c4..7ac149852be97 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -502,6 +502,27 @@ static Value *simplifyX86pack(IntrinsicInst &II,
return Builder.CreateTrunc(Shuffle, ResTy);
}
+static Value *simplifyX86pmulh(IntrinsicInst &II,
+ InstCombiner::BuilderTy &Builder) {
+ Value *Arg0 = II.getArgOperand(0);
+ Value *Arg1 = II.getArgOperand(1);
+ auto *ResTy = cast<FixedVectorType>(II.getType());
+ [[maybe_unused]] auto *ArgTy = cast<FixedVectorType>(Arg0->getType());
+ assert(ArgTy == ResTy && ResTy->getScalarSizeInBits() == 16 &&
+ "Unexpected PMULH types");
+
+ // Multiply by undef -> zero (NOT undef!) as other arg could still be zero.
+ if (isa<UndefValue>(Arg0) || isa<UndefValue>(Arg1))
+ return ConstantAggregateZero::get(ResTy);
+
+ // Multiply by zero.
+ if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1))
+ return ConstantAggregateZero::get(ResTy);
+
+ // TODO: Constant folding.
+ return nullptr;
+}
+
static Value *simplifyX86pmadd(IntrinsicInst &II,
InstCombiner::BuilderTy &Builder,
bool IsPMADDWD) {
@@ -2568,6 +2589,20 @@ X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
}
break;
+ case Intrinsic::x86_sse2_pmulh_w:
+ case Intrinsic::x86_avx2_pmulh_w:
+ case Intrinsic::x86_avx512_pmulh_w_512:
+ case Intrinsic::x86_sse2_pmulhu_w:
+ case Intrinsic::x86_avx2_pmulhu_w:
+ case Intrinsic::x86_avx512_pmulhu_w_512:
+ case Intrinsic::x86_ssse3_pmul_hr_sw_128:
+ case Intrinsic::x86_avx2_pmul_hr_sw:
+ case Intrinsic::x86_avx512_pmul_hr_sw_512:
+ if (Value *V = simplifyX86pmulh(II, IC.Builder)) {
+ return IC.replaceInstUsesWith(II, V);
+ }
+ break;
+
case Intrinsic::x86_sse2_pmadd_wd:
case Intrinsic::x86_avx2_pmadd_wd:
case Intrinsic::x86_avx512_pmaddw_d_512:
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
index 130dd7b33d8a4..d6a06e7d08358 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
@@ -7,8 +7,7 @@
define <8 x i16> @undef_pmulh_128(<8 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> undef)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> undef)
ret <8 x i16> %1
@@ -16,8 +15,7 @@ define <8 x i16> @undef_pmulh_128(<8 x i16> %a0) {
define <8 x i16> @undef_pmulh_128_commute(<8 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_128_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> undef, <8 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> undef, <8 x i16> %a0)
ret <8 x i16> %1
@@ -25,8 +23,7 @@ define <8 x i16> @undef_pmulh_128_commute(<8 x i16> %a0) {
define <16 x i16> @undef_pmulh_256(<16 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> undef)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> undef)
ret <16 x i16> %1
@@ -34,8 +31,7 @@ define <16 x i16> @undef_pmulh_256(<16 x i16> %a0) {
define <16 x i16> @undef_pmulh_256_commute(<16 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_256_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> undef, <16 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> undef, <16 x i16> %a0)
ret <16 x i16> %1
@@ -43,8 +39,7 @@ define <16 x i16> @undef_pmulh_256_commute(<16 x i16> %a0) {
define <32 x i16> @undef_pmulh_512(<32 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[A0:%.*]], <32 x i16> undef)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> %a0, <32 x i16> undef)
ret <32 x i16> %1
@@ -52,8 +47,7 @@ define <32 x i16> @undef_pmulh_512(<32 x i16> %a0) {
define <32 x i16> @undef_pmulh_512_commute(<32 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_512_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> undef, <32 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> undef, <32 x i16> %a0)
ret <32 x i16> %1
@@ -65,8 +59,7 @@ define <32 x i16> @undef_pmulh_512_commute(<32 x i16> %a0) {
define <8 x i16> @zero_pmulh_128(<8 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> zeroinitializer)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> zeroinitializer)
ret <8 x i16> %1
@@ -74,8 +67,7 @@ define <8 x i16> @zero_pmulh_128(<8 x i16> %a0) {
define <8 x i16> @zero_pmulh_128_commute(<8 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_128_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> zeroinitializer, <8 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> zeroinitializer, <8 x i16> %a0)
ret <8 x i16> %1
@@ -83,8 +75,7 @@ define <8 x i16> @zero_pmulh_128_commute(<8 x i16> %a0) {
define <16 x i16> @zero_pmulh_256(<16 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> zeroinitializer)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> zeroinitializer)
ret <16 x i16> %1
@@ -92,8 +83,7 @@ define <16 x i16> @zero_pmulh_256(<16 x i16> %a0) {
define <16 x i16> @zero_pmulh_256_commute(<16 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_256_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> zeroinitializer, <16 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> zeroinitializer, <16 x i16> %a0)
ret <16 x i16> %1
@@ -101,8 +91,7 @@ define <16 x i16> @zero_pmulh_256_commute(<16 x i16> %a0) {
define <32 x i16> @zero_pmulh_512(<32 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[A0:%.*]], <32 x i16> zeroinitializer)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> %a0, <32 x i16> zeroinitializer)
ret <32 x i16> %1
@@ -110,8 +99,7 @@ define <32 x i16> @zero_pmulh_512(<32 x i16> %a0) {
define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_512_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> zeroinitializer, <32 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> zeroinitializer, <32 x i16> %a0)
ret <32 x i16> %1
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
index d568a1c703526..2c42534cae8b1 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
@@ -7,8 +7,7 @@
define <8 x i16> @undef_pmulh_128(<8 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> [[A0:%.*]], <8 x i16> undef)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> undef)
ret <8 x i16> %1
@@ -16,8 +15,7 @@ define <8 x i16> @undef_pmulh_128(<8 x i16> %a0) {
define <8 x i16> @undef_pmulh_128_commute(<8 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_128_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> undef, <8 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> undef, <8 x i16> %a0)
ret <8 x i16> %1
@@ -25,8 +23,7 @@ define <8 x i16> @undef_pmulh_128_commute(<8 x i16> %a0) {
define <16 x i16> @undef_pmulh_256(<16 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> undef)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> undef)
ret <16 x i16> %1
@@ -34,8 +31,7 @@ define <16 x i16> @undef_pmulh_256(<16 x i16> %a0) {
define <16 x i16> @undef_pmulh_256_commute(<16 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_256_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> undef, <16 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> undef, <16 x i16> %a0)
ret <16 x i16> %1
@@ -43,8 +39,7 @@ define <16 x i16> @undef_pmulh_256_commute(<16 x i16> %a0) {
define <32 x i16> @undef_pmulh_512(<32 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[A0:%.*]], <32 x i16> undef)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> %a0, <32 x i16> undef)
ret <32 x i16> %1
@@ -52,8 +47,7 @@ define <32 x i16> @undef_pmulh_512(<32 x i16> %a0) {
define <32 x i16> @undef_pmulh_512_commute(<32 x i16> %a0) {
; CHECK-LABEL: @undef_pmulh_512_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> undef, <32 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> undef, <32 x i16> %a0)
ret <32 x i16> %1
@@ -65,8 +59,7 @@ define <32 x i16> @undef_pmulh_512_commute(<32 x i16> %a0) {
define <8 x i16> @zero_pmulh_128(<8 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> [[A0:%.*]], <8 x i16> zeroinitializer)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> zeroinitializer)
ret <8 x i16> %1
@@ -74,8 +67,7 @@ define <8 x i16> @zero_pmulh_128(<8 x i16> %a0) {
define <8 x i16> @zero_pmulh_128_commute(<8 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_128_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> zeroinitializer, <8 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> zeroinitializer, <8 x i16> %a0)
ret <8 x i16> %1
@@ -83,8 +75,7 @@ define <8 x i16> @zero_pmulh_128_commute(<8 x i16> %a0) {
define <16 x i16> @zero_pmulh_256(<16 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> zeroinitializer)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> zeroinitializer)
ret <16 x i16> %1
@@ -92,8 +83,7 @@ define <16 x i16> @zero_pmulh_256(<16 x i16> %a0) {
define <16 x i16> @zero_pmulh_256_commute(<16 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_256_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> zeroinitializer, <16 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> zeroinitializer, <16 x i16> %a0)
ret <16 x i16> %1
@@ -101,8 +91,7 @@ define <16 x i16> @zero_pmulh_256_commute(<16 x i16> %a0) {
define <32 x i16> @zero_pmulh_512(<32 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[A0:%.*]], <32 x i16> zeroinitializer)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> %a0, <32 x i16> zeroinitializer)
ret <32 x i16> %1
@@ -110,8 +99,7 @@ define <32 x i16> @zero_pmulh_512(<32 x i16> %a0) {
define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
; CHECK-LABEL: @zero_pmulh_512_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> zeroinitializer, <32 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> zeroinitializer, <32 x i16> %a0)
ret <32 x i16> %1
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
index 5eed7328136de..81b890b7df6e6 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
@@ -7,8 +7,7 @@
define <8 x i16> @undef_pmulhu_128(<8 x i16> %a0) {
; CHECK-LABEL: @undef_pmulhu_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> undef)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> undef)
ret <8 x i16> %1
@@ -16,8 +15,7 @@ define <8 x i16> @undef_pmulhu_128(<8 x i16> %a0) {
define <8 x i16> @undef_pmulhu_128_commute(<8 x i16> %a0) {
; CHECK-LABEL: @undef_pmulhu_128_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> undef, <8 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> undef, <8 x i16> %a0)
ret <8 x i16> %1
@@ -25,8 +23,7 @@ define <8 x i16> @undef_pmulhu_128_commute(<8 x i16> %a0) {
define <16 x i16> @undef_pmulhu_256(<16 x i16> %a0) {
; CHECK-LABEL: @undef_pmulhu_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> undef)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> undef)
ret <16 x i16> %1
@@ -34,8 +31,7 @@ define <16 x i16> @undef_pmulhu_256(<16 x i16> %a0) {
define <16 x i16> @undef_pmulhu_256_commute(<16 x i16> %a0) {
; CHECK-LABEL: @undef_pmulhu_256_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> undef, <16 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> undef, <16 x i16> %a0)
ret <16 x i16> %1
@@ -43,8 +39,7 @@ define <16 x i16> @undef_pmulhu_256_commute(<16 x i16> %a0) {
define <32 x i16> @undef_pmulhu_512(<32 x i16> %a0) {
; CHECK-LABEL: @undef_pmulhu_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[A0:%.*]], <32 x i16> undef)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> %a0, <32 x i16> undef)
ret <32 x i16> %1
@@ -52,8 +47,7 @@ define <32 x i16> @undef_pmulhu_512(<32 x i16> %a0) {
define <32 x i16> @undef_pmulhu_512_commute(<32 x i16> %a0) {
; CHECK-LABEL: @undef_pmulhu_512_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> undef, <32 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> undef, <32 x i16> %a0)
ret <32 x i16> %1
@@ -65,8 +59,7 @@ define <32 x i16> @undef_pmulhu_512_commute(<32 x i16> %a0) {
define <8 x i16> @zero_pmulhu_128(<8 x i16> %a0) {
; CHECK-LABEL: @zero_pmulhu_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> zeroinitializer)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> zeroinitializer)
ret <8 x i16> %1
@@ -74,8 +67,7 @@ define <8 x i16> @zero_pmulhu_128(<8 x i16> %a0) {
define <8 x i16> @zero_pmulhu_128_commute(<8 x i16> %a0) {
; CHECK-LABEL: @zero_pmulhu_128_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> zeroinitializer, <8 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> zeroinitializer, <8 x i16> %a0)
ret <8 x i16> %1
@@ -83,8 +75,7 @@ define <8 x i16> @zero_pmulhu_128_commute(<8 x i16> %a0) {
define <16 x i16> @zero_pmulhu_256(<16 x i16> %a0) {
; CHECK-LABEL: @zero_pmulhu_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> zeroinitializer)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> zeroinitializer)
ret <16 x i16> %1
@@ -92,8 +83,7 @@ define <16 x i16> @zero_pmulhu_256(<16 x i16> %a0) {
define <16 x i16> @zero_pmulhu_256_commute(<16 x i16> %a0) {
; CHECK-LABEL: @zero_pmulhu_256_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> zeroinitializer, <16 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> zeroinitializer, <16 x i16> %a0)
ret <16 x i16> %1
@@ -101,8 +91,7 @@ define <16 x i16> @zero_pmulhu_256_commute(<16 x i16> %a0) {
define <32 x i16> @zero_pmulhu_512(<32 x i16> %a0) {
; CHECK-LABEL: @zero_pmulhu_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[A0:%.*]], <32 x i16> zeroinitializer)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> %a0, <32 x i16> zeroinitializer)
ret <32 x i16> %1
@@ -110,8 +99,7 @@ define <32 x i16> @zero_pmulhu_512(<32 x i16> %a0) {
define <32 x i16> @zero_pmulhu_512_commute(<32 x i16> %a0) {
; CHECK-LABEL: @zero_pmulhu_512_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> zeroinitializer, <32 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> zeroinitializer, <32 x i16> %a0)
ret <32 x i16> %1
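A note on the fold direction in this patch: returning undef for an undef operand
would be unsound, because the PMULH-family result is fully determined whenever
the other operand is zero. A minimal standalone sketch of that argument (plain
C++ modelling one PMULHW lane, not the LLVM sources):

  #include <cassert>
  #include <cstdint>

  // One lane of PMULHW: the high 16 bits of the 32-bit signed product.
  static int16_t pmulh(int16_t a, int16_t b) {
    return static_cast<int16_t>((int32_t(a) * int32_t(b)) >> 16);
  }

  int main() {
    // Whatever value an undef lane is chosen to take, multiplying by a
    // zero lane must still yield zero, so pmulh(undef, y) -> 0 is the
    // only fold that stays correct when y happens to be zero.
    for (int x = INT16_MIN; x <= INT16_MAX; ++x)
      assert(pmulh(static_cast<int16_t>(x), 0) == 0);
    return 0;
  }

Zero is therefore the only constant that refines every possible choice of the
undef lanes, which is why both the undef and zero cases in simplifyX86pmulh
return ConstantAggregateZero.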
>From acaa0262a98fe8ecc525bdbdc2692d803e50976a Mon Sep 17 00:00:00 2001
From: Krzysztof Parzyszek <Krzysztof.Parzyszek at amd.com>
Date: Wed, 3 Jul 2024 07:57:31 -0500
Subject: [PATCH 123/246] [clang][OpenMP] Use leaf constructs in
`mapLoopConstruct` (#97446)
This removes mentions of specific combined directives.
It also adds a quote from the OpenMP spec to explain the code dealing
with the `bind` clause.
---
clang/lib/Sema/SemaOpenMP.cpp | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 86666f064f35d..5fde4f67b4fd9 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -6270,16 +6270,21 @@ bool SemaOpenMP::mapLoopConstruct(
if (BindKind == OMPC_BIND_unknown) {
// Setting the enclosing teams or parallel construct for the loop
// directive without bind clause.
+ // [5.0:129:25-28] If the bind clause is not present on the construct and
+ // the loop construct is closely nested inside a teams or parallel
+ // construct, the binding region is the corresponding teams or parallel
+ // region. If none of those conditions hold, the binding region is not
+ // defined.
BindKind = OMPC_BIND_thread; // Default bind(thread) if binding is unknown
+ ArrayRef<OpenMPDirectiveKind> ParentLeafs =
+ getLeafConstructsOrSelf(ParentDirective);
if (ParentDirective == OMPD_unknown) {
Diag(DSAStack->getDefaultDSALocation(),
diag::err_omp_bind_required_on_loop);
- } else if (ParentDirective == OMPD_parallel ||
- ParentDirective == OMPD_target_parallel) {
+ } else if (ParentLeafs.back() == OMPD_parallel) {
BindKind = OMPC_BIND_parallel;
- } else if (ParentDirective == OMPD_teams ||
- ParentDirective == OMPD_target_teams) {
+ } else if (ParentLeafs.back() == OMPD_teams) {
BindKind = OMPC_BIND_teams;
}
} else {
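For context, a sketch of why the `.back()` check subsumes the removed cases:
getLeafConstructsOrSelf decomposes a combined directive into its leaf
constructs (or returns the directive itself if it is already a leaf), so both
`parallel` and `target parallel` end in OMPD_parallel. A toy model of the idea
(hypothetical types, not the clang API):

  #include <cassert>
  #include <vector>

  enum Dir { D_unknown, D_parallel, D_teams, D_target,
             D_target_parallel, D_target_teams };

  // Toy stand-in for getLeafConstructsOrSelf().
  static std::vector<Dir> leafsOrSelf(Dir D) {
    switch (D) {
    case D_target_parallel: return {D_target, D_parallel};
    case D_target_teams:    return {D_target, D_teams};
    default:                return {D};
    }
  }

  int main() {
    // The old code special-cased both spellings; one check covers both.
    assert(leafsOrSelf(D_parallel).back() == D_parallel);
    assert(leafsOrSelf(D_target_parallel).back() == D_parallel);
    assert(leafsOrSelf(D_target_teams).back() == D_teams);
    return 0;
  }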
>From 6461b921fd06b1c812f1172685b8b7edc0608af7 Mon Sep 17 00:00:00 2001
From: Krzysztof Parzyszek <Krzysztof.Parzyszek at amd.com>
Date: Wed, 3 Jul 2024 07:58:01 -0500
Subject: [PATCH 124/246] [clang][OpenMP] Change `ActOnOpenMPRegionStart` to
use captured regions (#97445)
Instead of checking specific directives, this function now gets the list
of captured regions and processes them individually. This makes the
function directive-agnostic (except for a few cases involving leaf
constructs).
---
clang/lib/Sema/SemaOpenMP.cpp | 600 ++++++++++------------------------
1 file changed, 166 insertions(+), 434 deletions(-)
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 5fde4f67b4fd9..12cd75e7102ba 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -4234,454 +4234,186 @@ static void handleDeclareVariantConstructTrait(DSAStackTy *Stack,
Stack->handleConstructTrait(Traits, ScopeEntry);
}
-void SemaOpenMP::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind,
- Scope *CurScope) {
- ASTContext &Context = getASTContext();
- switch (DKind) {
- case OMPD_parallel:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_parallel_sections:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_loop:
- case OMPD_teams:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params);
- break;
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getParallelRegionParams(Sema &SemaRef, bool LoopBoundSharing) {
+ ASTContext &Context = SemaRef.getASTContext();
+ QualType KmpInt32Ty =
+ Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1).withConst();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params{
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ };
+ if (LoopBoundSharing) {
+ QualType KmpSizeTy = Context.getSizeType().withConst();
+ Params.push_back(std::make_pair(".previous.lb.", KmpSizeTy));
+ Params.push_back(std::make_pair(".previous.ub.", KmpSizeTy));
}
- case OMPD_target_teams:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_loop:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+
+ // __context with shared vars
+ Params.push_back(std::make_pair(StringRef(), QualType()));
+ return Params;
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getTeamsRegionParams(Sema &SemaRef) {
+ return getParallelRegionParams(SemaRef, /*LoopBoundSharing=*/false);
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getTaskRegionParams(Sema &SemaRef) {
+ ASTContext &Context = SemaRef.getASTContext();
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params{
+ std::make_pair(".global_tid.", KmpInt32Ty),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
+ std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ return Params;
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getTargetRegionParams(Sema &SemaRef) {
+ ASTContext &Context = SemaRef.getASTContext();
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params;
+ if (SemaRef.getLangOpts().OpenMPIsTargetDevice) {
QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params,
- /*OpenMPCaptureLevel=*/0);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- SmallVector<SemaOpenMP::CapturedParamNameType, 2> ParamsTarget;
- if (getLangOpts().OpenMPIsTargetDevice)
- ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
- ParamsTarget.push_back(
- std::make_pair(StringRef(), QualType())); // __context with shared vars;
- // Start a captured region for 'target' with no implicit parameters.
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsTarget,
- /*OpenMPCaptureLevel=*/1);
- SemaOpenMP::CapturedParamNameType ParamsTeamsOrParallel[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'teams' or 'parallel'. Both regions have
- // the same implicit parameters.
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsTeamsOrParallel,
- /*OpenMPCaptureLevel=*/2);
- break;
+ Params.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
}
- case OMPD_target:
- case OMPD_target_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params,
- /*OpenMPCaptureLevel=*/0);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- SmallVector<SemaOpenMP::CapturedParamNameType, 2> ParamsTarget;
- if (getLangOpts().OpenMPIsTargetDevice)
- ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
- ParamsTarget.push_back(
- std::make_pair(StringRef(), QualType())); // __context with shared vars;
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsTarget,
- /*OpenMPCaptureLevel=*/1);
- break;
+ // __context with shared vars
+ Params.push_back(std::make_pair(StringRef(), QualType()));
+ return Params;
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getUnknownRegionParams(Sema &SemaRef) {
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params{
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ return Params;
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getTaskloopRegionParams(Sema &SemaRef) {
+ ASTContext &Context = SemaRef.getASTContext();
+ QualType KmpInt32Ty =
+ Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1).withConst();
+ QualType KmpUInt64Ty =
+ Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0).withConst();
+ QualType KmpInt64Ty =
+ Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1).withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params{
+ std::make_pair(".global_tid.", KmpInt32Ty),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
+ std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
+ std::make_pair(".lb.", KmpUInt64Ty),
+ std::make_pair(".ub.", KmpUInt64Ty),
+ std::make_pair(".st.", KmpInt64Ty),
+ std::make_pair(".liter.", KmpInt32Ty),
+ std::make_pair(".reductions.", VoidPtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ return Params;
+}
+
+static void processCapturedRegions(Sema &SemaRef, OpenMPDirectiveKind DKind,
+ Scope *CurScope, SourceLocation Loc) {
+ SmallVector<OpenMPDirectiveKind> Regions;
+ getOpenMPCaptureRegions(Regions, DKind);
+
+ bool LoopBoundSharing = isOpenMPLoopBoundSharingDirective(DKind);
+
+ auto MarkAsInlined = [&](CapturedRegionScopeInfo *CSI) {
+ CSI->TheCapturedDecl->addAttr(AlwaysInlineAttr::CreateImplicit(
+ SemaRef.getASTContext(), {}, AlwaysInlineAttr::Keyword_forceinline));
+ };
+
+ for (auto [Level, RKind] : llvm::enumerate(Regions)) {
+ switch (RKind) {
+ // All region kinds that can be returned from `getOpenMPCaptureRegions`
+ // are listed here.
+ case OMPD_parallel:
+ SemaRef.ActOnCapturedRegionStart(
+ Loc, CurScope, CR_OpenMP,
+ getParallelRegionParams(SemaRef, LoopBoundSharing), Level);
+ break;
+ case OMPD_teams:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getTeamsRegionParams(SemaRef), Level);
+ break;
+ case OMPD_task:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getTaskRegionParams(SemaRef), Level);
+ // Mark this captured region as inlined, because we don't use outlined
+ // function directly.
+ MarkAsInlined(SemaRef.getCurCapturedRegion());
+ break;
+ case OMPD_taskloop:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getTaskloopRegionParams(SemaRef), Level);
+ // Mark this captured region as inlined, because we don't use outlined
+ // function directly.
+ MarkAsInlined(SemaRef.getCurCapturedRegion());
+ break;
+ case OMPD_target:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getTargetRegionParams(SemaRef), Level);
+ break;
+ case OMPD_unknown:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getUnknownRegionParams(SemaRef));
+ break;
+ case OMPD_metadirective:
+ case OMPD_nothing:
+ default:
+ llvm_unreachable("Unexpected capture region");
+ }
}
+}
+
+void SemaOpenMP::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind,
+ Scope *CurScope) {
+ switch (DKind) {
case OMPD_atomic:
case OMPD_critical:
- case OMPD_section:
- case OMPD_master:
case OMPD_masked:
+ case OMPD_master:
+ case OMPD_section:
case OMPD_tile:
case OMPD_unroll:
break;
- case OMPD_loop:
- // TODO: 'loop' may require additional parameters depending on the binding.
- // Treat similar to OMPD_simd/OMPD_for for now.
- case OMPD_simd:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_single:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_ordered:
- case OMPD_scope:
- case OMPD_target_data:
- case OMPD_dispatch: {
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params);
- break;
- }
- case OMPD_task: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- break;
- }
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_masked_taskloop_simd:
- case OMPD_master_taskloop_simd: {
- QualType KmpInt32Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
- .withConst();
- QualType KmpUInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
- .withConst();
- QualType KmpInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
- .withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(".lb.", KmpUInt64Ty),
- std::make_pair(".ub.", KmpUInt64Ty),
- std::make_pair(".st.", KmpInt64Ty),
- std::make_pair(".liter.", KmpInt32Ty),
- std::make_pair(".reductions.", VoidPtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- break;
- }
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd: {
- QualType KmpInt32Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
- .withConst();
- QualType KmpUInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
- .withConst();
- QualType KmpInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
- .withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- SemaOpenMP::CapturedParamNameType ParamsParallel[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'parallel'.
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsParallel,
- /*OpenMPCaptureLevel=*/0);
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(".lb.", KmpUInt64Ty),
- std::make_pair(".ub.", KmpUInt64Ty),
- std::make_pair(".st.", KmpInt64Ty),
- std::make_pair(".liter.", KmpInt32Ty),
- std::make_pair(".reductions.", VoidPtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params,
- /*OpenMPCaptureLevel=*/1);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- break;
- }
- case OMPD_distribute_parallel_for_simd:
- case OMPD_distribute_parallel_for: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
- std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params);
- break;
- }
- // For 'target teams loop', collect all captured regions so codegen can
- // later decide the best IR to emit given the associated loop-nest.
- case OMPD_target_teams_loop:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
-
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params,
- /*OpenMPCaptureLevel=*/0);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- SmallVector<SemaOpenMP::CapturedParamNameType, 2> ParamsTarget;
- if (getLangOpts().OpenMPIsTargetDevice)
- ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
- ParamsTarget.push_back(
- std::make_pair(StringRef(), QualType())); // __context with shared vars;
- // Start a captured region for 'target' with no implicit parameters.
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsTarget,
- /*OpenMPCaptureLevel=*/1);
-
- SemaOpenMP::CapturedParamNameType ParamsTeams[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'target' with no implicit parameters.
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsTeams,
- /*OpenMPCaptureLevel=*/2);
-
- SemaOpenMP::CapturedParamNameType ParamsParallel[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
- std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'teams' or 'parallel'. Both regions have
- // the same implicit parameters.
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsParallel,
- /*OpenMPCaptureLevel=*/3);
+ default:
+ processCapturedRegions(SemaRef, DKind, CurScope,
+ DSAStack->getConstructLoc());
break;
}
- case OMPD_teams_loop:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
-
- SemaOpenMP::CapturedParamNameType ParamsTeams[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'target' with no implicit parameters.
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsTeams,
- /*OpenMPCaptureLevel=*/0);
-
- SemaOpenMP::CapturedParamNameType ParamsParallel[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
- std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'teams' or 'parallel'. Both regions have
- // the same implicit parameters.
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, ParamsParallel,
- /*OpenMPCaptureLevel=*/1);
- break;
- }
- case OMPD_target_update:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- SemaOpenMP::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope,
- CR_OpenMP, Params);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- break;
- }
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_cancel:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_requires:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_metadirective:
- llvm_unreachable("OpenMP Directive is not allowed");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
DSAStack->setContext(SemaRef.CurContext);
- handleDeclareVariantConstructTrait(DSAStack, DKind, /* ScopeEntry */ true);
+ handleDeclareVariantConstructTrait(DSAStack, DKind, /*ScopeEntry=*/true);
}
int SemaOpenMP::getNumberOfConstructScopes(unsigned Level) const {
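The shape of the new flow, sketched with toy data (the region list below is
what getOpenMPCaptureRegions is expected to produce for
'target teams distribute parallel for', matching the four nested captured
regions the old code opened at capture levels 0-3; this is an illustration,
not the clang API):

  #include <cstdio>
  #include <vector>

  enum Region { R_task, R_target, R_teams, R_parallel };

  int main() {
    // Expected capture regions for 'target teams distribute parallel for'.
    std::vector<Region> Regions{R_task, R_target, R_teams, R_parallel};
    const char *Name[] = {"task", "target", "teams", "parallel"};
    // Mirrors the llvm::enumerate() loop: one captured region per level,
    // each with its own implicit-parameter list (task params, target
    // params, and so on).
    for (unsigned Level = 0; Level < Regions.size(); ++Level)
      std::printf("level %u: start '%s' captured region\n", Level,
                  Name[Regions[Level]]);
    return 0;
  }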
>From 0778b5d0d296edaca2ac52f2c8d2c806d492355a Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 15:08:12 +0200
Subject: [PATCH 125/246] [InstCombine] Add test for computeConstantRange()
with non-splat poison (NFC)
---
.../test/Transforms/InstCombine/saturating-add-sub.ll | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
index 57977a72cd08f..5a29ee7f66e35 100644
--- a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
+++ b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
@@ -1062,6 +1062,17 @@ define <2 x i8> @test_vector_usub_add_nuw_no_ov_nonsplat1(<2 x i8> %a) {
ret <2 x i8> %r
}
+define <3 x i8> @test_vector_usub_add_nuw_no_ov_nonsplat1_poison(<3 x i8> %a) {
+; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat1_poison(
+; CHECK-NEXT: [[B:%.*]] = add nuw <3 x i8> [[A:%.*]], <i8 10, i8 10, i8 10>
+; CHECK-NEXT: [[R:%.*]] = call <3 x i8> @llvm.usub.sat.v3i8(<3 x i8> [[B]], <3 x i8> <i8 10, i8 9, i8 poison>)
+; CHECK-NEXT: ret <3 x i8> [[R]]
+;
+ %b = add nuw <3 x i8> %a, <i8 10, i8 10, i8 10>
+ %r = call <3 x i8> @llvm.usub.sat.v3i8(<3 x i8> %b, <3 x i8> <i8 10, i8 9, i8 poison>)
+ ret <3 x i8> %r
+}
+
; Can be optimized if the add nuw RHS constant range handles non-splat vectors.
define <2 x i8> @test_vector_usub_add_nuw_no_ov_nonsplat2(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat2(
>From 2dbb454791044e3ef91c8e7069f953b7406d78c6 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 15:06:27 +0200
Subject: [PATCH 126/246] [ValueTracking][LVI] Consolidate vector constant
range calculation
Add a common helper used for computeConstantRange() and LVI. The
implementation is a mix of both, with the efficient handling for
ConstantDataVector taken from computeConstantRange(), and the
general handling (including non-splat poison) from LVI.
---
llvm/include/llvm/Analysis/ValueTracking.h | 3 ++
llvm/lib/Analysis/LazyValueInfo.cpp | 27 +---------
llvm/lib/Analysis/ValueTracking.cpp | 51 +++++++++++++++----
.../InstCombine/saturating-add-sub.ll | 3 +-
4 files changed, 46 insertions(+), 38 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index b7b78cb9edab3..a67ad501982d2 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -904,6 +904,9 @@ bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
/// based on the vscale_range function attribute.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);
+/// Determine the possible constant range of a vector constant.
+ConstantRange getVectorConstantRange(const Constant *C);
+
/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index e9051e74b4577..468b08a15d7df 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -836,24 +836,6 @@ void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
}
}
-static ConstantRange getConstantRangeFromFixedVector(Constant *C,
- FixedVectorType *Ty) {
- unsigned BW = Ty->getScalarSizeInBits();
- ConstantRange CR = ConstantRange::getEmpty(BW);
- for (unsigned I = 0; I < Ty->getNumElements(); ++I) {
- Constant *Elem = C->getAggregateElement(I);
- if (!Elem)
- return ConstantRange::getFull(BW);
- if (isa<PoisonValue>(Elem))
- continue;
- auto *CI = dyn_cast<ConstantInt>(Elem);
- if (!CI)
- return ConstantRange::getFull(BW);
- CR = CR.unionWith(CI->getValue());
- }
- return CR;
-}
-
static ConstantRange toConstantRange(const ValueLatticeElement &Val,
Type *Ty, bool UndefAllowed = false) {
assert(Ty->isIntOrIntVectorTy() && "Must be integer type");
@@ -862,13 +844,8 @@ static ConstantRange toConstantRange(const ValueLatticeElement &Val,
unsigned BW = Ty->getScalarSizeInBits();
if (Val.isUnknown())
return ConstantRange::getEmpty(BW);
- if (Val.isConstant() && Ty->isVectorTy()) {
- if (auto *CI = dyn_cast_or_null<ConstantInt>(
- Val.getConstant()->getSplatValue(/*AllowPoison=*/true)))
- return ConstantRange(CI->getValue());
- if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
- return getConstantRangeFromFixedVector(Val.getConstant(), VTy);
- }
+ if (Val.isConstant() && Ty->isVectorTy())
+ return getVectorConstantRange(Val.getConstant());
return ConstantRange::getFull(BW);
}
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 7660009b088d0..5476dc5d85182 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -9498,6 +9498,39 @@ static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
}
}
+ConstantRange llvm::getVectorConstantRange(const Constant *C) {
+ assert(C->getType()->isVectorTy() && "Expected vector constant");
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(
+ C->getSplatValue(/*AllowPoison=*/true)))
+ return ConstantRange(CI->getValue());
+
+ unsigned BitWidth = C->getType()->getScalarSizeInBits();
+ if (auto *CDV = dyn_cast<ConstantDataVector>(C)) {
+ ConstantRange CR = ConstantRange::getEmpty(BitWidth);
+ for (unsigned I = 0, E = CDV->getNumElements(); I < E; ++I)
+ CR = CR.unionWith(CDV->getElementAsAPInt(I));
+ return CR;
+ }
+
+ if (auto *CV = dyn_cast<ConstantVector>(C)) {
+ ConstantRange CR = ConstantRange::getEmpty(BitWidth);
+ for (unsigned I = 0, E = CV->getNumOperands(); I < E; ++I) {
+ Constant *Elem = C->getAggregateElement(I);
+ if (!Elem)
+ return ConstantRange::getFull(BitWidth);
+ if (isa<PoisonValue>(Elem))
+ continue;
+ auto *CI = dyn_cast<ConstantInt>(Elem);
+ if (!CI)
+ return ConstantRange::getFull(BitWidth);
+ CR = CR.unionWith(CI->getValue());
+ }
+ return CR;
+ }
+
+ return ConstantRange::getFull(BitWidth);
+}
+
ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
bool UseInstrInfo, AssumptionCache *AC,
const Instruction *CtxI,
@@ -9508,19 +9541,15 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
if (Depth == MaxAnalysisRecursionDepth)
return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
- const APInt *C;
- if (match(V, m_APInt(C)))
- return ConstantRange(*C);
- unsigned BitWidth = V->getType()->getScalarSizeInBits();
-
- if (auto *VC = dyn_cast<ConstantDataVector>(V)) {
- ConstantRange CR = ConstantRange::getEmpty(BitWidth);
- for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
- ++ElemIdx)
- CR = CR.unionWith(VC->getElementAsAPInt(ElemIdx));
- return CR;
+ if (auto *C = dyn_cast<Constant>(V)) {
+ if (auto *CI = dyn_cast<ConstantInt>(C))
+ return ConstantRange(CI->getValue());
+ if (C->getType()->isVectorTy())
+ return getVectorConstantRange(C);
+ return ConstantRange::getFull(C->getType()->getScalarSizeInBits());
}
+ unsigned BitWidth = V->getType()->getScalarSizeInBits();
InstrInfoQuery IIQ(UseInstrInfo);
ConstantRange CR = ConstantRange::getFull(BitWidth);
if (auto *BO = dyn_cast<BinaryOperator>(V)) {
diff --git a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
index 5a29ee7f66e35..bf1568f1cd8c0 100644
--- a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
+++ b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
@@ -1064,8 +1064,7 @@ define <2 x i8> @test_vector_usub_add_nuw_no_ov_nonsplat1(<2 x i8> %a) {
define <3 x i8> @test_vector_usub_add_nuw_no_ov_nonsplat1_poison(<3 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat1_poison(
-; CHECK-NEXT: [[B:%.*]] = add nuw <3 x i8> [[A:%.*]], <i8 10, i8 10, i8 10>
-; CHECK-NEXT: [[R:%.*]] = call <3 x i8> @llvm.usub.sat.v3i8(<3 x i8> [[B]], <3 x i8> <i8 10, i8 9, i8 poison>)
+; CHECK-NEXT: [[R:%.*]] = add <3 x i8> [[A:%.*]], <i8 0, i8 1, i8 poison>
; CHECK-NEXT: ret <3 x i8> [[R]]
;
%b = add nuw <3 x i8> %a, <i8 10, i8 10, i8 10>
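Worked through on the test's constant: for <3 x i8> <i8 10, i8 9, i8 poison>
the new helper skips the poison lane and unions the rest, giving the range
[9, 10]; that is what lets the usub.sat above be proven non-saturating and
rewritten into a plain add. A sketch of the lane-union step with plain
integers (std::nullopt standing in for a poison lane, not the
llvm::ConstantRange API):

  #include <algorithm>
  #include <cstdio>
  #include <optional>
  #include <vector>

  int main() {
    // Lanes of <3 x i8> <i8 10, i8 9, i8 poison>.
    std::vector<std::optional<int>> Lanes{10, 9, std::nullopt};
    int Lo = 255, Hi = 0;
    for (const auto &L : Lanes) {
      if (!L)
        continue; // a poison lane contributes nothing, as in the LVI version
      Lo = std::min(Lo, *L);
      Hi = std::max(Hi, *L);
    }
    std::printf("range [%d, %d]\n", Lo, Hi); // prints: range [9, 10]
    return 0;
  }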
>From c1004cad4b62b70c4b316a49c5426271d4e1d740 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 15:25:59 +0200
Subject: [PATCH 127/246] [LVI] Use CmpInst::Predicate in APIs (NFC)
Unfortunately this requires including InstrTypes.h in the header,
but I think that's fine given that LazyValueInfo.h is not
widely used.
---
llvm/include/llvm/Analysis/LazyValueInfo.h | 7 +++--
llvm/lib/Analysis/LazyValueInfo.cpp | 32 ++++++++++------------
2 files changed, 19 insertions(+), 20 deletions(-)
diff --git a/llvm/include/llvm/Analysis/LazyValueInfo.h b/llvm/include/llvm/Analysis/LazyValueInfo.h
index 596fb2d73150f..1ac355e39cabe 100644
--- a/llvm/include/llvm/Analysis/LazyValueInfo.h
+++ b/llvm/include/llvm/Analysis/LazyValueInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ANALYSIS_LAZYVALUEINFO_H
#define LLVM_ANALYSIS_LAZYVALUEINFO_H
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
@@ -67,7 +68,7 @@ namespace llvm {
/// Determine whether the specified value comparison with a constant is
/// known to be true or false on the specified CFG edge. Pred is a CmpInst
/// predicate.
- Tristate getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
+ Tristate getPredicateOnEdge(CmpInst::Predicate Pred, Value *V, Constant *C,
BasicBlock *FromBB, BasicBlock *ToBB,
Instruction *CxtI = nullptr);
@@ -75,7 +76,7 @@ namespace llvm {
/// known to be true or false at the specified instruction. \p Pred is a
/// CmpInst predicate. If \p UseBlockValue is true, the block value is also
/// taken into account.
- Tristate getPredicateAt(unsigned Pred, Value *V, Constant *C,
+ Tristate getPredicateAt(CmpInst::Predicate Pred, Value *V, Constant *C,
Instruction *CxtI, bool UseBlockValue);
/// Determine whether the specified value comparison is known to be true
@@ -83,7 +84,7 @@ namespace llvm {
/// it still requires that one of them is a constant.
/// \p Pred is a CmpInst predicate.
/// If \p UseBlockValue is true, the block value is also taken into account.
- Tristate getPredicateAt(unsigned Pred, Value *LHS, Value *RHS,
+ Tristate getPredicateAt(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
Instruction *CxtI, bool UseBlockValue);
/// Determine whether the specified value is known to be a constant at the
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 468b08a15d7df..b30e6a6a367c5 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1775,8 +1775,8 @@ ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
}
static LazyValueInfo::Tristate
-getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
- const DataLayout &DL) {
+getPredicateResult(CmpInst::Predicate Pred, Constant *C,
+ const ValueLatticeElement &Val, const DataLayout &DL) {
// If we know the value is a constant, evaluate the conditional.
Constant *Res = nullptr;
if (Val.isConstant()) {
@@ -1805,8 +1805,8 @@ getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
return LazyValueInfo::False;
} else {
// Handle more complex predicates.
- ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
- (ICmpInst::Predicate)Pred, CI->getValue());
+ ConstantRange TrueValues =
+ ConstantRange::makeExactICmpRegion(Pred, CI->getValue());
if (TrueValues.contains(CR))
return LazyValueInfo::True;
if (TrueValues.inverse().contains(CR))
@@ -1840,9 +1840,9 @@ getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
/// Determine whether the specified value comparison with a constant is known to
/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
LazyValueInfo::Tristate
-LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
- BasicBlock *FromBB, BasicBlock *ToBB,
- Instruction *CxtI) {
+LazyValueInfo::getPredicateOnEdge(CmpInst::Predicate Pred, Value *V,
+ Constant *C, BasicBlock *FromBB,
+ BasicBlock *ToBB, Instruction *CxtI) {
Module *M = FromBB->getModule();
ValueLatticeElement Result =
getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
@@ -1850,9 +1850,10 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
return getPredicateResult(Pred, C, Result, M->getDataLayout());
}
-LazyValueInfo::Tristate
-LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
- Instruction *CxtI, bool UseBlockValue) {
+LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred,
+ Value *V, Constant *C,
+ Instruction *CxtI,
+ bool UseBlockValue) {
// Is or is not NonNull are common predicates being queried. If
// isKnownNonZero can tell us the result of the predicate, we can
// return it quickly. But this is only a fastpath, and falling
@@ -1956,14 +1957,12 @@ LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
return Unknown;
}
-LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
- Value *RHS,
+LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred,
+ Value *LHS, Value *RHS,
Instruction *CxtI,
bool UseBlockValue) {
- CmpInst::Predicate Pred = (CmpInst::Predicate)P;
-
if (auto *C = dyn_cast<Constant>(RHS))
- return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
+ return getPredicateAt(Pred, LHS, C, CxtI, UseBlockValue);
if (auto *C = dyn_cast<Constant>(LHS))
return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
UseBlockValue);
@@ -1981,8 +1980,7 @@ LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
ValueLatticeElement R =
getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
- if (Constant *Res = L.getCompare((CmpInst::Predicate)P, Ty, R,
- M->getDataLayout())) {
+ if (Constant *Res = L.getCompare(Pred, Ty, R, M->getDataLayout())) {
if (Res->isNullValue())
return LazyValueInfo::False;
if (Res->isOneValue())
>From dde3f17026be48c05a5d3876f12db72fdd6422ed Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Wed, 3 Jul 2024 14:30:47 +0100
Subject: [PATCH 128/246] [lldb] Fix printing of unsigned enum bitfields when
they contain the max value (#96202)
While testing register fields I found that if you put the max value into
a bitfield with an underlying type that is an unsigned enum, lldb would
not print the enum name.
This is because the code that matches values to names wasn't checking
whether the enum's type was signed; it just assumed it was.
So, for example, a 2-bit field with value 3 got sign-extended to -1, which
didn't match the enumerator value of 3, so lldb just printed the number
instead of the name.
For a value of 1, the top bit was 0 so the sign extend became a zero
extend, and lldb did print the name of the enumerator.
I added a new test because I needed to use C++ to get typed enums. It
checks min, max and an in-between value for signed and unsigned enums
applied to a bitfield.
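For illustration, a minimal standalone sketch (mine, not part of this
patch) of why unconditional sign extension of a 2-bit field breaks the
match; the signed right shift of a negative value is arithmetic on all
mainstream compilers (and guaranteed since C++20):
```
#include <cstdint>
#include <cstdio>

int main() {
  // Bit pattern 0b11 read from a 2-bit field whose underlying enum is
  // unsigned, so the intended value is 3.
  uint64_t raw = 0b11;
  // Sign-extending from 2 bits (what lldb did unconditionally) yields -1,
  // which no longer compares equal to the enumerator value 3.
  int64_t sext = (int64_t)(raw << 62) >> 62;
  // Zero-extending (correct for an unsigned underlying type) keeps 3.
  uint64_t zext = raw;
  printf("sext=%lld zext=%llu\n", (long long)sext, (unsigned long long)zext);
  return 0;
}
```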
---
.../TypeSystem/Clang/TypeSystemClang.cpp | 18 ++++++++---
.../expression/bitfield_enums/Makefile | 3 ++
.../bitfield_enums/TestBitfieldEnums.py | 31 +++++++++++++++++++
.../expression/bitfield_enums/main.cpp | 24 ++++++++++++++
4 files changed, 71 insertions(+), 5 deletions(-)
create mode 100644 lldb/test/API/commands/expression/bitfield_enums/Makefile
create mode 100644 lldb/test/API/commands/expression/bitfield_enums/TestBitfieldEnums.py
create mode 100644 lldb/test/API/commands/expression/bitfield_enums/main.cpp
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 093d27a92d718..e0fbb32b30b20 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -8639,8 +8639,13 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
const clang::EnumDecl *enum_decl = enutype->getDecl();
assert(enum_decl);
lldb::offset_t offset = byte_offset;
- const uint64_t enum_svalue = data.GetMaxS64Bitfield(
- &offset, byte_size, bitfield_bit_size, bitfield_bit_offset);
+ bool qual_type_is_signed = qual_type->isSignedIntegerOrEnumerationType();
+ const uint64_t enum_svalue =
+ qual_type_is_signed
+ ? data.GetMaxS64Bitfield(&offset, byte_size, bitfield_bit_size,
+ bitfield_bit_offset)
+ : data.GetMaxU64Bitfield(&offset, byte_size, bitfield_bit_size,
+ bitfield_bit_offset);
bool can_be_bitfield = true;
uint64_t covered_bits = 0;
int num_enumerators = 0;
@@ -8652,8 +8657,11 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
// enumerators. Also 0 doesn't make sense when the enumerators are used as
// flags.
for (auto *enumerator : enum_decl->enumerators()) {
- uint64_t val = enumerator->getInitVal().getSExtValue();
- val = llvm::SignExtend64(val, 8*byte_size);
+ llvm::APSInt init_val = enumerator->getInitVal();
+ uint64_t val =
+ qual_type_is_signed ? init_val.getSExtValue() : init_val.getZExtValue();
+ if (qual_type_is_signed)
+ val = llvm::SignExtend64(val, 8 * byte_size);
if (llvm::popcount(val) != 1 && (val & ~covered_bits) != 0)
can_be_bitfield = false;
covered_bits |= val;
@@ -8673,7 +8681,7 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
// No exact match, but we don't think this is a bitfield. Print the value as
// decimal.
if (!can_be_bitfield) {
- if (qual_type->isSignedIntegerOrEnumerationType())
+ if (qual_type_is_signed)
s.Printf("%" PRIi64, enum_svalue);
else
s.Printf("%" PRIu64, enum_uvalue);
diff --git a/lldb/test/API/commands/expression/bitfield_enums/Makefile b/lldb/test/API/commands/expression/bitfield_enums/Makefile
new file mode 100644
index 0000000000000..99998b20bcb05
--- /dev/null
+++ b/lldb/test/API/commands/expression/bitfield_enums/Makefile
@@ -0,0 +1,3 @@
+CXX_SOURCES := main.cpp
+
+include Makefile.rules
diff --git a/lldb/test/API/commands/expression/bitfield_enums/TestBitfieldEnums.py b/lldb/test/API/commands/expression/bitfield_enums/TestBitfieldEnums.py
new file mode 100644
index 0000000000000..a484b69300e7b
--- /dev/null
+++ b/lldb/test/API/commands/expression/bitfield_enums/TestBitfieldEnums.py
@@ -0,0 +1,31 @@
+"""
+Test that the expression parser accounts for the underlying type of bitfield
+enums when looking for matching values.
+"""
+
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class TestBitfieldEnum(TestBase):
+ def test_bitfield_enums(self):
+ self.build()
+
+ lldbutil.run_to_source_breakpoint(
+ self, "// break here", lldb.SBFileSpec("main.cpp", False)
+ )
+
+ self.expect_expr(
+ "bfs",
+ result_type="BitfieldStruct",
+ result_children=[
+ ValueCheck(name="signed_min", value="min"),
+ ValueCheck(name="signed_other", value="-1"),
+ ValueCheck(name="signed_max", value="max"),
+ ValueCheck(name="unsigned_min", value="min"),
+ ValueCheck(name="unsigned_other", value="1"),
+ ValueCheck(name="unsigned_max", value="max"),
+ ],
+ )
diff --git a/lldb/test/API/commands/expression/bitfield_enums/main.cpp b/lldb/test/API/commands/expression/bitfield_enums/main.cpp
new file mode 100644
index 0000000000000..f6c53b3100b93
--- /dev/null
+++ b/lldb/test/API/commands/expression/bitfield_enums/main.cpp
@@ -0,0 +1,24 @@
+enum class SignedEnum : int { min = -2, max = 1 };
+enum class UnsignedEnum : unsigned { min = 0, max = 3 };
+
+struct BitfieldStruct {
+ SignedEnum signed_min : 2;
+ SignedEnum signed_other : 2;
+ SignedEnum signed_max : 2;
+ UnsignedEnum unsigned_min : 2;
+ UnsignedEnum unsigned_other : 2;
+ UnsignedEnum unsigned_max : 2;
+};
+
+int main() {
+ BitfieldStruct bfs;
+ bfs.signed_min = SignedEnum::min;
+ bfs.signed_other = static_cast<SignedEnum>(-1);
+ bfs.signed_max = SignedEnum::max;
+
+ bfs.unsigned_min = UnsignedEnum::min;
+ bfs.unsigned_other = static_cast<UnsignedEnum>(1);
+ bfs.unsigned_max = UnsignedEnum::max;
+
+ return 0; // break here
+}
>From 41fddc4ec3302f125a5b84ae86c8027dedc89984 Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Wed, 3 Jul 2024 14:47:55 +0100
Subject: [PATCH 129/246] [lldb] Print empty enums as if they were unrecognised
normal enums (#97553)
Fixes #97514
Given this example:
```
enum E {};
int main()
{
E x = E(0);
E y = E(1);
E z = E(2);
return 0;
}
```
lldb used to print nothing for `x`, but `0x1` for `y` and `0x2` for `z`.
At first this seemed like the 0 case needed fixing, but the real issue
here is that an enum with no enumerators was being detected as a
"bitfield like enum", that is, an enum where every enumerator is either a
single bit value or the sum of previous single-bit values.
For these we do not print anything for a value of 0, as we assume it
must be the remainder after we've printed the other bits that were set
(I think this is also unfortunate, but I'm not addressing that here).
Clearly an enum with no enumerators cannot be in use as a bitfield, so
check that up front and print it as if it were a normal enum where none of
the enumerators matched. This means you now get:
```
(lldb) p x
(E) 0
(lldb) p y
(E) 1
(lldb) p z
(E) 2
```
This is a change from hex to decimal, but I think it's overall more
consistent. Printing hex here was never a conscious decision.
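As a rough standalone sketch (mine, not the patch's code) of the
"bitfield like enum" heuristic described above, with the new empty check
up front (requires C++20 for std::popcount):
```
#include <bit>
#include <cstdint>
#include <vector>

// An enum looks bitfield-like if every enumerator is a single bit or a
// combination of bits already covered by earlier enumerators.
bool looksLikeBitfieldEnum(const std::vector<uint64_t> &enumerators) {
  if (enumerators.empty())
    return false; // the fix: no enumerators means it cannot be a flag set
  uint64_t covered = 0;
  for (uint64_t val : enumerators) {
    if (std::popcount(val) != 1 && (val & ~covered) != 0)
      return false;
    covered |= val;
  }
  return true;
}

int main() {
  return looksLikeBitfieldEnum({2, 4}) && !looksLikeBitfieldEnum({}) ? 0 : 1;
}
```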
---
.../TypeSystem/Clang/TypeSystemClang.cpp | 33 +++++++++++--------
.../x86/debug-types-missing-signature.test | 4 +--
.../DumpValueObjectOptionsTests.cpp | 28 ++++++++++++----
3 files changed, 43 insertions(+), 22 deletions(-)
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index e0fbb32b30b20..48fc9b199a5e1 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -8656,20 +8656,25 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
// every enumerator is either a one bit value or a superset of the previous
// enumerators. Also 0 doesn't make sense when the enumerators are used as
// flags.
- for (auto *enumerator : enum_decl->enumerators()) {
- llvm::APSInt init_val = enumerator->getInitVal();
- uint64_t val =
- qual_type_is_signed ? init_val.getSExtValue() : init_val.getZExtValue();
- if (qual_type_is_signed)
- val = llvm::SignExtend64(val, 8 * byte_size);
- if (llvm::popcount(val) != 1 && (val & ~covered_bits) != 0)
- can_be_bitfield = false;
- covered_bits |= val;
- ++num_enumerators;
- if (val == enum_svalue) {
- // Found an exact match, that's all we need to do.
- s.PutCString(enumerator->getNameAsString());
- return true;
+ clang::EnumDecl::enumerator_range enumerators = enum_decl->enumerators();
+ if (enumerators.empty())
+ can_be_bitfield = false;
+ else {
+ for (auto *enumerator : enumerators) {
+ llvm::APSInt init_val = enumerator->getInitVal();
+ uint64_t val = qual_type_is_signed ? init_val.getSExtValue()
+ : init_val.getZExtValue();
+ if (qual_type_is_signed)
+ val = llvm::SignExtend64(val, 8 * byte_size);
+ if (llvm::popcount(val) != 1 && (val & ~covered_bits) != 0)
+ can_be_bitfield = false;
+ covered_bits |= val;
+ ++num_enumerators;
+ if (val == enum_svalue) {
+ // Found an exact match, that's all we need to do.
+ s.PutCString(enumerator->getNameAsString());
+ return true;
+ }
}
}
diff --git a/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test b/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
index 548dd6cdbc275..b2c792ed6003e 100644
--- a/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
+++ b/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
@@ -22,5 +22,5 @@ PRINTEC: use of undeclared identifier 'EC'
RUN: %lldb %t -b -o "target variable a e ec" | FileCheck --check-prefix=VARS %s
VARS: (const (unnamed struct)) a = <incomplete type "const (unnamed struct)">
-VARS: (const (unnamed enum)) e = 0x1
-VARS: (const (unnamed enum)) ec = 0x1
+VARS: (const (unnamed enum)) e = 1
+VARS: (const (unnamed enum)) ec = 1
diff --git a/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp b/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
index 6cb982d7f5980..767f19872f858 100644
--- a/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
+++ b/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
@@ -71,12 +71,13 @@ class ValueObjectMockProcessTest : public ::testing::Test {
}
CompilerType
- MakeEnumType(const std::vector<std::pair<const char *, int>> enumerators) {
- CompilerType uint_type = m_type_system->GetBuiltinTypeForEncodingAndBitSize(
- lldb::eEncodingUint, 32);
+ MakeEnumType(const std::vector<std::pair<const char *, int>> enumerators,
+ bool is_signed) {
+ CompilerType int_type = m_type_system->GetBuiltinTypeForEncodingAndBitSize(
+ is_signed ? lldb::eEncodingSint : lldb::eEncodingUint, 32);
CompilerType enum_type = m_type_system->CreateEnumerationType(
"TestEnum", m_type_system->GetTranslationUnitDecl(),
- OptionalClangModuleID(), Declaration(), uint_type, false);
+ OptionalClangModuleID(), Declaration(), int_type, false);
m_type_system->StartTagDeclarationDefinition(enum_type);
Declaration decl;
@@ -123,12 +124,27 @@ class ValueObjectMockProcessTest : public ::testing::Test {
lldb::ProcessSP m_process_sp;
};
+TEST_F(ValueObjectMockProcessTest, EmptyEnum) {
+ // All values of an empty enum should be shown as plain numbers.
+ TestDumpValueObject(MakeEnumType({}, false),
+ {{0, {}, "(TestEnum) test_var = 0\n"},
+ {1, {}, "(TestEnum) test_var = 1\n"},
+ {2, {}, "(TestEnum) test_var = 2\n"}});
+
+ TestDumpValueObject(MakeEnumType({}, true),
+ {{-2, {}, "(TestEnum) test_var = -2\n"},
+ {-1, {}, "(TestEnum) test_var = -1\n"},
+ {0, {}, "(TestEnum) test_var = 0\n"},
+ {1, {}, "(TestEnum) test_var = 1\n"},
+ {2, {}, "(TestEnum) test_var = 2\n"}});
+}
+
TEST_F(ValueObjectMockProcessTest, Enum) {
// This is not a bitfield-like enum, so values are printed as decimal by
// default. Also we only show the enumerator name if the value is an
// exact match.
TestDumpValueObject(
- MakeEnumType({{"test_2", 2}, {"test_3", 3}}),
+ MakeEnumType({{"test_2", 2}, {"test_3", 3}}, false),
{{0, {}, "(TestEnum) test_var = 0\n"},
{1, {}, "(TestEnum) test_var = 1\n"},
{2, {}, "(TestEnum) test_var = test_2\n"},
@@ -152,7 +168,7 @@ TEST_F(ValueObjectMockProcessTest, BitFieldLikeEnum) {
// as hex, a value of 0 shows nothing, and values with no exact enumerator are
// shown as combinations of the other values.
TestDumpValueObject(
- MakeEnumType({{"test_2", 2}, {"test_4", 4}}),
+ MakeEnumType({{"test_2", 2}, {"test_4", 4}}, false),
{
{0, {}, "(TestEnum) test_var =\n"},
{1, {}, "(TestEnum) test_var = 0x1\n"},
>From 31015240d366e4bf6f114856caa6e9ce90742b7f Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Wed, 3 Jul 2024 14:48:48 +0100
Subject: [PATCH 130/246] [lldb] Print "0x0" for bitfield like enums where the
value is 0 (#97557)
Enums like this one are treated as bitfield-like enums:
`enum FlagsLike { B = 2, C = 4 };`
lldb recognises them as collections of flags, so you can have "B | C".
If there are any bits not covered by the enumerators, the remainder is
printed as hex, e.g. "B | C | 0x1".
When the value was 0 we would not match any of the enumerators, and the
remainder check requires that the remainder is non-zero, so lldb would
print nothing at all.
I assume this is a bug, because knowing that no flags are set is just as
useful as knowing that some unknown bit was set to make the value
non-zero.
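A minimal illustration (the expected "0x0" output matches the updated unit
test below):
```
enum FlagsLike { B = 2, C = 4 };

int main() {
  FlagsLike f = FlagsLike(0); // no flags set
  // Before this patch lldb printed nothing for f; with it, lldb prints:
  //   (FlagsLike) f = 0x0
  return 0; // break here and "p f"
}
```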
---
.../Plugins/TypeSystem/Clang/TypeSystemClang.cpp | 10 +++++++++-
.../ValueObject/DumpValueObjectOptionsTests.cpp | 6 +++---
2 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 48fc9b199a5e1..f70efe5ed57e4 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -8693,6 +8693,13 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
return true;
}
+ if (!enum_uvalue) {
+ // This is a bitfield enum, but the value is 0 so we know it won't match
+ // with any of the enumerators.
+ s.Printf("0x%" PRIx64, enum_uvalue);
+ return true;
+ }
+
uint64_t remaining_value = enum_uvalue;
std::vector<std::pair<uint64_t, llvm::StringRef>> values;
values.reserve(num_enumerators);
@@ -8717,7 +8724,8 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
s.PutCString(" | ");
}
- // If there is a remainder that is not covered by the value, print it as hex.
+ // If there is a remainder that is not covered by the value, print it as
+ // hex.
if (remaining_value)
s.Printf("0x%" PRIx64, remaining_value);
diff --git a/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp b/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
index 767f19872f858..af6fa55bab171 100644
--- a/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
+++ b/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
@@ -165,12 +165,12 @@ TEST_F(ValueObjectMockProcessTest, Enum) {
TEST_F(ValueObjectMockProcessTest, BitFieldLikeEnum) {
// These enumerators set individual bits in the value, as if it were a flag
// set. lldb treats this as a "bitfield like enum". This means we show values
- // as hex, a value of 0 shows nothing, and values with no exact enumerator are
- // shown as combinations of the other values.
+ // as hex, and values without exact matches are shown as a combination of
+ // enumerators and any remaining value left over.
TestDumpValueObject(
MakeEnumType({{"test_2", 2}, {"test_4", 4}}, false),
{
- {0, {}, "(TestEnum) test_var =\n"},
+ {0, {}, "(TestEnum) test_var = 0x0\n"},
{1, {}, "(TestEnum) test_var = 0x1\n"},
{2, {}, "(TestEnum) test_var = test_2\n"},
{4, {}, "(TestEnum) test_var = test_4\n"},
>From 50c81128de8616117118564eff22cf508cba7848 Mon Sep 17 00:00:00 2001
From: Aaron Ballman <aaron at aaronballman.com>
Date: Wed, 3 Jul 2024 10:11:31 -0400
Subject: [PATCH 131/246] Rename C2x to C23; NFC
This renames the test directory for C23 tests.
---
clang/test/C/{C2x => C23}/Inputs/bits.bin | 0
clang/test/C/{C2x => C23}/Inputs/boop.h | 0
clang/test/C/{C2x => C23}/Inputs/i.dat | 0
clang/test/C/{C2x => C23}/Inputs/jump.wav | 0
clang/test/C/{C2x => C23}/Inputs/s.dat | 0
clang/test/C/{C2x => C23}/n2322.c | 0
clang/test/C/{C2x => C23}/n2350.c | 0
clang/test/C/{C2x => C23}/n2359.c | 0
clang/test/C/{C2x => C23}/n2508.c | 0
clang/test/C/{C2x => C23}/n2549.c | 0
clang/test/C/{C2x => C23}/n2607.c | 0
clang/test/C/{C2x => C23}/n2670.c | 0
clang/test/C/{C2x => C23}/n2683.c | 0
clang/test/C/{C2x => C23}/n2683_2.c | 0
clang/test/C/{C2x => C23}/n2819.c | 0
clang/test/C/{C2x => C23}/n2826.c | 0
clang/test/C/{C2x => C23}/n2836_n2939.c | 0
clang/test/C/{C2x => C23}/n2838.c | 0
clang/test/C/{C2x => C23}/n2886.c | 0
clang/test/C/{C2x => C23}/n2900_n3011.c | 0
clang/test/C/{C2x => C23}/n2900_n3011_2.c | 0
clang/test/C/{C2x => C23}/n2927.c | 0
clang/test/C/{C2x => C23}/n2927_2.c | 0
clang/test/C/{C2x => C23}/n2930.c | 0
clang/test/C/{C2x => C23}/n2934.c | 0
clang/test/C/{C2x => C23}/n2940.c | 0
clang/test/C/{C2x => C23}/n2975.c | 0
clang/test/C/{C2x => C23}/n3007.c | 0
clang/test/C/{C2x => C23}/n3017.c | 0
clang/test/C/{C2x => C23}/n3018.c | 0
clang/test/C/{C2x => C23}/n3033.c | 0
clang/test/C/{C2x => C23}/n3033_2.c | 0
clang/test/C/{C2x => C23}/n3035.c | 0
clang/test/C/{C2x => C23}/n3042.c | 0
34 files changed, 0 insertions(+), 0 deletions(-)
rename clang/test/C/{C2x => C23}/Inputs/bits.bin (100%)
rename clang/test/C/{C2x => C23}/Inputs/boop.h (100%)
rename clang/test/C/{C2x => C23}/Inputs/i.dat (100%)
rename clang/test/C/{C2x => C23}/Inputs/jump.wav (100%)
rename clang/test/C/{C2x => C23}/Inputs/s.dat (100%)
rename clang/test/C/{C2x => C23}/n2322.c (100%)
rename clang/test/C/{C2x => C23}/n2350.c (100%)
rename clang/test/C/{C2x => C23}/n2359.c (100%)
rename clang/test/C/{C2x => C23}/n2508.c (100%)
rename clang/test/C/{C2x => C23}/n2549.c (100%)
rename clang/test/C/{C2x => C23}/n2607.c (100%)
rename clang/test/C/{C2x => C23}/n2670.c (100%)
rename clang/test/C/{C2x => C23}/n2683.c (100%)
rename clang/test/C/{C2x => C23}/n2683_2.c (100%)
rename clang/test/C/{C2x => C23}/n2819.c (100%)
rename clang/test/C/{C2x => C23}/n2826.c (100%)
rename clang/test/C/{C2x => C23}/n2836_n2939.c (100%)
rename clang/test/C/{C2x => C23}/n2838.c (100%)
rename clang/test/C/{C2x => C23}/n2886.c (100%)
rename clang/test/C/{C2x => C23}/n2900_n3011.c (100%)
rename clang/test/C/{C2x => C23}/n2900_n3011_2.c (100%)
rename clang/test/C/{C2x => C23}/n2927.c (100%)
rename clang/test/C/{C2x => C23}/n2927_2.c (100%)
rename clang/test/C/{C2x => C23}/n2930.c (100%)
rename clang/test/C/{C2x => C23}/n2934.c (100%)
rename clang/test/C/{C2x => C23}/n2940.c (100%)
rename clang/test/C/{C2x => C23}/n2975.c (100%)
rename clang/test/C/{C2x => C23}/n3007.c (100%)
rename clang/test/C/{C2x => C23}/n3017.c (100%)
rename clang/test/C/{C2x => C23}/n3018.c (100%)
rename clang/test/C/{C2x => C23}/n3033.c (100%)
rename clang/test/C/{C2x => C23}/n3033_2.c (100%)
rename clang/test/C/{C2x => C23}/n3035.c (100%)
rename clang/test/C/{C2x => C23}/n3042.c (100%)
diff --git a/clang/test/C/C2x/Inputs/bits.bin b/clang/test/C/C23/Inputs/bits.bin
similarity index 100%
rename from clang/test/C/C2x/Inputs/bits.bin
rename to clang/test/C/C23/Inputs/bits.bin
diff --git a/clang/test/C/C2x/Inputs/boop.h b/clang/test/C/C23/Inputs/boop.h
similarity index 100%
rename from clang/test/C/C2x/Inputs/boop.h
rename to clang/test/C/C23/Inputs/boop.h
diff --git a/clang/test/C/C2x/Inputs/i.dat b/clang/test/C/C23/Inputs/i.dat
similarity index 100%
rename from clang/test/C/C2x/Inputs/i.dat
rename to clang/test/C/C23/Inputs/i.dat
diff --git a/clang/test/C/C2x/Inputs/jump.wav b/clang/test/C/C23/Inputs/jump.wav
similarity index 100%
rename from clang/test/C/C2x/Inputs/jump.wav
rename to clang/test/C/C23/Inputs/jump.wav
diff --git a/clang/test/C/C2x/Inputs/s.dat b/clang/test/C/C23/Inputs/s.dat
similarity index 100%
rename from clang/test/C/C2x/Inputs/s.dat
rename to clang/test/C/C23/Inputs/s.dat
diff --git a/clang/test/C/C2x/n2322.c b/clang/test/C/C23/n2322.c
similarity index 100%
rename from clang/test/C/C2x/n2322.c
rename to clang/test/C/C23/n2322.c
diff --git a/clang/test/C/C2x/n2350.c b/clang/test/C/C23/n2350.c
similarity index 100%
rename from clang/test/C/C2x/n2350.c
rename to clang/test/C/C23/n2350.c
diff --git a/clang/test/C/C2x/n2359.c b/clang/test/C/C23/n2359.c
similarity index 100%
rename from clang/test/C/C2x/n2359.c
rename to clang/test/C/C23/n2359.c
diff --git a/clang/test/C/C2x/n2508.c b/clang/test/C/C23/n2508.c
similarity index 100%
rename from clang/test/C/C2x/n2508.c
rename to clang/test/C/C23/n2508.c
diff --git a/clang/test/C/C2x/n2549.c b/clang/test/C/C23/n2549.c
similarity index 100%
rename from clang/test/C/C2x/n2549.c
rename to clang/test/C/C23/n2549.c
diff --git a/clang/test/C/C2x/n2607.c b/clang/test/C/C23/n2607.c
similarity index 100%
rename from clang/test/C/C2x/n2607.c
rename to clang/test/C/C23/n2607.c
diff --git a/clang/test/C/C2x/n2670.c b/clang/test/C/C23/n2670.c
similarity index 100%
rename from clang/test/C/C2x/n2670.c
rename to clang/test/C/C23/n2670.c
diff --git a/clang/test/C/C2x/n2683.c b/clang/test/C/C23/n2683.c
similarity index 100%
rename from clang/test/C/C2x/n2683.c
rename to clang/test/C/C23/n2683.c
diff --git a/clang/test/C/C2x/n2683_2.c b/clang/test/C/C23/n2683_2.c
similarity index 100%
rename from clang/test/C/C2x/n2683_2.c
rename to clang/test/C/C23/n2683_2.c
diff --git a/clang/test/C/C2x/n2819.c b/clang/test/C/C23/n2819.c
similarity index 100%
rename from clang/test/C/C2x/n2819.c
rename to clang/test/C/C23/n2819.c
diff --git a/clang/test/C/C2x/n2826.c b/clang/test/C/C23/n2826.c
similarity index 100%
rename from clang/test/C/C2x/n2826.c
rename to clang/test/C/C23/n2826.c
diff --git a/clang/test/C/C2x/n2836_n2939.c b/clang/test/C/C23/n2836_n2939.c
similarity index 100%
rename from clang/test/C/C2x/n2836_n2939.c
rename to clang/test/C/C23/n2836_n2939.c
diff --git a/clang/test/C/C2x/n2838.c b/clang/test/C/C23/n2838.c
similarity index 100%
rename from clang/test/C/C2x/n2838.c
rename to clang/test/C/C23/n2838.c
diff --git a/clang/test/C/C2x/n2886.c b/clang/test/C/C23/n2886.c
similarity index 100%
rename from clang/test/C/C2x/n2886.c
rename to clang/test/C/C23/n2886.c
diff --git a/clang/test/C/C2x/n2900_n3011.c b/clang/test/C/C23/n2900_n3011.c
similarity index 100%
rename from clang/test/C/C2x/n2900_n3011.c
rename to clang/test/C/C23/n2900_n3011.c
diff --git a/clang/test/C/C2x/n2900_n3011_2.c b/clang/test/C/C23/n2900_n3011_2.c
similarity index 100%
rename from clang/test/C/C2x/n2900_n3011_2.c
rename to clang/test/C/C23/n2900_n3011_2.c
diff --git a/clang/test/C/C2x/n2927.c b/clang/test/C/C23/n2927.c
similarity index 100%
rename from clang/test/C/C2x/n2927.c
rename to clang/test/C/C23/n2927.c
diff --git a/clang/test/C/C2x/n2927_2.c b/clang/test/C/C23/n2927_2.c
similarity index 100%
rename from clang/test/C/C2x/n2927_2.c
rename to clang/test/C/C23/n2927_2.c
diff --git a/clang/test/C/C2x/n2930.c b/clang/test/C/C23/n2930.c
similarity index 100%
rename from clang/test/C/C2x/n2930.c
rename to clang/test/C/C23/n2930.c
diff --git a/clang/test/C/C2x/n2934.c b/clang/test/C/C23/n2934.c
similarity index 100%
rename from clang/test/C/C2x/n2934.c
rename to clang/test/C/C23/n2934.c
diff --git a/clang/test/C/C2x/n2940.c b/clang/test/C/C23/n2940.c
similarity index 100%
rename from clang/test/C/C2x/n2940.c
rename to clang/test/C/C23/n2940.c
diff --git a/clang/test/C/C2x/n2975.c b/clang/test/C/C23/n2975.c
similarity index 100%
rename from clang/test/C/C2x/n2975.c
rename to clang/test/C/C23/n2975.c
diff --git a/clang/test/C/C2x/n3007.c b/clang/test/C/C23/n3007.c
similarity index 100%
rename from clang/test/C/C2x/n3007.c
rename to clang/test/C/C23/n3007.c
diff --git a/clang/test/C/C2x/n3017.c b/clang/test/C/C23/n3017.c
similarity index 100%
rename from clang/test/C/C2x/n3017.c
rename to clang/test/C/C23/n3017.c
diff --git a/clang/test/C/C2x/n3018.c b/clang/test/C/C23/n3018.c
similarity index 100%
rename from clang/test/C/C2x/n3018.c
rename to clang/test/C/C23/n3018.c
diff --git a/clang/test/C/C2x/n3033.c b/clang/test/C/C23/n3033.c
similarity index 100%
rename from clang/test/C/C2x/n3033.c
rename to clang/test/C/C23/n3033.c
diff --git a/clang/test/C/C2x/n3033_2.c b/clang/test/C/C23/n3033_2.c
similarity index 100%
rename from clang/test/C/C2x/n3033_2.c
rename to clang/test/C/C23/n3033_2.c
diff --git a/clang/test/C/C2x/n3035.c b/clang/test/C/C23/n3035.c
similarity index 100%
rename from clang/test/C/C2x/n3035.c
rename to clang/test/C/C23/n3035.c
diff --git a/clang/test/C/C2x/n3042.c b/clang/test/C/C23/n3042.c
similarity index 100%
rename from clang/test/C/C2x/n3042.c
rename to clang/test/C/C23/n3042.c
>From 495452e7da8fad7bd311fd041530d72c101da643 Mon Sep 17 00:00:00 2001
From: Izaak Schroeder <izaak.schroeder at gmail.com>
Date: Wed, 3 Jul 2024 07:25:43 -0700
Subject: [PATCH 132/246] [libunwind] Remove needless `sys/uio.h` (#97495)
No reference to `readv` or `writev`. This makes `libcxx` happy when
compiling against clang's `libc` as part of
https://github.com/llvm/llvm-project/issues/97191.
---
libunwind/src/UnwindCursor.hpp | 1 -
1 file changed, 1 deletion(-)
diff --git a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp
index 66fe8e2a32cca..677e842d8a22b 100644
--- a/libunwind/src/UnwindCursor.hpp
+++ b/libunwind/src/UnwindCursor.hpp
@@ -36,7 +36,6 @@
#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
-#include <sys/uio.h>
#include <unistd.h>
#define _LIBUNWIND_CHECK_LINUX_SIGRETURN 1
#endif
>From 9ce895c1ae12ebed4249f2d693dcbff146193c60 Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser at berlin.de>
Date: Wed, 3 Jul 2024 16:29:58 +0200
Subject: [PATCH 133/246] [libc++] Use GCC compilers from compiler explorer
(#97219)
This sidesteps the very new host system requirements of recent GCC
releases, making it a lot easier to update GCC in a timely manner.
---
libcxx/utils/ci/Dockerfile | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/libcxx/utils/ci/Dockerfile b/libcxx/utils/ci/Dockerfile
index 234b0b5873eb6..9e1865ee61fdf 100644
--- a/libcxx/utils/ci/Dockerfile
+++ b/libcxx/utils/ci/Dockerfile
@@ -99,9 +99,9 @@ RUN sudo apt-get update \
unzip \
uuid-dev \
wget \
+ xz-utils \
&& sudo rm -rf /var/lib/apt/lists/*
-
# Install various tools used by the build or the test suite
#RUN apt-get update && apt-get install -y ninja-build python3 python3-distutils python3-psutil git gdb ccache
# TODO add ninja-build once 1.11 is available in Ubuntu, also remove the manual installation.
@@ -142,13 +142,15 @@ EOF
# Install the most recent GCC, like clang install the previous version as a transition.
RUN <<EOF
- sudo apt-get update
- sudo apt-get install -y \
- gcc-$((GCC_LATEST_VERSION - 1)) \
- g++-$((GCC_LATEST_VERSION - 1)) \
- gcc-$GCC_LATEST_VERSION \
- g++-$GCC_LATEST_VERSION
- sudo rm -rf /var/lib/apt/lists/*
+ sudo git clone https://github.com/compiler-explorer/infra.git /tmp/ce-infra
+ (cd /tmp/ce-infra && sudo make ce)
+ sudo /tmp/ce-infra/bin/ce_install install compilers/c++/x86/gcc $GCC_LATEST_VERSION.1.0
+ sudo /tmp/ce-infra/bin/ce_install install compilers/c++/x86/gcc $((GCC_LATEST_VERSION - 1)).1.0
+ sudo ln -s /opt/compiler-explorer/gcc-$GCC_LATEST_VERSION.1.0/bin/gcc /usr/bin/gcc-$GCC_LATEST_VERSION
+ sudo ln -s /opt/compiler-explorer/gcc-$GCC_LATEST_VERSION.1.0/bin/g++ /usr/bin/g++-$GCC_LATEST_VERSION
+ sudo ln -s /opt/compiler-explorer/gcc-$((GCC_LATEST_VERSION - 1)).1.0/bin/gcc /usr/bin/gcc-$((GCC_LATEST_VERSION - 1))
+ sudo ln -s /opt/compiler-explorer/gcc-$((GCC_LATEST_VERSION - 1)).1.0/bin/g++ /usr/bin/g++-$((GCC_LATEST_VERSION - 1))
+ sudo rm -rf /tmp/ce-infra
EOF
RUN <<EOF
>From 7bb251a91a4f57aed458aa0572c135b5374cd2f2 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 15:24:11 +0100
Subject: [PATCH 134/246] [InstCombine][X86] Add constant folding for
PMULH/PMULHU/PMULHRS intrinsics
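For reference, a scalar model of the three folds (my sketch; the patch
implements them on vectors via IRBuilder, extending to twice the element
width before multiplying):
```
#include <cstdint>
#include <cstdio>

// PMULH/PMULHU: the high 16 bits of the signed/unsigned 32-bit product.
int16_t pmulh(int16_t a, int16_t b) {
  return (int16_t)(((int32_t)a * b) >> 16);
}
uint16_t pmulhu(uint16_t a, uint16_t b) {
  return (uint16_t)(((uint32_t)a * b) >> 16);
}

// PMULHRSW: shift the product right by 14, add the rounding bit, drop it.
int16_t pmulhrsw(int16_t a, int16_t b) {
  int32_t prod = (int32_t)a * b;
  return (int16_t)(((prod >> 14) + 1) >> 1);
}

int main() {
  // First lane of the fold_pmulh* tests below: a = -1, b = -5.
  // pmulh -> 0, pmulhu -> 65530 (0xFFFA, i.e. -6 as i16), pmulhrsw -> 0.
  printf("%d %u %d\n", pmulh(-1, -5), pmulhu(0xFFFF, 0xFFFB),
         pmulhrsw(-1, -5));
  return 0;
}
```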
---
.../Target/X86/X86InstCombineIntrinsic.cpp | 47 +++++++++++++++++--
.../Transforms/InstCombine/X86/x86-pmulh.ll | 9 ++--
.../Transforms/InstCombine/X86/x86-pmulhrs.ll | 9 ++--
.../Transforms/InstCombine/X86/x86-pmulhu.ll | 9 ++--
4 files changed, 51 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index 7ac149852be97..6d4734d477b3e 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -503,13 +503,15 @@ static Value *simplifyX86pack(IntrinsicInst &II,
}
static Value *simplifyX86pmulh(IntrinsicInst &II,
- InstCombiner::BuilderTy &Builder) {
+ InstCombiner::BuilderTy &Builder, bool IsSigned,
+ bool IsRounding) {
Value *Arg0 = II.getArgOperand(0);
Value *Arg1 = II.getArgOperand(1);
auto *ResTy = cast<FixedVectorType>(II.getType());
- [[maybe_unused]] auto *ArgTy = cast<FixedVectorType>(Arg0->getType());
+ auto *ArgTy = cast<FixedVectorType>(Arg0->getType());
assert(ArgTy == ResTy && ResTy->getScalarSizeInBits() == 16 &&
"Unexpected PMULH types");
+ assert((!IsRounding || IsSigned) && "PMULHRS instruction must be signed");
// Multiply by undef -> zero (NOT undef!) as other arg could still be zero.
if (isa<UndefValue>(Arg0) || isa<UndefValue>(Arg1))
@@ -519,8 +521,33 @@ static Value *simplifyX86pmulh(IntrinsicInst &II,
if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1))
return ConstantAggregateZero::get(ResTy);
- // TODO: Constant folding.
- return nullptr;
+ // Constant folding.
+ if (!isa<Constant>(Arg0) || !isa<Constant>(Arg1))
+ return nullptr;
+
+ // Extend to twice the width and multiply.
+ auto Cast =
+ IsSigned ? Instruction::CastOps::SExt : Instruction::CastOps::ZExt;
+ auto *ExtTy = FixedVectorType::getExtendedElementVectorType(ArgTy);
+ Value *LHS = Builder.CreateCast(Cast, Arg0, ExtTy);
+ Value *RHS = Builder.CreateCast(Cast, Arg1, ExtTy);
+ Value *Mul = Builder.CreateMul(LHS, RHS);
+
+ if (IsRounding) {
+ // PMULHRSW: truncate to vXi18 of the most significant bits, add one and
+ // extract bits[16:1].
+ auto *RndEltTy = IntegerType::get(ExtTy->getContext(), 18);
+ auto *RndTy = FixedVectorType::get(RndEltTy, ExtTy);
+ Mul = Builder.CreateLShr(Mul, 14);
+ Mul = Builder.CreateTrunc(Mul, RndTy);
+ Mul = Builder.CreateAdd(Mul, ConstantInt::get(RndTy, 1));
+ Mul = Builder.CreateLShr(Mul, 1);
+ } else {
+ // PMULH/PMULHU: extract the vXi16 most significant bits.
+ Mul = Builder.CreateLShr(Mul, 16);
+ }
+
+ return Builder.CreateTrunc(Mul, ResTy);
}
static Value *simplifyX86pmadd(IntrinsicInst &II,
@@ -2592,13 +2619,23 @@ X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
case Intrinsic::x86_sse2_pmulh_w:
case Intrinsic::x86_avx2_pmulh_w:
case Intrinsic::x86_avx512_pmulh_w_512:
+ if (Value *V = simplifyX86pmulh(II, IC.Builder, true, false)) {
+ return IC.replaceInstUsesWith(II, V);
+ }
+ break;
+
case Intrinsic::x86_sse2_pmulhu_w:
case Intrinsic::x86_avx2_pmulhu_w:
case Intrinsic::x86_avx512_pmulhu_w_512:
+ if (Value *V = simplifyX86pmulh(II, IC.Builder, false, false)) {
+ return IC.replaceInstUsesWith(II, V);
+ }
+ break;
+
case Intrinsic::x86_ssse3_pmul_hr_sw_128:
case Intrinsic::x86_avx2_pmul_hr_sw:
case Intrinsic::x86_avx512_pmul_hr_sw_512:
- if (Value *V = simplifyX86pmulh(II, IC.Builder)) {
+ if (Value *V = simplifyX86pmulh(II, IC.Builder, true, true)) {
return IC.replaceInstUsesWith(II, V);
}
break;
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
index d6a06e7d08358..53b15383aec9a 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
@@ -111,8 +111,7 @@ define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
define <8 x i16> @fold_pmulh_128() {
; CHECK-LABEL: @fold_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> <i16 0, i16 0, i16 -2, i16 -2, i16 0, i16 -1, i16 -4, i16 -4>
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
ret <8 x i16> %1
@@ -120,8 +119,7 @@ define <8 x i16> @fold_pmulh_128() {
define <16 x i16> @fold_pmulh_256() {
; CHECK-LABEL: @fold_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> <i16 0, i16 -1, i16 -1, i16 1, i16 0, i16 0, i16 -3, i16 3, i16 -1, i16 -1, i16 4, i16 5, i16 -1, i16 -1, i16 6, i16 -8>
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
ret <16 x i16> %1
@@ -129,8 +127,7 @@ define <16 x i16> @fold_pmulh_256() {
define <32 x i16> @fold_pmulh_512() {
; CHECK-LABEL: @fold_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> <i16 0, i16 -1, i16 -1, i16 1, i16 0, i16 0, i16 -3, i16 3, i16 -1, i16 -1, i16 4, i16 5, i16 -1, i16 -1, i16 6, i16 -8, i16 0, i16 -1, i16 -1, i16 1, i16 0, i16 0, i16 -3, i16 3, i16 -1, i16 -1, i16 4, i16 5, i16 -1, i16 -1, i16 6, i16 -8>
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
ret <32 x i16> %1
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
index 2c42534cae8b1..acc3fd0803365 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
@@ -111,8 +111,7 @@ define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
define <8 x i16> @fold_pmulh_128() {
; CHECK-LABEL: @fold_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> <i16 0, i16 0, i16 -3, i16 -4, i16 0, i16 0, i16 -7, i16 -8>
;
%1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
ret <8 x i16> %1
@@ -120,8 +119,7 @@ define <8 x i16> @fold_pmulh_128() {
define <16 x i16> @fold_pmulh_256() {
; CHECK-LABEL: @fold_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> <i16 0, i16 0, i16 -2, i16 3, i16 0, i16 0, i16 -6, i16 7, i16 0, i16 0, i16 10, i16 11, i16 0, i16 0, i16 14, i16 -15>
;
%1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
ret <16 x i16> %1
@@ -129,8 +127,7 @@ define <16 x i16> @fold_pmulh_256() {
define <32 x i16> @fold_pmulh_512() {
; CHECK-LABEL: @fold_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> <i16 0, i16 0, i16 -2, i16 3, i16 0, i16 0, i16 -6, i16 7, i16 0, i16 0, i16 10, i16 11, i16 0, i16 0, i16 14, i16 -15, i16 0, i16 0, i16 -2, i16 3, i16 0, i16 0, i16 -6, i16 7, i16 0, i16 0, i16 10, i16 11, i16 0, i16 0, i16 14, i16 -15>
;
%1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
ret <32 x i16> %1
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
index 81b890b7df6e6..52945ce82a183 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
@@ -111,8 +111,7 @@ define <32 x i16> @zero_pmulhu_512_commute(<32 x i16> %a0) {
define <8 x i16> @fold_pmulhu_128() {
; CHECK-LABEL: @fold_pmulhu_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> <i16 -6, i16 0, i16 1, i16 32763, i16 -14, i16 5, i16 3, i16 32757>
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> <i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8>, <8 x i16> <i16 -5, i16 7, i16 -32768, i16 32765, i16 -9, i16 -11, i16 -32763, i16 32761>)
ret <8 x i16> %1
@@ -120,8 +119,7 @@ define <8 x i16> @fold_pmulhu_128() {
define <16 x i16> @fold_pmulhu_256() {
; CHECK-LABEL: @fold_pmulhu_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> <i16 0, i16 6, i16 1, i16 1, i16 -13, i16 -16, i16 3, i16 3, i16 12, i16 8, i16 -32766, i16 5, i16 16, i16 12, i16 -32764, i16 32748>
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>, <16 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>)
ret <16 x i16> %1
@@ -129,8 +127,7 @@ define <16 x i16> @fold_pmulhu_256() {
define <32 x i16> @fold_pmulhu_512() {
; CHECK-LABEL: @fold_pmulhu_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> <i16 0, i16 6, i16 1, i16 1, i16 -13, i16 -16, i16 3, i16 3, i16 12, i16 8, i16 -32766, i16 5, i16 16, i16 12, i16 -32764, i16 32748, i16 0, i16 6, i16 1, i16 1, i16 -13, i16 -16, i16 3, i16 3, i16 12, i16 8, i16 -32766, i16 5, i16 16, i16 12, i16 -32764, i16 32748>
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> <i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15, i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756>, <32 x i16> <i16 -5, i16 7, i16 -32768, i16 32766, i16 -9, i16 -11, i16 -32764, i16 32762, i16 13, i16 -15, i16 -32760, i16 32758, i16 17, i16 -19, i16 -32756, i16 32756, i16 0, i16 -1, i16 2, i16 3, i16 -4, i16 -5, i16 6, i16 7, i16 -8, i16 9, i16 -10, i16 11, i16 -12, i16 13, i16 -14, i16 -15>)
ret <32 x i16> %1
>From aa3c84c85c8c2ef5b8665932e7934458a44504ce Mon Sep 17 00:00:00 2001
From: Aaron Ballman <aaron at aaronballman.com>
Date: Wed, 3 Jul 2024 10:35:36 -0400
Subject: [PATCH 135/246] Clang 19 isn't yet released; NFC
...just updates the colors in the table.
---
clang/www/cxx_status.html | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/www/cxx_status.html b/clang/www/cxx_status.html
index 8f21a7c9f8737..f12ce38ba3d79 100755
--- a/clang/www/cxx_status.html
+++ b/clang/www/cxx_status.html
@@ -564,7 +564,7 @@ <h2 id="cxx20">C++20 implementation status</h2>
</tr>
<tr> <!-- from Cologne -->
<td><a href="https://wg21.link/p0848r3">P0848R3</a></td>
- <td rowspan="1" class="full" align="center">Clang 19</td>
+ <td rowspan="1" class="unreleased" align="center">Clang 19</td>
</tr>
<tr>
<td><a href="https://wg21.link/p1616r1">P1616R1</a></td>
>From 54aa1d28b6a26b4980df4d5448fb64d19dc1a100 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timm=20B=C3=A4der?= <tbaeder at redhat.com>
Date: Wed, 3 Jul 2024 15:34:16 +0200
Subject: [PATCH 136/246] [clang][Interp] Fix initializing atomic record types
Remove the atomic type when visiting InitListExprs.
---
clang/lib/AST/Interp/Compiler.cpp | 27 ++++++++++++++++-----------
clang/test/CodeGenCXX/atomicinit.cpp | 1 +
2 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/clang/lib/AST/Interp/Compiler.cpp b/clang/lib/AST/Interp/Compiler.cpp
index 9ca71e0496989..775cabf7f8c59 100644
--- a/clang/lib/AST/Interp/Compiler.cpp
+++ b/clang/lib/AST/Interp/Compiler.cpp
@@ -1285,7 +1285,13 @@ bool Compiler<Emitter>::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
template <class Emitter>
bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
const Expr *ArrayFiller, const Expr *E) {
- if (E->getType()->isVoidType())
+
+ QualType QT = E->getType();
+
+ if (const auto *AT = QT->getAs<AtomicType>())
+ QT = AT->getValueType();
+
+ if (QT->isVoidType())
return this->emitInvalid(E);
// Handle discarding first.
@@ -1298,17 +1304,16 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
}
// Primitive values.
- if (std::optional<PrimType> T = classify(E->getType())) {
+ if (std::optional<PrimType> T = classify(QT)) {
assert(!DiscardResult);
if (Inits.size() == 0)
- return this->visitZeroInitializer(*T, E->getType(), E);
+ return this->visitZeroInitializer(*T, QT, E);
assert(Inits.size() == 1);
return this->delegate(Inits[0]);
}
- QualType T = E->getType();
- if (T->isRecordType()) {
- const Record *R = getRecord(E->getType());
+ if (QT->isRecordType()) {
+ const Record *R = getRecord(QT);
if (Inits.size() == 1 && E->getType() == Inits[0]->getType())
return this->delegate(Inits[0]);
@@ -1405,8 +1410,8 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
return this->emitFinishInit(E);
}
- if (T->isArrayType()) {
- if (Inits.size() == 1 && E->getType() == Inits[0]->getType())
+ if (QT->isArrayType()) {
+ if (Inits.size() == 1 && QT == Inits[0]->getType())
return this->delegate(Inits[0]);
unsigned ElementIndex = 0;
@@ -1438,7 +1443,7 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
// FIXME: This should go away.
if (ArrayFiller) {
const ConstantArrayType *CAT =
- Ctx.getASTContext().getAsConstantArrayType(E->getType());
+ Ctx.getASTContext().getAsConstantArrayType(QT);
uint64_t NumElems = CAT->getZExtSize();
for (; ElementIndex != NumElems; ++ElementIndex) {
@@ -1450,7 +1455,7 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
return this->emitFinishInit(E);
}
- if (const auto *ComplexTy = E->getType()->getAs<ComplexType>()) {
+ if (const auto *ComplexTy = QT->getAs<ComplexType>()) {
unsigned NumInits = Inits.size();
if (NumInits == 1)
@@ -1480,7 +1485,7 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
return true;
}
- if (const auto *VecT = E->getType()->getAs<VectorType>()) {
+ if (const auto *VecT = QT->getAs<VectorType>()) {
unsigned NumVecElements = VecT->getNumElements();
assert(NumVecElements >= Inits.size());
diff --git a/clang/test/CodeGenCXX/atomicinit.cpp b/clang/test/CodeGenCXX/atomicinit.cpp
index a568f17b90d0c..b507a22e84bc1 100644
--- a/clang/test/CodeGenCXX/atomicinit.cpp
+++ b/clang/test/CodeGenCXX/atomicinit.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -fno-inline-functions %s -emit-llvm -O1 -o - -triple=i686-apple-darwin9 -std=c++11 | FileCheck %s
+// RUN: %clang_cc1 -fno-inline-functions %s -emit-llvm -O1 -o - -triple=i686-apple-darwin9 -std=c++11 -fexperimental-new-constant-interpreter | FileCheck %s
// CHECK-DAG: @PR22043 ={{.*}} local_unnamed_addr global i32 0, align 4
typedef _Atomic(int) AtomicInt;
>From 0865b782f62fadcdb82e5bd7be432c96109228d0 Mon Sep 17 00:00:00 2001
From: "A. Jiang" <de34 at live.cn>
Date: Wed, 3 Jul 2024 22:51:23 +0800
Subject: [PATCH 137/246] [libc++] Fix the signatures of
`std::rethrow_if_nested` (#91365)
Fixes #54470.
See https://eel.is/c++draft/global.functions#2:
> A call to a non-member function signature described in [support]
> through [thread] and [depr] shall behave as if the implementation
> declared no additional non-member function signatures.
and https://eel.is/c++draft/global.functions#3:
> An implementation shall not declare a non-member function signature
> with additional default arguments.
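The fix moves the constraint from a defaulted function parameter into a
defaulted template parameter. A minimal sketch of the two patterns
(illustrative names, not libc++ code):
```
#include <type_traits>

// Old pattern: the constraint rides on an extra defaulted function
// parameter, so the declared signature has an additional default argument.
template <class T>
void f_old(const T&, std::enable_if_t<std::is_class<T>::value>* = nullptr) {}

// New pattern: the constraint is a defaulted non-type template parameter,
// so the declared signature is exactly `void f_new(const T&)`.
template <class T, std::enable_if_t<std::is_class<T>::value, int> = 0>
void f_new(const T&) {}

struct S {};
int main() {
  S s;
  f_old(s);
  f_new(s);
  // f_old(s, nullptr) also compiles - the kind of extra call the standard's
  // wording forbids for std:: functions; f_new(s, nullptr) does not.
  return 0;
}
```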
---
libcxx/include/__exception/nested_exception.h | 10 +++----
.../except.nested/rethrow_if_nested.pass.cpp | 27 +++++++++++++++++++
2 files changed, 31 insertions(+), 6 deletions(-)
diff --git a/libcxx/include/__exception/nested_exception.h b/libcxx/include/__exception/nested_exception.h
index 1bf2df939258a..feb489f87f62f 100644
--- a/libcxx/include/__exception/nested_exception.h
+++ b/libcxx/include/__exception/nested_exception.h
@@ -84,17 +84,15 @@ struct __can_dynamic_cast
: _BoolConstant< is_polymorphic<_From>::value &&
(!is_base_of<_To, _From>::value || is_convertible<const _From*, const _To*>::value)> {};
-template <class _Ep>
-inline _LIBCPP_HIDE_FROM_ABI void
-rethrow_if_nested(const _Ep& __e, __enable_if_t< __can_dynamic_cast<_Ep, nested_exception>::value>* = 0) {
+template <class _Ep, __enable_if_t< __can_dynamic_cast<_Ep, nested_exception>::value, int> = 0>
+inline _LIBCPP_HIDE_FROM_ABI void rethrow_if_nested(const _Ep& __e) {
const nested_exception* __nep = dynamic_cast<const nested_exception*>(std::addressof(__e));
if (__nep)
__nep->rethrow_nested();
}
-template <class _Ep>
-inline _LIBCPP_HIDE_FROM_ABI void
-rethrow_if_nested(const _Ep&, __enable_if_t<!__can_dynamic_cast<_Ep, nested_exception>::value>* = 0) {}
+template <class _Ep, __enable_if_t<!__can_dynamic_cast<_Ep, nested_exception>::value, int> = 0>
+inline _LIBCPP_HIDE_FROM_ABI void rethrow_if_nested(const _Ep&) {}
} // namespace std
diff --git a/libcxx/test/std/language.support/support.exception/except.nested/rethrow_if_nested.pass.cpp b/libcxx/test/std/language.support/support.exception/except.nested/rethrow_if_nested.pass.cpp
index 39bf62b8193bb..30ce86f5277b0 100644
--- a/libcxx/test/std/language.support/support.exception/except.nested/rethrow_if_nested.pass.cpp
+++ b/libcxx/test/std/language.support/support.exception/except.nested/rethrow_if_nested.pass.cpp
@@ -18,8 +18,10 @@
// template <class E> void rethrow_if_nested(const E& e);
#include <exception>
+#include <cstddef>
#include <cstdlib>
#include <cassert>
+#include <utility>
#include "test_macros.h"
@@ -58,6 +60,31 @@ class E1 : public std::nested_exception {};
class E2 : public std::nested_exception {};
class E : public E1, public E2 {};
+#if TEST_STD_VER >= 11
+template <class, class...>
+struct can_rethrow_if_nested_impl {
+ static constexpr bool value = false;
+};
+
+template <class... Args>
+struct can_rethrow_if_nested_impl<decltype((void)std::rethrow_if_nested(std::declval<Args>()...)), Args...> {
+ static constexpr bool value = true;
+};
+
+template <class... Args>
+struct can_rethrow_if_nested : can_rethrow_if_nested_impl<void, Args...> {};
+
+static_assert(!can_rethrow_if_nested<>::value, "");
+static_assert(can_rethrow_if_nested<A>::value, "");
+static_assert(can_rethrow_if_nested<const A&>::value, "");
+static_assert(can_rethrow_if_nested<B>::value, "");
+static_assert(can_rethrow_if_nested<const B&>::value, "");
+static_assert(!can_rethrow_if_nested<A, int*>::value, "");
+static_assert(!can_rethrow_if_nested<B, int*>::value, "");
+static_assert(!can_rethrow_if_nested<A, std::nullptr_t>::value, "");
+static_assert(!can_rethrow_if_nested<B, std::nullptr_t>::value, "");
+#endif
+
int main(int, char**)
{
{
>From 86d456ad8f2aef6edcc36d44d26119025331981e Mon Sep 17 00:00:00 2001
From: Kirill <77356738+kirillpyasecky at users.noreply.github.com>
Date: Wed, 3 Jul 2024 18:00:58 +0300
Subject: [PATCH 138/246] Fix error: name 'maybe' is not defined (#96741)
---
utils/bazel/examples/http_archive/WORKSPACE | 2 ++
1 file changed, 2 insertions(+)
diff --git a/utils/bazel/examples/http_archive/WORKSPACE b/utils/bazel/examples/http_archive/WORKSPACE
index efc3a083e059d..94dffa7fa63ae 100644
--- a/utils/bazel/examples/http_archive/WORKSPACE
+++ b/utils/bazel/examples/http_archive/WORKSPACE
@@ -39,6 +39,8 @@ load("@llvm-raw//utils/bazel:configure.bzl", "llvm_configure")
llvm_configure(name = "llvm-project")
+load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
+
maybe(
http_archive,
name = "llvm_zlib",
>From f1a8f94bba8b090a0d667065e4b8b4fc66b6a5cc Mon Sep 17 00:00:00 2001
From: OverMighty <its.overmighty at gmail.com>
Date: Wed, 3 Jul 2024 17:07:49 +0200
Subject: [PATCH 139/246] [libc][docs] Add doc for using containers to test on
a different arch (#97431)
---
libc/docs/build_and_test.rst | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/libc/docs/build_and_test.rst b/libc/docs/build_and_test.rst
index 1571dded96673..22b09b07d9612 100644
--- a/libc/docs/build_and_test.rst
+++ b/libc/docs/build_and_test.rst
@@ -79,3 +79,33 @@ Building with Bazel
$> bazel test --config=generic_clang @llvm-project//libc/...
#. The bazel target layout of `libc` is located at: `utils/bazel/llvm-project-overlay/libc/BUILD.bazel <https://github.com/llvm/llvm-project/tree/main/utils/bazel/llvm-project-overlay/libc/BUILD.bazel>`_.
+
+Building in a container for a different architecture
+====================================================
+
+`Podman <https://podman.io/>`_ can be used together with
+`QEMU <https://www.qemu.org/>`_ to run container images built for architectures
+other than the host's. This can be used to build and test the libc on other
+supported architectures for which you do not have access to hardware. It can
+also be used if the hardware is slower than emulation of its architecture on a
+more powerful machine under a different architecture.
+
+As an example, to build and test in a container for 32-bit Arm:
+
+#. To install the necessary packages on Arch Linux:
+
+ .. code-block:: sh
+
+ $> pacman -S podman qemu-user-static qemu-user-static-binfmt \
+ qemu-system-arm
+
+#. To run Bash interactively in an Ubuntu 22.04 container for 32-bit Arm and
+ bind-mount an existing checkout of llvm-project on the host:
+
+ .. code-block:: sh
+
+ $> podman run -it \
+ -v </host/path/to/llvm-project>:</container/path/to/llvm-project> \
+ --arch arm docker.io/ubuntu:jammy bash
+
+#. Install necessary packages, invoke CMake, build, and run tests.
>From f7b09b5225a58a657fc0315377438077f9e9363f Mon Sep 17 00:00:00 2001
From: Izaak Schroeder <izaak.schroeder at gmail.com>
Date: Wed, 3 Jul 2024 08:16:17 -0700
Subject: [PATCH 140/246] [libc] Add function spec for `getauxval` (#97492)
Does what it says on the box.
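For context, a typical use of the function being specified (a sketch;
`AT_PAGESZ` and `<sys/auxv.h>` come from the host toolchain, Linux-only):
```
#include <cstdio>
#include <sys/auxv.h>

int main() {
  // unsigned long getauxval(unsigned long type) - matching the spec above:
  // query the page size from the auxiliary vector.
  unsigned long page_size = getauxval(AT_PAGESZ);
  printf("page size: %lu\n", page_size);
  return 0;
}
```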
---
libc/spec/gnu_ext.td | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/libc/spec/gnu_ext.td b/libc/spec/gnu_ext.td
index 161bb4e4a0d9d..e360c766c5c54 100644
--- a/libc/spec/gnu_ext.td
+++ b/libc/spec/gnu_ext.td
@@ -237,7 +237,11 @@ def GnuExtensions : StandardSpec<"GNUExtensions"> {
[], // Types
[], // Enumerations
[
- //TODO: Add getauxval here
+ FunctionSpec<
+ "getauxval",
+ RetValSpec<UnsignedLongType>,
+ [ArgSpec<UnsignedLongType>]
+ >,
] // Functions
>;
>From 56f0ecd6db9219b7d14a8eda613d6b75060643eb Mon Sep 17 00:00:00 2001
From: realqhc <caiqihan021 at hotmail.com>
Date: Thu, 4 Jul 2024 01:25:10 +1000
Subject: [PATCH 141/246] [RISCV] Implement Intrinsics Support for XCValu
Extension in CV32E40P (#85603)
Implement XCValu intrinsics for CV32E40P according to the specification.
This commit is part of a patch-set to upstream the vendor-specific
extensions of CV32E40P that need LLVM intrinsics to implement Clang
builtins.
Contributors: @CharKeaney, @ChunyuLiao, @jeremybennett, @lewis-revill,
@NandniJamnadas, @PaoloS02, @serkm, @simonpcook, @xingmingjie.
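As a worked example of the immediate encoding used by the CLIP/CLIPU
selection patterns below: an all-ones upper bound of the form 2^(N-1) - 1
is encoded as the immediate N, i.e. countr_one(bound) + 1. A minimal C++20
sketch of that transform (illustrative only, not part of the patch):

  #include <bit>
  #include <cassert>
  #include <cstdint>

  // Mirrors the powerOf2Minus1 predicate and trailing1sPlus1 transform.
  static bool isPowerOf2Minus1(uint32_t Imm) {
    return std::has_single_bit(Imm + 1); // isPowerOf2_32(Imm + 1)
  }
  static unsigned encodeClipImm(uint32_t Bound) {
    assert(isPowerOf2Minus1(Bound));
    return std::countr_one(Bound) + 1;
  }

  int main() {
    assert(encodeClipImm(15) == 5);  // matches cv.clip  a0, a0, 5 below
    assert(encodeClipImm(255) == 9); // matches cv.clipu a0, a0, 9 below
  }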
---
llvm/include/llvm/IR/IntrinsicsRISCVXCV.td | 23 ++
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 16 +-
llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td | 63 +++-
llvm/test/CodeGen/RISCV/xcvalu.ll | 302 ++++++++++++++++++++
4 files changed, 397 insertions(+), 7 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/xcvalu.ll
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td b/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td
index f1590ad66e362..8b4f4966fbd9a 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td
@@ -18,6 +18,18 @@ class ScalarCoreVBitManipGprIntrinsic
: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty],
[IntrNoMem, IntrSpeculatable]>;
+class ScalarCoreVAluGprIntrinsic
+ : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+
+class ScalarCoreVAluGprGprIntrinsic
+ : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+
+class ScalarCoreVAluGprGprGprIntrinsic
+ : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+
let TargetPrefix = "riscv" in {
def int_riscv_cv_bitmanip_extract : ScalarCoreVBitManipGprGprIntrinsic;
def int_riscv_cv_bitmanip_extractu : ScalarCoreVBitManipGprGprIntrinsic;
@@ -34,4 +46,15 @@ let TargetPrefix = "riscv" in {
: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, IntrWillReturn, IntrSpeculatable,
ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
+ def int_riscv_cv_alu_clip : ScalarCoreVAluGprGprIntrinsic;
+ def int_riscv_cv_alu_clipu : ScalarCoreVAluGprGprIntrinsic;
+ def int_riscv_cv_alu_addn : ScalarCoreVAluGprGprGprIntrinsic;
+ def int_riscv_cv_alu_addun : ScalarCoreVAluGprGprGprIntrinsic;
+ def int_riscv_cv_alu_addrn : ScalarCoreVAluGprGprGprIntrinsic;
+ def int_riscv_cv_alu_addurn : ScalarCoreVAluGprGprGprIntrinsic;
+ def int_riscv_cv_alu_subn : ScalarCoreVAluGprGprGprIntrinsic;
+ def int_riscv_cv_alu_subun : ScalarCoreVAluGprGprGprIntrinsic;
+ def int_riscv_cv_alu_subrn : ScalarCoreVAluGprGprGprIntrinsic;
+ def int_riscv_cv_alu_suburn : ScalarCoreVAluGprGprGprIntrinsic;
} // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7e38e14689fa0..6fe683410d59c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -250,10 +250,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (RV64LegalI32 && Subtarget.is64Bit())
setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
- setCondCodeAction(ISD::SETLE, XLenVT, Expand);
+ if (!Subtarget.hasVendorXCValu())
+ setCondCodeAction(ISD::SETLE, XLenVT, Expand);
setCondCodeAction(ISD::SETGT, XLenVT, Custom);
setCondCodeAction(ISD::SETGE, XLenVT, Expand);
- setCondCodeAction(ISD::SETULE, XLenVT, Expand);
+ if (!Subtarget.hasVendorXCValu())
+ setCondCodeAction(ISD::SETULE, XLenVT, Expand);
setCondCodeAction(ISD::SETUGT, XLenVT, Custom);
setCondCodeAction(ISD::SETUGE, XLenVT, Expand);
@@ -1458,6 +1460,16 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
}
+ if (Subtarget.hasVendorXCValu()) {
+ setOperationAction(ISD::ABS, XLenVT, Legal);
+ setOperationAction(ISD::SMIN, XLenVT, Legal);
+ setOperationAction(ISD::UMIN, XLenVT, Legal);
+ setOperationAction(ISD::SMAX, XLenVT, Legal);
+ setOperationAction(ISD::UMAX, XLenVT, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
+ }
+
// Function alignments.
const Align FunctionAlignment(Subtarget.hasStdExtCOrZca() ? 2 : 4);
setMinFunctionAlignment(FunctionAlignment);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
index f0d6913a9d3fe..79b960c6da21c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
@@ -198,7 +198,7 @@ let DecoderNamespace = "XCValu" in {
} // DecoderNamespace = "XCValu"
-let Predicates = [HasVendorXCValu],
+let Predicates = [HasVendorXCValu, IsRV32],
hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// General ALU Operations
def CV_ABS : CVInstAluR<0b0101000, 0b011, "cv.abs">,
@@ -249,10 +249,10 @@ let Predicates = [HasVendorXCValu],
Sched<[]>;
def CV_SUBURN : CVInstAluRRI<0b11, 0b011, "cv.suburn">,
Sched<[]>;
-} // Predicates = [HasVendorXCValu],
+} // Predicates = [HasVendorXCValu, IsRV32],
// hasSideEffects = 0, mayLoad = 0, mayStore = 0
-let Predicates = [HasVendorXCValu],
+let Predicates = [HasVendorXCValu, IsRV32],
hasSideEffects = 0, mayLoad = 0, mayStore = 0,
Constraints = "$rd = $rd_wb" in {
def CV_ADDNR : CVInstAluRRNR<0b1000000, 0b011, "cv.addnr">,
@@ -272,7 +272,7 @@ let Predicates = [HasVendorXCValu],
def CV_SUBURNR : CVInstAluRRNR<0b1000111, 0b011, "cv.suburnr">,
Sched<[]>;
-} // Predicates = [HasVendorXCValu],
+} // Predicates = [HasVendorXCValu, IsRV32],
// hasSideEffects = 0, mayLoad = 0, mayStore = 0,
// Constraints = "$rd = $rd_wb"
@@ -716,6 +716,13 @@ def CV_HI5: SDNodeXForm<imm, [{
N->getValueType(0));
}]>;
+def powerOf2Minus1 : ImmLeaf<XLenVT, [{ return isPowerOf2_32(Imm+1); }]>;
+def trailing1sPlus1 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(
+ llvm::countr_one(N->getZExtValue()) + 1,
+ SDLoc(N), N->getValueType(0));
+}]>;
+
multiclass PatCoreVBitManip<Intrinsic intr> {
def : PatGprGpr<intr, !cast<RVInst>("CV_" # NAME # "R")>;
def : Pat<(intr GPR:$rs1, cv_uimm10:$imm),
@@ -748,8 +755,54 @@ let Predicates = [HasVendorXCVbitmanip, IsRV32] in {
def : Pat<(bitreverse (XLenVT GPR:$rs)), (CV_BITREV GPR:$rs, 0, 0)>;
}
+class PatCoreVAluGpr<string intr, string asm> :
+ PatGpr<!cast<Intrinsic>("int_riscv_cv_alu_" # intr),
+ !cast<RVInst>("CV_" # asm)>;
+class PatCoreVAluGprGpr <string intr, string asm> :
+ PatGprGpr<!cast<Intrinsic>("int_riscv_cv_alu_" # intr),
+ !cast<RVInst>("CV_" # asm)>;
+
+multiclass PatCoreVAluGprImm<Intrinsic intr> {
+ def : PatGprGpr<intr, !cast<RVInst>("CV_" # NAME # "R")>;
+ def : Pat<(intr (XLenVT GPR:$rs1), powerOf2Minus1:$upperBound),
+ (!cast<RVInst>("CV_" # NAME) GPR:$rs1,
+ (trailing1sPlus1 imm:$upperBound))>;
+}
+
+multiclass PatCoreVAluGprGprImm<Intrinsic intr> {
+ def : Pat<(intr GPR:$rs1, GPR:$rs2, GPR:$rs3),
+ (!cast<RVInst>("CV_" # NAME # "R") GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+ def : Pat<(intr GPR:$rs1, GPR:$rs2, uimm5:$imm),
+ (!cast<RVInst>("CV_" # NAME) GPR:$rs1, GPR:$rs2, uimm5:$imm)>;
+}
+
+let Predicates = [HasVendorXCValu, IsRV32], AddedComplexity = 1 in {
+ def : PatGpr<abs, CV_ABS>;
+ def : PatGprGpr<setle, CV_SLET>;
+ def : PatGprGpr<setule, CV_SLETU>;
+ def : PatGprGpr<smin, CV_MIN>;
+ def : PatGprGpr<umin, CV_MINU>;
+ def : PatGprGpr<smax, CV_MAX>;
+ def : PatGprGpr<umax, CV_MAXU>;
+
+ def : Pat<(sext_inreg (XLenVT GPR:$rs1), i16), (CV_EXTHS GPR:$rs1)>;
+ def : Pat<(sext_inreg (XLenVT GPR:$rs1), i8), (CV_EXTBS GPR:$rs1)>;
+ def : Pat<(and (XLenVT GPR:$rs1), 0xffff), (CV_EXTHZ GPR:$rs1)>;
+
+ defm CLIP : PatCoreVAluGprImm<int_riscv_cv_alu_clip>;
+ defm CLIPU : PatCoreVAluGprImm<int_riscv_cv_alu_clipu>;
+ defm ADDN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addn>;
+ defm ADDUN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addun>;
+ defm ADDRN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addrn>;
+ defm ADDURN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addurn>;
+ defm SUBN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subn>;
+ defm SUBUN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subun>;
+ defm SUBRN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subrn>;
+ defm SUBURN : PatCoreVAluGprGprImm<int_riscv_cv_alu_suburn>;
+} // Predicates = [HasVendorXCValu, IsRV32]
+
//===----------------------------------------------------------------------===//
-// Patterns for immediate branching operations
+// Patterns for immediate branching operations
//===----------------------------------------------------------------------===//
let Predicates = [HasVendorXCVbi, IsRV32], AddedComplexity = 2 in {
diff --git a/llvm/test/CodeGen/RISCV/xcvalu.ll b/llvm/test/CodeGen/RISCV/xcvalu.ll
new file mode 100644
index 0000000000000..b1031731d06fa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xcvalu.ll
@@ -0,0 +1,302 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=riscv32 -mattr=+m -mattr=+xcvalu -verify-machineinstrs < %s \
+; RUN: | FileCheck %s
+
+declare i32 @llvm.abs.i32(i32, i1)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i32 @llvm.umax.i32(i32, i32)
+
+define i32 @abs(i32 %a) {
+; CHECK-LABEL: abs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.abs a0, a0
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.abs.i32(i32 %a, i1 false)
+ ret i32 %1
+}
+
+define i1 @slet(i32 %a, i32 %b) {
+; CHECK-LABEL: slet:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.slet a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = icmp sle i32 %a, %b
+ ret i1 %1
+}
+
+define i1 @sletu(i32 %a, i32 %b) {
+; CHECK-LABEL: sletu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.sletu a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = icmp ule i32 %a, %b
+ ret i1 %1
+}
+
+define i32 @smin(i32 %a, i32 %b) {
+; CHECK-LABEL: smin:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.min a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+ ret i32 %1
+}
+
+define i32 @umin(i32 %a, i32 %b) {
+; CHECK-LABEL: umin:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.minu a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+ ret i32 %1
+}
+
+define i32 @smax(i32 %a, i32 %b) {
+; CHECK-LABEL: smax:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.max a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+ ret i32 %1
+}
+
+define i32 @umax(i32 %a, i32 %b) {
+; CHECK-LABEL: umax:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.maxu a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.umax.i32(i32 %a, i32 %b)
+ ret i32 %1
+}
+
+define i32 @exths(i16 %a) {
+; CHECK-LABEL: exths:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $x11 killed $x10
+; CHECK-NEXT: cv.exths a0, a0
+; CHECK-NEXT: ret
+ %1 = sext i16 %a to i32
+ ret i32 %1
+}
+
+define i32 @exthz(i16 %a) {
+; CHECK-LABEL: exthz:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $x11 killed $x10
+; CHECK-NEXT: cv.exthz a0, a0
+; CHECK-NEXT: ret
+ %1 = zext i16 %a to i32
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.clip(i32, i32)
+
+define i32 @test.cv.alu.clip.case.a(i32 %a) {
+; CHECK-LABEL: test.cv.alu.clip.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.clip a0, a0, 5
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.clip(i32 %a, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.clip.case.b(i32 %a) {
+; CHECK-LABEL: test.cv.alu.clip.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 10
+; CHECK-NEXT: cv.clipr a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.clip(i32 %a, i32 10)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.clipu(i32, i32)
+
+define i32 @test.cv.alu.clipu.case.a(i32 %a) {
+; CHECK-LABEL: test.cv.alu.clipu.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.clipu a0, a0, 9
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.clipu(i32 %a, i32 255)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.clipu.case.b(i32 %a) {
+; CHECK-LABEL: test.cv.alu.clipu.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 200
+; CHECK-NEXT: cv.clipur a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.clipu(i32 %a, i32 200)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.addn(i32, i32, i32)
+
+define i32 @test.cv.alu.addn.case.a(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.addn.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.addn a0, a0, a1, 15
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.addn(i32 %a, i32 %b, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.addn.case.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.addn.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: cv.addnr a0, a1, a2
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.addn(i32 %a, i32 %b, i32 32)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.addun(i32, i32, i32)
+
+define i32 @test.cv.alu.addun.case.a(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.addun.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.addun a0, a0, a1, 15
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.addun(i32 %a, i32 %b, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.addun.case.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.addun.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: cv.addunr a0, a1, a2
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.addun(i32 %a, i32 %b, i32 32)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.addrn(i32, i32, i32)
+
+define i32 @test.cv.alu.addrn.case.a(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.addrn.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.addrn a0, a0, a1, 15
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.addrn(i32 %a, i32 %b, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.addrn.case.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.addrn.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: cv.addrnr a0, a1, a2
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.addrn(i32 %a, i32 %b, i32 32)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.addurn(i32, i32, i32)
+
+define i32 @test.cv.alu.addurn.case.a(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.addurn.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.addurn a0, a0, a1, 15
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.addurn(i32 %a, i32 %b, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.addurn.case.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.addurn.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: cv.addurnr a0, a1, a2
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.addurn(i32 %a, i32 %b, i32 32)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.subn(i32, i32, i32)
+
+define i32 @test.cv.alu.subn.case.a(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.subn.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.subn a0, a0, a1, 15
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.subn(i32 %a, i32 %b, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.subn.case.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.subn.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: cv.subnr a0, a1, a2
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.subn(i32 %a, i32 %b, i32 32)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.subun(i32, i32, i32)
+
+define i32 @test.cv.alu.subun.case.a(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.subun.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.subun a0, a0, a1, 15
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.subun(i32 %a, i32 %b, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.subun.case.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.subun.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: cv.subunr a0, a1, a2
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.subun(i32 %a, i32 %b, i32 32)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.subrn(i32, i32, i32)
+
+define i32 @test.cv.alu.subrn.case.a(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.subrn.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.subrn a0, a0, a1, 15
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.subrn(i32 %a, i32 %b, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.subrn.case.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.subrn.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: cv.subrnr a0, a1, a2
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.subrn(i32 %a, i32 %b, i32 32)
+ ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.alu.suburn(i32, i32, i32)
+
+define i32 @test.cv.alu.suburn.case.a(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.suburn.case.a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.suburn a0, a0, a1, 15
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.suburn(i32 %a, i32 %b, i32 15)
+ ret i32 %1
+}
+
+define i32 @test.cv.alu.suburn.case.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.alu.suburn.case.b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: cv.suburnr a0, a1, a2
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.riscv.cv.alu.suburn(i32 %a, i32 %b, i32 32)
+ ret i32 %1
+}
>From 07fa7fc00f3ad5a349ae53d576f48bfd54121a7f Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 16:00:33 +0100
Subject: [PATCH 142/246] [InstCombine][X86] Add multiply-by-one tests for
PMULH/PMULHU/PMULHRS intrinsics
PMULH/PMULHU will simplify; PMULHRS doesn't really, so we're better off keeping the intrinsic.
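To make the rationale concrete, here is a scalar model of the three
operations under their standard SSE/AVX semantics (a sketch, not part of
the patch): multiplying by one reduces PMULH to the sign bits of the
operand and PMULHU to zero, while PMULHRS keeps a rounding step with no
comparably simple shift form.

  #include <cstdint>
  #include <cstdio>

  static int16_t pmulh(int16_t a, int16_t b) {     // signed high half
    return (int16_t)(((int32_t)a * b) >> 16);
  }
  static uint16_t pmulhu(uint16_t a, uint16_t b) { // unsigned high half
    return (uint16_t)(((uint32_t)a * b) >> 16);
  }
  static int16_t pmulhrs(int16_t a, int16_t b) {   // rounded and scaled
    return (int16_t)(((((int32_t)a * b) >> 14) + 1) >> 1);
  }

  int main() {
    for (int16_t x : {(int16_t)12345, (int16_t)-12345}) {
      // pmulh(x, 1) == x >> 15 (0 or -1); pmulhu(x, 1) == 0.
      std::printf("%d %u %d\n", (int)pmulh(x, 1),
                  (unsigned)pmulhu((uint16_t)x, 1), (int)pmulhrs(x, 1));
    }
    return 0;
  }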
---
.../Transforms/InstCombine/X86/x86-pmulh.ll | 58 +++++++++++++++++++
.../Transforms/InstCombine/X86/x86-pmulhrs.ll | 58 +++++++++++++++++++
.../Transforms/InstCombine/X86/x86-pmulhu.ll | 58 +++++++++++++++++++
3 files changed, 174 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
index 53b15383aec9a..699a3c9198e8a 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
@@ -105,6 +105,64 @@ define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
ret <32 x i16> %1
}
+;
+; Multiply by One
+;
+
+define <8 x i16> @one_pmulh_128(<8 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @one_pmulh_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @one_pmulh_256(<16 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @one_pmulh_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @one_pmulh_512(<32 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[A0:%.*]], <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @one_pmulh_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
;
; Constant Folding
;
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
index acc3fd0803365..40ad11a699391 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhrs.ll
@@ -105,6 +105,64 @@ define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
ret <32 x i16> %1
}
+;
+; Multiply by One
+;
+
+define <8 x i16> @one_pmulh_128(<8 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> [[A0:%.*]], <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @one_pmulh_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @one_pmulh_256(<16 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @one_pmulh_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @one_pmulh_512(<32 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[A0:%.*]], <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @one_pmulh_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @one_pmulh_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
;
; Constant Folding
;
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
index 52945ce82a183..e970ae6080612 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
@@ -105,6 +105,64 @@ define <32 x i16> @zero_pmulhu_512_commute(<32 x i16> %a0) {
ret <32 x i16> %1
}
+;
+; Multiply by One
+;
+
+define <8 x i16> @one_pmulhu_128(<8 x i16> %a0) {
+; CHECK-LABEL: @one_pmulhu_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @one_pmulhu_128_commute(<8 x i16> %a0) {
+; CHECK-LABEL: @one_pmulhu_128_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
+ ret <8 x i16> %1
+}
+
+define <16 x i16> @one_pmulhu_256(<16 x i16> %a0) {
+; CHECK-LABEL: @one_pmulhu_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @one_pmulhu_256_commute(<16 x i16> %a0) {
+; CHECK-LABEL: @one_pmulhu_256_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> %a0)
+ ret <16 x i16> %1
+}
+
+define <32 x i16> @one_pmulhu_512(<32 x i16> %a0) {
+; CHECK-LABEL: @one_pmulhu_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[A0:%.*]], <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @one_pmulhu_512_commute(<32 x i16> %a0) {
+; CHECK-LABEL: @one_pmulhu_512_commute(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> %a0)
+ ret <32 x i16> %1
+}
+
;
; Constant Folding
;
>From cb48ad6603c237b0832b88cb85530a58a1a6a01c Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Wed, 3 Jul 2024 16:48:07 +0100
Subject: [PATCH 143/246] [AArch64] Clean up formatting of
AArch64FrameLowering. NFC
---
.../Target/AArch64/AArch64FrameLowering.cpp | 187 +++++++++---------
1 file changed, 99 insertions(+), 88 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 8216fa7db822c..75e89e8222ae9 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -481,8 +481,8 @@ bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
-bool
-AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+bool AArch64FrameLowering::hasReservedCallFrame(
+ const MachineFunction &MF) const {
// The stack probing code for the dynamically allocated outgoing arguments
// area assumes that the stack is probed at the top - either by the prologue
// code, which issues a probe if `hasVarSizedObjects` return true, or by the
@@ -1834,8 +1834,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// pointer from the funclet. We only save the callee saved registers in the
// funclet, which are really the callee saved registers of the parent
// function, including the funclet.
- int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF)
- : MFI.getStackSize();
+ int64_t NumBytes =
+ IsFunclet ? getWinEHFuncletFrameSize(MF) : MFI.getStackSize();
if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
assert(!HasFP && "unexpected function without stack frame but with FP");
assert(!SVEStackSize &&
@@ -1856,7 +1856,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
if (EmitCFI) {
// Label used to tie together the PROLOG_LABEL and the MachineMoves.
MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
- // Encode the stack size of the leaf function.
+ // Encode the stack size of the leaf function.
unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::cfiDefCfaOffset(FrameLabel, NumBytes));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
@@ -1999,22 +1999,22 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// exceeding 256MB in size.
if (NumBytes >= (1 << 28))
report_fatal_error("Stack size cannot exceed 256MB for stack "
- "unwinding purposes");
+ "unwinding purposes");
uint32_t LowNumWords = NumWords & 0xFFFF;
BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
- .addImm(LowNumWords)
- .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
- .setMIFlag(MachineInstr::FrameSetup);
+ .addImm(LowNumWords)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
+ .setMIFlag(MachineInstr::FrameSetup);
BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
- .setMIFlag(MachineInstr::FrameSetup);
+ .setMIFlag(MachineInstr::FrameSetup);
if ((NumWords & 0xFFFF0000) != 0) {
- BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
- .addReg(AArch64::X15)
- .addImm((NumWords & 0xFFFF0000) >> 16) // High half
- .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
- .setMIFlag(MachineInstr::FrameSetup);
- BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
+ .addReg(AArch64::X15)
+ .addImm((NumWords & 0xFFFF0000) >> 16) // High half
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
+ .setMIFlag(MachineInstr::FrameSetup);
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
.setMIFlag(MachineInstr::FrameSetup);
}
} else {
@@ -2023,7 +2023,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
.setMIFlags(MachineInstr::FrameSetup);
}
- const char* ChkStk = Subtarget.getChkStkName();
+ const char *ChkStk = Subtarget.getChkStkName();
switch (MF.getTarget().getCodeModel()) {
case CodeModel::Tiny:
case CodeModel::Small:
@@ -2604,7 +2604,7 @@ static StackOffset getStackOffset(const MachineFunction &MF,
return StackOffset::getFixed(ObjectOffset + (int64_t)MFI.getStackSize());
}
- // TODO: This function currently does not work for scalable vectors.
+// TODO: This function currently does not work for scalable vectors.
int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
int FI) const {
const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
@@ -3012,9 +3012,9 @@ static void computeCalleeSaveRegisterPairs(
// Round up size of non-pair to pair size if we need to pad the
// callee-save area to ensure 16-byte alignment.
- if (NeedGapToAlignStack && !NeedsWinCFI &&
- !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 &&
- !RPI.isPaired() && ByteOffset % 16 != 0) {
+ if (NeedGapToAlignStack && !NeedsWinCFI && !RPI.isScalable() &&
+ RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired() &&
+ ByteOffset % 16 != 0) {
ByteOffset += 8 * StackFillDir;
assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16));
// A stack frame with a gap looks like this, bottom up:
@@ -3044,10 +3044,9 @@ static void computeCalleeSaveRegisterPairs(
// Save the offset to frame record so that the FP register can point to the
// innermost frame record (spilled FP and LR registers).
- if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR &&
- RPI.Reg2 == AArch64::FP) ||
- (IsWindows && RPI.Reg1 == AArch64::FP &&
- RPI.Reg2 == AArch64::LR)))
+ if (NeedsFrameRecord &&
+ ((!IsWindows && RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
+ (IsWindows && RPI.Reg1 == AArch64::FP && RPI.Reg2 == AArch64::LR)))
AFI->setCalleeSaveBaseToFrameRecordOffset(Offset);
RegPairs.push_back(RPI);
@@ -3117,30 +3116,30 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
Align Alignment;
switch (RPI.Type) {
case RegPairInfo::GPR:
- StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
- Size = 8;
- Alignment = Align(8);
- break;
+ StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
+ Size = 8;
+ Alignment = Align(8);
+ break;
case RegPairInfo::FPR64:
- StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
- Size = 8;
- Alignment = Align(8);
- break;
+ StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
+ Size = 8;
+ Alignment = Align(8);
+ break;
case RegPairInfo::FPR128:
- StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
- Size = 16;
- Alignment = Align(16);
- break;
+ StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
+ Size = 16;
+ Alignment = Align(16);
+ break;
case RegPairInfo::ZPR:
StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI;
Size = 16;
Alignment = Align(16);
break;
case RegPairInfo::PPR:
- StrOpc = AArch64::STR_PXI;
- Size = 2;
- Alignment = Align(2);
- break;
+ StrOpc = AArch64::STR_PXI;
+ Size = 2;
+ Alignment = Align(2);
+ break;
case RegPairInfo::VG:
StrOpc = AArch64::STRXui;
Size = 8;
@@ -3358,30 +3357,30 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
Align Alignment;
switch (RPI.Type) {
case RegPairInfo::GPR:
- LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
- Size = 8;
- Alignment = Align(8);
- break;
+ LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
+ Size = 8;
+ Alignment = Align(8);
+ break;
case RegPairInfo::FPR64:
- LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
- Size = 8;
- Alignment = Align(8);
- break;
+ LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
+ Size = 8;
+ Alignment = Align(8);
+ break;
case RegPairInfo::FPR128:
- LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
- Size = 16;
- Alignment = Align(16);
- break;
+ LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
+ Size = 16;
+ Alignment = Align(16);
+ break;
case RegPairInfo::ZPR:
- LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
- Size = 16;
- Alignment = Align(16);
- break;
+ LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
+ Size = 16;
+ Alignment = Align(16);
+ break;
case RegPairInfo::PPR:
- LdrOpc = AArch64::LDR_PXI;
- Size = 2;
- Alignment = Align(2);
- break;
+ LdrOpc = AArch64::LDR_PXI;
+ Size = 2;
+ Alignment = Align(2);
+ break;
case RegPairInfo::VG:
continue;
}
@@ -3607,11 +3606,12 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
SavedRegs.set(AArch64::LR);
}
- LLVM_DEBUG(dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
- for (unsigned Reg
- : SavedRegs.set_bits()) dbgs()
- << ' ' << printReg(Reg, RegInfo);
- dbgs() << "\n";);
+ LLVM_DEBUG({
+ dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
+ for (unsigned Reg : SavedRegs.set_bits())
+ dbgs() << ' ' << printReg(Reg, RegInfo);
+ dbgs() << "\n";
+ });
// If any callee-saved registers are used, the frame cannot be eliminated.
int64_t SVEStackSize =
@@ -3628,7 +3628,8 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
int64_t CalleeStackUsed = 0;
for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) {
int64_t FixedOff = MFI.getObjectOffset(I);
- if (FixedOff > CalleeStackUsed) CalleeStackUsed = FixedOff;
+ if (FixedOff > CalleeStackUsed)
+ CalleeStackUsed = FixedOff;
}
// Conservatively always assume BigStack when there are SVE spills.
@@ -3689,8 +3690,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
- << EstimatedStackSize + AlignedCSStackSize
- << " bytes.\n");
+ << EstimatedStackSize + AlignedCSStackSize << " bytes.\n");
assert((!MFI.isCalleeSavedInfoValid() ||
AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
@@ -3728,8 +3728,10 @@ bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
if (UsesWinAAPCS && hasFP(MF) && AFI->hasSwiftAsyncContext()) {
int FrameIdx = MFI.CreateStackObject(8, Align(16), true);
AFI->setSwiftAsyncContextFrameIdx(FrameIdx);
- if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
- if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
+ if ((unsigned)FrameIdx < MinCSFrameIndex)
+ MinCSFrameIndex = FrameIdx;
+ if ((unsigned)FrameIdx > MaxCSFrameIndex)
+ MaxCSFrameIndex = FrameIdx;
}
// Insert VG into the list of CSRs, immediately before LR if saved.
@@ -3768,16 +3770,20 @@ bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
CS.setFrameIdx(FrameIdx);
- if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
- if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
+ if ((unsigned)FrameIdx < MinCSFrameIndex)
+ MinCSFrameIndex = FrameIdx;
+ if ((unsigned)FrameIdx > MaxCSFrameIndex)
+ MaxCSFrameIndex = FrameIdx;
// Grab 8 bytes below FP for the extended asynchronous frame info.
if (hasFP(MF) && AFI->hasSwiftAsyncContext() && !UsesWinAAPCS &&
Reg == AArch64::FP) {
FrameIdx = MFI.CreateStackObject(8, Alignment, true);
AFI->setSwiftAsyncContextFrameIdx(FrameIdx);
- if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
- if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
+ if ((unsigned)FrameIdx < MinCSFrameIndex)
+ MinCSFrameIndex = FrameIdx;
+ if ((unsigned)FrameIdx > MaxCSFrameIndex)
+ MaxCSFrameIndex = FrameIdx;
}
}
return true;
@@ -4178,9 +4184,12 @@ void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
mergeMemRefs(TagStores, CombinedMemRefs);
- LLVM_DEBUG(dbgs() << "Replacing adjacent STG instructions:\n";
- for (const auto &Instr
- : TagStores) { dbgs() << " " << *Instr.MI; });
+ LLVM_DEBUG({
+ dbgs() << "Replacing adjacent STG instructions:\n";
+ for (const auto &Instr : TagStores) {
+ dbgs() << " " << *Instr.MI;
+ }
+ });
// Size threshold where a loop becomes shorter than a linear sequence of
// tagging instructions.
@@ -4617,16 +4626,18 @@ void AArch64FrameLowering::orderFrameObjects(
ObjectsToAllocate[i++] = Obj.ObjectIndex;
}
- LLVM_DEBUG(dbgs() << "Final frame order:\n"; for (auto &Obj
- : FrameObjects) {
- if (!Obj.IsValid)
- break;
- dbgs() << " " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
- if (Obj.ObjectFirst)
- dbgs() << ", first";
- if (Obj.GroupFirst)
- dbgs() << ", group-first";
- dbgs() << "\n";
+ LLVM_DEBUG({
+ dbgs() << "Final frame order:\n";
+ for (auto &Obj : FrameObjects) {
+ if (!Obj.IsValid)
+ break;
+ dbgs() << " " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
+ if (Obj.ObjectFirst)
+ dbgs() << ", first";
+ if (Obj.GroupFirst)
+ dbgs() << ", group-first";
+ dbgs() << "\n";
+ }
});
}
>From 1db4221c6da0f77517e1921f7c17a6f4e493e38f Mon Sep 17 00:00:00 2001
From: Vyacheslav Levytskyy <vyacheslav.levytskyy at intel.com>
Date: Wed, 3 Jul 2024 17:56:26 +0200
Subject: [PATCH 144/246] [SPIR-V] Fix a crash in SPIRV Backend during the
'finalize lowering' stage on management of function forward calls (#97435)
This PR fixes a crash in the SPIR-V Backend during the 'finalize
lowering' stage when managing function forward calls. The reproducer
is committed as a new test case.
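The shape of the fix, modeled in isolation (the names and types here are a
hypothetical stand-in, not the real SPIRVGlobalRegistry API): a lookup that
previously asserted a type had already been registered now creates a
default integer type on demand, which is the case forward calls used to hit.

  #include <map>

  struct Type { unsigned BitWidth; };

  struct Registry {
    std::map<unsigned, Type> IntTypes;
    Type *getOrCreateInt(unsigned BitWidth) {
      // Creates the integer type the first time it is requested.
      return &IntTypes.try_emplace(BitWidth, Type{BitWidth}).first->second;
    }
  };

  Type *ensureConstantIntType(Type *SpvType, unsigned BitWidth,
                              Registry &GR) {
    if (!SpvType) // was: assert(SpvType), crashing on forward calls
      SpvType = GR.getOrCreateInt(BitWidth);
    return SpvType;
  }

  int main() {
    Registry GR;
    Type *T = ensureConstantIntType(nullptr, 32, GR); // no crash now
    return T->BitWidth == 32 ? 0 : 1;
  }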
---
llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp | 4 +-
.../function/forward-call-type-inference.ll | 75 +++++++++++++++++++
2 files changed, 77 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/SPIRV/function/forward-call-type-inference.ll
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index 5558c7a5a4a5f..e70ddc4d39455 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -318,15 +318,15 @@ Register SPIRVGlobalRegistry::buildConstantInt(uint64_t Val,
if (EmitIR) {
MIRBuilder.buildConstant(Res, *ConstInt);
} else {
+ if (!SpvType)
+ SpvType = getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder);
MachineInstrBuilder MIB;
if (Val) {
- assert(SpvType);
MIB = MIRBuilder.buildInstr(SPIRV::OpConstantI)
.addDef(Res)
.addUse(getSPIRVTypeID(SpvType));
addNumImm(APInt(BitWidth, Val), MIB);
} else {
- assert(SpvType);
MIB = MIRBuilder.buildInstr(SPIRV::OpConstantNull)
.addDef(Res)
.addUse(getSPIRVTypeID(SpvType));
diff --git a/llvm/test/CodeGen/SPIRV/function/forward-call-type-inference.ll b/llvm/test/CodeGen/SPIRV/function/forward-call-type-inference.ll
new file mode 100644
index 0000000000000..a1ef4f00dd4f5
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/function/forward-call-type-inference.ll
@@ -0,0 +1,75 @@
+; Adapted from Khronos Translator:
+; https://github.com/KhronosGroup/SPIRV-LLVM-Translator/blob/main/test/type-scavenger/equivalence.ll
+; The goal of the test is to ensure that the Backend doesn't crash during
+; the 'finalize lowering' stage on management of function forward calls.
+
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-COUNT-9: OpFunction
+
+define spir_func void @_func1() {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ %call3 = call spir_func ptr addrspace(4) @_func2()
+ %call5 = call spir_func ptr addrspace(4) @_func0(ptr addrspace(4) %call3, i64 0)
+ br label %for.cond
+}
+
+define spir_func void @_func3() {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ %call3 = call spir_func ptr @_func4()
+ %call3.ascast = addrspacecast ptr %call3 to ptr addrspace(4)
+ %call5 = call spir_func ptr addrspace(4) @_func0(ptr addrspace(4) %call3.ascast, i64 0)
+ br label %for.cond
+}
+
+declare spir_func ptr addrspace(4) @_func5()
+
+define spir_func void @_func6(ptr addrspace(4) %call3.ascast) {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ %call5 = call spir_func ptr addrspace(4) @_func0(ptr addrspace(4) %call3.ascast, i64 0)
+ br label %for.cond
+}
+
+define spir_func void @_func7() {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ %call3 = call spir_func ptr addrspace(4) @_func5()
+ %call5 = call spir_func ptr addrspace(4) @_func0(ptr addrspace(4) %call3, i64 0)
+ br label %for.cond
+}
+
+declare spir_func ptr @_func4()
+
+declare spir_func ptr addrspace(4) @_func2()
+
+define spir_func ptr addrspace(4) @_func0(ptr addrspace(4) %this, i64 %index) {
+entry:
+ %arrayidx = getelementptr [5 x i32], ptr addrspace(4) %this, i64 0, i64 %index
+ ret ptr addrspace(4) null
+}
+
+define spir_func void @_func8() {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ %call8 = call spir_func ptr addrspace(4) @_func0(ptr addrspace(4) null, i64 0)
+ br label %for.cond
+}
+
+uselistorder ptr @_func0, { 0, 4, 3, 2, 1 }
>From bf9e9e5e843838f9bc905d4a0d575d8d39e9b5f2 Mon Sep 17 00:00:00 2001
From: Vyacheslav Levytskyy <vyacheslav.levytskyy at intel.com>
Date: Wed, 3 Jul 2024 17:56:41 +0200
Subject: [PATCH 145/246] [SPIR-V] Improve type inference for a known
instruction's builtin: OpGroupAsyncCopy (#96895)
This PR improves type inference for a known instruction's builtin,
OpGroupAsyncCopy (a sketch of the second point follows this list):
* deduce the type of one source/destination pointer when it is possible to
deduce the type of the other argument, and
* validate src and dest types, unfolding a parameter if it is a structure
wrapper around a scalar/vector type.
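A self-contained model of the wrapper-unfolding check (hypothetical types;
the real code walks SPIRVType operands in validateGroupAsyncCopyPtr): a
pointer operand whose pointee is a single-member struct wrapping a scalar
or vector is rewritten, via an inserted bitcast, as a pointer to the
member type.

  #include <optional>
  #include <vector>

  enum class Op { Pointer, Struct, Vector, Int, Float, Bool, Other };
  struct Ty {
    Op Opcode;
    std::vector<const Ty *> Operands; // pointee / struct members
  };

  // Returns the member type if Ptr is a pointer to a one-member struct
  // wrapping a scalar or vector; the caller would then bitcast the
  // operand to a pointer to that member type.
  std::optional<const Ty *> unwrapStructWrapper(const Ty &Ptr) {
    if (Ptr.Opcode != Op::Pointer || Ptr.Operands.size() != 1)
      return std::nullopt;
    const Ty *Pointee = Ptr.Operands[0];
    if (Pointee->Opcode != Op::Struct || Pointee->Operands.size() != 1)
      return std::nullopt;
    const Ty *Member = Pointee->Operands[0];
    switch (Member->Opcode) {
    case Op::Vector: case Op::Int: case Op::Float: case Op::Bool:
      return Member;
    default:
      return std::nullopt;
    }
  }

  int main() {
    Ty V4{Op::Vector, {}};
    Ty S{Op::Struct, {&V4}};
    Ty P{Op::Pointer, {&S}};
    return unwrapStructWrapper(P).has_value() ? 0 : 1;
  }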
---
llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp | 85 ++++++++++++++++++-
llvm/lib/Target/SPIRV/SPIRVBuiltins.h | 8 +-
llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp | 28 +++++-
llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp | 37 ++++++++
.../SPIRV/transcoding/spirv-event-null.ll | 54 +++++++++++-
5 files changed, 203 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 0b93a4d85eedf..dfec10bec3f9e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -184,10 +184,16 @@ lookupBuiltin(StringRef DemangledCall,
SPIRV::InstructionSet::InstructionSet Set,
Register ReturnRegister, const SPIRVType *ReturnType,
const SmallVectorImpl<Register> &Arguments) {
+ const static std::string PassPrefix = "(anonymous namespace)::";
+ std::string BuiltinName;
+ // Itanium Demangler result may have "(anonymous namespace)::" prefix
+ if (DemangledCall.starts_with(PassPrefix.c_str()))
+ BuiltinName = DemangledCall.substr(PassPrefix.length());
+ else
+ BuiltinName = DemangledCall;
// Extract the builtin function name and types of arguments from the call
// skeleton.
- std::string BuiltinName =
- DemangledCall.substr(0, DemangledCall.find('(')).str();
+ BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));
// Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
@@ -2377,9 +2383,80 @@ static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
return true;
}
-/// Lowers a builtin funtion call using the provided \p DemangledCall skeleton
-/// and external instruction \p Set.
namespace SPIRV {
+// Try to find a builtin function attributes by a demangled function name and
+// return a tuple <builtin group, op code, ext instruction number>, or a special
+// tuple value <-1, 0, 0> if the builtin function is not found.
+// Not all builtin functions are supported, only those with a ready-to-use op
+// code or instruction number defined in TableGen.
+// TODO: consider a major rework of mapping demangled calls into builtin
+// functions to unify search and decrease the number of individual cases.
+std::tuple<int, unsigned, unsigned>
+mapBuiltinToOpcode(const StringRef DemangledCall,
+ SPIRV::InstructionSet::InstructionSet Set) {
+ Register Reg;
+ SmallVector<Register> Args;
+ std::unique_ptr<const IncomingCall> Call =
+ lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
+ if (!Call)
+ return std::make_tuple(-1, 0, 0);
+
+ switch (Call->Builtin->Group) {
+ case SPIRV::Relational:
+ case SPIRV::Atomic:
+ case SPIRV::Barrier:
+ case SPIRV::CastToPtr:
+ case SPIRV::ImageMiscQuery:
+ case SPIRV::SpecConstant:
+ case SPIRV::Enqueue:
+ case SPIRV::AsyncCopy:
+ case SPIRV::LoadStore:
+ case SPIRV::CoopMatr:
+ if (const auto *R =
+ SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
+ return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
+ break;
+ case SPIRV::Extended:
+ if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
+ Call->Builtin->Set))
+ return std::make_tuple(Call->Builtin->Group, 0, R->Number);
+ break;
+ case SPIRV::VectorLoadStore:
+ if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
+ Call->Builtin->Set))
+ return std::make_tuple(SPIRV::Extended, 0, R->Number);
+ break;
+ case SPIRV::Group:
+ if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
+ return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
+ break;
+ case SPIRV::AtomicFloating:
+ if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
+ return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
+ break;
+ case SPIRV::IntelSubgroups:
+ if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
+ return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
+ break;
+ case SPIRV::GroupUniform:
+ if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
+ return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
+ break;
+ case SPIRV::WriteImage:
+ return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
+ case SPIRV::Select:
+ return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
+ case SPIRV::Construct:
+ return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
+ 0);
+ case SPIRV::KernelClock:
+ return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
+ default:
+ return std::make_tuple(-1, 0, 0);
+ }
+ return std::make_tuple(-1, 0, 0);
+}
+
std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
SPIRV::InstructionSet::InstructionSet Set,
MachineIRBuilder &MIRBuilder,
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.h b/llvm/lib/Target/SPIRV/SPIRVBuiltins.h
index 649f5bfd1d7c2..68bff602d1d10 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.h
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.h
@@ -19,7 +19,7 @@
namespace llvm {
namespace SPIRV {
-/// Lowers a builtin funtion call using the provided \p DemangledCall skeleton
+/// Lowers a builtin function call using the provided \p DemangledCall skeleton
/// and external instruction \p Set.
///
/// \return the lowering success status if the called function is a recognized
@@ -38,6 +38,12 @@ std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
const SmallVectorImpl<Register> &Args,
SPIRVGlobalRegistry *GR);
+/// Helper function for finding a builtin function attributes
+/// by a demangled function name. Defined in SPIRVBuiltins.cpp.
+std::tuple<int, unsigned, unsigned>
+mapBuiltinToOpcode(const StringRef DemangledCall,
+ SPIRV::InstructionSet::InstructionSet Set);
+
/// Parses the provided \p ArgIdx argument base type in the \p DemangledCall
/// skeleton. A base type is either a basic type (e.g. i32 for int), pointer
/// element type (e.g. i8 for char*), or builtin type (TargetExtType).
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 3eab6f5a07186..566eafd41e9bd 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -67,6 +67,7 @@ class SPIRVEmitIntrinsics
DenseMap<Instruction *, Constant *> AggrConsts;
DenseMap<Instruction *, Type *> AggrConstTypes;
DenseSet<Instruction *> AggrStores;
+ SPIRV::InstructionSet::InstructionSet InstrSet;
// deduce element type of untyped pointers
Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
@@ -384,9 +385,10 @@ Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
std::string DemangledName =
getOclOrSpirvBuiltinDemangledName(CalledF->getName());
auto AsArgIt = ResTypeByArg.find(DemangledName);
- if (AsArgIt != ResTypeByArg.end())
+ if (AsArgIt != ResTypeByArg.end()) {
Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
Visited);
+ }
}
}
@@ -544,6 +546,28 @@ void SPIRVEmitIntrinsics::deduceOperandElementType(Instruction *I) {
KnownElemTy = ElemTy1;
Ops.push_back(std::make_pair(Op0, 0));
}
+ } else if (auto *CI = dyn_cast<CallInst>(I)) {
+ if (Function *CalledF = CI->getCalledFunction()) {
+ std::string DemangledName =
+ getOclOrSpirvBuiltinDemangledName(CalledF->getName());
+ if (DemangledName.length() > 0 &&
+ !StringRef(DemangledName).starts_with("llvm.")) {
+ auto [Grp, Opcode, ExtNo] =
+ SPIRV::mapBuiltinToOpcode(DemangledName, InstrSet);
+ if (Opcode == SPIRV::OpGroupAsyncCopy) {
+ for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2;
+ ++i) {
+ Value *Op = CI->getArgOperand(i);
+ if (!isPointerTy(Op->getType()))
+ continue;
+ ++PtrCnt;
+ if (Type *ElemTy = GR->findDeducedElementType(Op))
+ KnownElemTy = ElemTy; // src will rewrite dest if both are defined
+ Ops.push_back(std::make_pair(Op, i));
+ }
+ }
+ }
+ }
}
  // There is not enough info to deduce types, or all is valid.
@@ -1385,6 +1409,8 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
GR = ST.getSPIRVGlobalRegistry();
+ InstrSet = ST.isOpenCLEnv() ? SPIRV::InstructionSet::OpenCL_std
+ : SPIRV::InstructionSet::GLSL_std_450;
F = &Func;
IRBuilder<> B(Func.getContext());
diff --git a/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
index 4383d1c5c0e25..2344ec529e16d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
@@ -203,6 +203,39 @@ static void validateGroupWaitEventsPtr(const SPIRVSubtarget &STI,
doInsertBitcast(STI, MRI, GR, I, OpReg, OpIdx, NewPtrType);
}
+static void validateGroupAsyncCopyPtr(const SPIRVSubtarget &STI,
+ MachineRegisterInfo *MRI,
+ SPIRVGlobalRegistry &GR, MachineInstr &I,
+ unsigned OpIdx) {
+ MachineFunction *MF = I.getParent()->getParent();
+ Register OpReg = I.getOperand(OpIdx).getReg();
+ Register OpTypeReg = getTypeReg(MRI, OpReg);
+ SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpTypeReg, MF);
+ if (!OpType || OpType->getOpcode() != SPIRV::OpTypePointer)
+ return;
+ SPIRVType *ElemType = GR.getSPIRVTypeForVReg(OpType->getOperand(2).getReg());
+ if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeStruct ||
+ ElemType->getNumOperands() != 2)
+ return;
+ // It's a structure-wrapper around another type with a single member field.
+ SPIRVType *MemberType =
+ GR.getSPIRVTypeForVReg(ElemType->getOperand(1).getReg());
+ if (!MemberType)
+ return;
+ unsigned MemberTypeOp = MemberType->getOpcode();
+ if (MemberTypeOp != SPIRV::OpTypeVector && MemberTypeOp != SPIRV::OpTypeInt &&
+ MemberTypeOp != SPIRV::OpTypeFloat && MemberTypeOp != SPIRV::OpTypeBool)
+ return;
+ // It's a structure-wrapper around a valid type. Insert a bitcast before the
+ // instruction to keep SPIR-V code valid.
+ SPIRV::StorageClass::StorageClass SC =
+ static_cast<SPIRV::StorageClass::StorageClass>(
+ OpType->getOperand(1).getImm());
+ MachineIRBuilder MIB(I);
+ SPIRVType *NewPtrType = GR.getOrCreateSPIRVPointerType(MemberType, MIB, SC);
+ doInsertBitcast(STI, MRI, GR, I, OpReg, OpIdx, NewPtrType);
+}
+
// Insert a bitcast before the function call instruction to keep SPIR-V code
// valid when there is a type mismatch between actual and expected types of an
// argument:
@@ -380,6 +413,10 @@ void SPIRVTargetLowering::finalizeLowering(MachineFunction &MF) const {
SPIRV::OpTypeBool))
MI.setDesc(STI.getInstrInfo()->get(SPIRV::OpLogicalNotEqual));
break;
+ case SPIRV::OpGroupAsyncCopy:
+ validateGroupAsyncCopyPtr(STI, MRI, GR, MI, 3);
+ validateGroupAsyncCopyPtr(STI, MRI, GR, MI, 4);
+ break;
case SPIRV::OpGroupWaitEvents:
// OpGroupWaitEvents ..., ..., <pointer to OpTypeEvent>
validateGroupWaitEventsPtr(STI, MRI, GR, MI);
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll
index fe0d96f2773ec..df11565ca8180 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll
@@ -8,7 +8,18 @@
; CHECK-DAG: %[[#TyStruct:]] = OpTypeStruct %[[#TyEvent]]
; CHECK-DAG: %[[#ConstEvent:]] = OpConstantNull %[[#TyEvent]]
; CHECK-DAG: %[[#TyEventPtr:]] = OpTypePointer Function %[[#TyEvent]]
+; CHECK-DAG: %[[#TyEventPtrGen:]] = OpTypePointer Generic %[[#TyEvent]]
; CHECK-DAG: %[[#TyStructPtr:]] = OpTypePointer Function %[[#TyStruct]]
+; CHECK-DAG: %[[#TyChar:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#TyV4:]] = OpTypeVector %[[#TyChar]] 4
+; CHECK-DAG: %[[#TyStructV4:]] = OpTypeStruct %[[#TyV4]]
+; CHECK-DAG: %[[#TyPtrSV4_W:]] = OpTypePointer Workgroup %[[#TyStructV4]]
+; CHECK-DAG: %[[#TyPtrSV4_CW:]] = OpTypePointer CrossWorkgroup %[[#TyStructV4]]
+; CHECK-DAG: %[[#TyPtrV4_W:]] = OpTypePointer Workgroup %[[#TyV4]]
+; CHECK-DAG: %[[#TyPtrV4_CW:]] = OpTypePointer CrossWorkgroup %[[#TyV4]]
+
+; Check correct translation of __spirv_GroupAsyncCopy and target("spirv.Event") zeroinitializer
+
; CHECK: OpFunction
; CHECK: OpFunctionParameter
; CHECK: %[[#Src:]] = OpFunctionParameter
@@ -17,12 +28,13 @@
; CHECK: %[[#Dest:]] = OpInBoundsPtrAccessChain
; CHECK: %[[#CopyRes:]] = OpGroupAsyncCopy %[[#TyEvent]] %[[#]] %[[#Dest]] %[[#Src]] %[[#]] %[[#]] %[[#ConstEvent]]
; CHECK: OpStore %[[#EventVar]] %[[#CopyRes]]
+; CHECK: OpFunctionEnd
-%"class.sycl::_V1::device_event" = type { target("spirv.Event") }
+%StructEvent = type { target("spirv.Event") }
-define spir_kernel void @foo(ptr addrspace(1) %_arg_out_ptr, ptr addrspace(3) noundef %_arg_local_acc) {
+define spir_kernel void @foo(ptr addrspace(1) %_arg_out_ptr, ptr addrspace(3) %_arg_local_acc) {
entry:
- %var = alloca %"class.sycl::_V1::device_event"
+ %var = alloca %StructEvent
%dev_event.i.sroa.0 = alloca target("spirv.Event")
%add.ptr.i26 = getelementptr inbounds i32, ptr addrspace(1) %_arg_out_ptr, i64 0
%call3.i = tail call spir_func target("spirv.Event") @_Z22__spirv_GroupAsyncCopyjPU3AS1iPU3AS3Kimm9ocl_event(i32 2, ptr addrspace(1) %add.ptr.i26, ptr addrspace(3) %_arg_local_acc, i64 16, i64 10, target("spirv.Event") zeroinitializer)
@@ -31,3 +43,39 @@ entry:
}
declare dso_local spir_func target("spirv.Event") @_Z22__spirv_GroupAsyncCopyjPU3AS1iPU3AS3Kimm9ocl_event(i32, ptr addrspace(1), ptr addrspace(3), i64, i64, target("spirv.Event"))
+
+; Check correct type inference when calling __spirv_GroupAsyncCopy:
+; we expect the backend to deduce the type of %_arg_Local, given that
+; the type of %_arg can be deduced and that %_arg_Local and %_arg are
+; the destination/source arguments of OpGroupAsyncCopy
+
+; CHECK: OpFunction
+; CHECK: %[[#BarArg1:]] = OpFunctionParameter %[[#TyPtrSV4_W]]
+; CHECK: %[[#BarArg2:]] = OpFunctionParameter %[[#TyPtrSV4_CW]]
+; CHECK: %[[#EventVarBar:]] = OpVariable %[[#TyStructPtr]] Function
+; CHECK: %[[#SrcBar:]] = OpInBoundsPtrAccessChain %[[#TyPtrSV4_CW]] %[[#BarArg2]] %[[#]]
+; CHECK-DAG: %[[#BarArg1Casted:]] = OpBitcast %[[#TyPtrV4_W]] %[[#BarArg1]]
+; CHECK-DAG: %[[#SrcBarCasted:]] = OpBitcast %[[#TyPtrV4_CW]] %[[#SrcBar]]
+; CHECK: %[[#ResBar:]] = OpGroupAsyncCopy %[[#TyEvent]] %[[#]] %[[#BarArg1Casted]] %[[#SrcBarCasted]] %[[#]] %[[#]] %[[#ConstEvent]]
+; CHECK: %[[#EventVarBarCasted:]] = OpBitcast %[[#TyEventPtr]] %[[#EventVarBar]]
+; CHECK: OpStore %[[#EventVarBarCasted]] %[[#ResBar]]
+; CHECK: %[[#EventVarBarCasted2:]] = OpBitcast %[[#TyEventPtr]] %[[#EventVarBar]]
+; CHECK: %[[#EventVarBarGen:]] = OpPtrCastToGeneric %[[#TyEventPtrGen]] %[[#EventVarBarCasted2]]
+; CHECK: OpGroupWaitEvents %[[#]] %[[#]] %[[#EventVarBarGen]]
+; CHECK: OpFunctionEnd
+
+%Vec4 = type { <4 x i8> }
+
+define spir_kernel void @bar(ptr addrspace(3) %_arg_Local, ptr addrspace(1) readonly %_arg) {
+entry:
+ %E1 = alloca %StructEvent
+ %srcptr = getelementptr inbounds %Vec4, ptr addrspace(1) %_arg, i64 0
+ %r1 = tail call spir_func target("spirv.Event") @_Z22__spirv_GroupAsyncCopyjPU3AS3Dv4_aPU3AS1KS_mm9ocl_event(i32 2, ptr addrspace(3) %_arg_Local, ptr addrspace(1) %srcptr, i64 16, i64 10, target("spirv.Event") zeroinitializer)
+ store target("spirv.Event") %r1, ptr %E1
+ %E.ascast.i = addrspacecast ptr %E1 to ptr addrspace(4)
+ call spir_func void @_Z23__spirv_GroupWaitEventsjiP9ocl_event(i32 2, i32 1, ptr addrspace(4) %E.ascast.i)
+ ret void
+}
+
+declare dso_local spir_func target("spirv.Event") @_Z22__spirv_GroupAsyncCopyjPU3AS3Dv4_aPU3AS1KS_mm9ocl_event(i32, ptr addrspace(3), ptr addrspace(1), i64, i64, target("spirv.Event"))
+declare dso_local spir_func void @_Z23__spirv_GroupWaitEventsjiP9ocl_event(i32, i32, ptr addrspace(4))
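As a rough illustration of the deduction rule the new CallInst branch in deduceOperandElementType implements, here is a standalone C++ sketch (the `PointerArg` and `deduceAsyncCopyElemTy` names are hypothetical, not the backend's API): walk the pointer arguments of OpGroupAsyncCopy in order (dest, then src) and keep the last known element type, so a type deduced for either argument is propagated to both, with src winning when both are known.

```cpp
#include <cassert>
#include <optional>
#include <string>
#include <vector>

struct PointerArg {
  std::string Name;
  std::optional<std::string> ElemTy; // element type, if already deduced
};

// Mirrors the loop over the first two pointer operands: each known element
// type overwrites the previous one, so src rewrites dest when both exist.
std::optional<std::string>
deduceAsyncCopyElemTy(const std::vector<PointerArg> &PtrArgs) {
  std::optional<std::string> Known;
  for (const PointerArg &A : PtrArgs)
    if (A.ElemTy)
      Known = A.ElemTy;
  return Known;
}

int main() {
  // Only the source type is known, as in the @bar test above: it is
  // propagated to the destination as well.
  std::vector<PointerArg> Args = {{"%_arg_Local", std::nullopt},
                                  {"%_arg", "<4 x i8>"}};
  assert(*deduceAsyncCopyElemTy(Args) == "<4 x i8>");
  return 0;
}
```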
>From f60f7b47a92dbf2f3d994868d3cd43b86a18a76c Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 3 Jul 2024 16:46:49 +0100
Subject: [PATCH 146/246] [InstCombine][X86] Add multiply-by-one handling for
MULH/PMULHU/PMULHRS intrinsics
MULH/PMULHU simplify to ASHR/ZERO, as they just become a SEXT/ZEXT sign-splat instruction.
PMULHRS doesn't simplify as much, so I've not attempted to fold it.
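For reference, here is a minimal scalar sketch of why the fold is sound (standalone C++, not the InstCombine code; `pmulh_scalar` and `pmulhu_scalar` are illustrative names): the high 16 bits of x * 1 are the sign splat of x in the signed case, which is exactly ashr x, 15, and are zero in the unsigned case.

```cpp
#include <cassert>
#include <cstdint>

// High 16 bits of the 32-bit signed product, like PMULH on one lane.
// (Assumes arithmetic right shift of negative values, as on mainstream
// compilers; guaranteed since C++20.)
int16_t pmulh_scalar(int16_t a, int16_t b) {
  return static_cast<int16_t>((int32_t(a) * int32_t(b)) >> 16);
}

// High 16 bits of the 32-bit unsigned product, like PMULHU on one lane.
uint16_t pmulhu_scalar(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>((uint32_t(a) * uint32_t(b)) >> 16);
}

int main() {
  for (int x = -32768; x <= 32767; ++x) {
    // Signed: the high half of x * 1 is the sign splat, i.e. ashr x, 15.
    assert(pmulh_scalar(int16_t(x), 1) == int16_t(x) >> 15);
    // Unsigned: the high half of x * 1 is always zero.
    assert(pmulhu_scalar(uint16_t(x), 1) == 0);
  }
  return 0;
}
```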
---
.../lib/Target/X86/X86InstCombineIntrinsic.cpp | 10 ++++++++++
.../Transforms/InstCombine/X86/x86-pmulh.ll | 12 ++++++------
.../Transforms/InstCombine/X86/x86-pmulhu.ll | 18 ++++++------------
3 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index 6d4734d477b3e..163584b3750d3 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -521,6 +521,16 @@ static Value *simplifyX86pmulh(IntrinsicInst &II,
if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1))
return ConstantAggregateZero::get(ResTy);
+ // Multiply by one.
+ if (!IsRounding) {
+ if (match(Arg0, PatternMatch::m_One()))
+ return IsSigned ? Builder.CreateAShr(Arg1, 15)
+ : ConstantAggregateZero::get(ResTy);
+ if (match(Arg1, PatternMatch::m_One()))
+ return IsSigned ? Builder.CreateAShr(Arg0, 15)
+ : ConstantAggregateZero::get(ResTy);
+ }
+
// Constant folding.
if (!isa<Constant>(Arg0) || !isa<Constant>(Arg1))
return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
index 699a3c9198e8a..185ab46deed89 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
@@ -111,7 +111,7 @@ define <32 x i16> @zero_pmulh_512_commute(<32 x i16> %a0) {
define <8 x i16> @one_pmulh_128(<8 x i16> %a0) {
; CHECK-LABEL: @one_pmulh_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: [[TMP1:%.*]] = ashr <8 x i16> [[A0:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
; CHECK-NEXT: ret <8 x i16> [[TMP1]]
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
@@ -120,7 +120,7 @@ define <8 x i16> @one_pmulh_128(<8 x i16> %a0) {
define <8 x i16> @one_pmulh_128_commute(<8 x i16> %a0) {
; CHECK-LABEL: @one_pmulh_128_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> [[A0:%.*]])
+; CHECK-NEXT: [[TMP1:%.*]] = ashr <8 x i16> [[A0:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
; CHECK-NEXT: ret <8 x i16> [[TMP1]]
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
@@ -129,7 +129,7 @@ define <8 x i16> @one_pmulh_128_commute(<8 x i16> %a0) {
define <16 x i16> @one_pmulh_256(<16 x i16> %a0) {
; CHECK-LABEL: @one_pmulh_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: [[TMP1:%.*]] = ashr <16 x i16> [[A0:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
; CHECK-NEXT: ret <16 x i16> [[TMP1]]
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
@@ -138,7 +138,7 @@ define <16 x i16> @one_pmulh_256(<16 x i16> %a0) {
define <16 x i16> @one_pmulh_256_commute(<16 x i16> %a0) {
; CHECK-LABEL: @one_pmulh_256_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> [[A0:%.*]])
+; CHECK-NEXT: [[TMP1:%.*]] = ashr <16 x i16> [[A0:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
; CHECK-NEXT: ret <16 x i16> [[TMP1]]
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> %a0)
@@ -147,7 +147,7 @@ define <16 x i16> @one_pmulh_256_commute(<16 x i16> %a0) {
define <32 x i16> @one_pmulh_512(<32 x i16> %a0) {
; CHECK-LABEL: @one_pmulh_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[A0:%.*]], <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+; CHECK-NEXT: [[TMP1:%.*]] = ashr <32 x i16> [[A0:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
; CHECK-NEXT: ret <32 x i16> [[TMP1]]
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
@@ -156,7 +156,7 @@ define <32 x i16> @one_pmulh_512(<32 x i16> %a0) {
define <32 x i16> @one_pmulh_512_commute(<32 x i16> %a0) {
; CHECK-LABEL: @one_pmulh_512_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> [[A0:%.*]])
+; CHECK-NEXT: [[TMP1:%.*]] = ashr <32 x i16> [[A0:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
; CHECK-NEXT: ret <32 x i16> [[TMP1]]
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> %a0)
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
index e970ae6080612..b18833f703a5f 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
@@ -111,8 +111,7 @@ define <32 x i16> @zero_pmulhu_512_commute(<32 x i16> %a0) {
define <8 x i16> @one_pmulhu_128(<8 x i16> %a0) {
; CHECK-LABEL: @one_pmulhu_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
ret <8 x i16> %1
@@ -120,8 +119,7 @@ define <8 x i16> @one_pmulhu_128(<8 x i16> %a0) {
define <8 x i16> @one_pmulhu_128_commute(<8 x i16> %a0) {
; CHECK-LABEL: @one_pmulhu_128_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+; CHECK-NEXT: ret <8 x i16> zeroinitializer
;
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
ret <8 x i16> %1
@@ -129,8 +127,7 @@ define <8 x i16> @one_pmulhu_128_commute(<8 x i16> %a0) {
define <16 x i16> @one_pmulhu_256(<16 x i16> %a0) {
; CHECK-LABEL: @one_pmulhu_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
ret <16 x i16> %1
@@ -138,8 +135,7 @@ define <16 x i16> @one_pmulhu_256(<16 x i16> %a0) {
define <16 x i16> @one_pmulhu_256_commute(<16 x i16> %a0) {
; CHECK-LABEL: @one_pmulhu_256_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+; CHECK-NEXT: ret <16 x i16> zeroinitializer
;
%1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <16 x i16> %a0)
ret <16 x i16> %1
@@ -147,8 +143,7 @@ define <16 x i16> @one_pmulhu_256_commute(<16 x i16> %a0) {
define <32 x i16> @one_pmulhu_512(<32 x i16> %a0) {
; CHECK-LABEL: @one_pmulhu_512(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[A0:%.*]], <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
ret <32 x i16> %1
@@ -156,8 +151,7 @@ define <32 x i16> @one_pmulhu_512(<32 x i16> %a0) {
define <32 x i16> @one_pmulhu_512_commute(<32 x i16> %a0) {
; CHECK-LABEL: @one_pmulhu_512_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> [[A0:%.*]])
-; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+; CHECK-NEXT: ret <32 x i16> zeroinitializer
;
%1 = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <32 x i16> %a0)
ret <32 x i16> %1
>From f057130b169fe551b1fec6633fadba26ef19bcdd Mon Sep 17 00:00:00 2001
From: Jonas Devlieghere <jonas at devlieghere.com>
Date: Wed, 3 Jul 2024 09:02:22 -0700
Subject: [PATCH 147/246] [lldb] Remove commented-out Platform::FindPlugin
(NFC)
---
lldb/include/lldb/Target/Platform.h | 15 -------------
lldb/source/Target/Platform.cpp | 34 -----------------------------
2 files changed, 49 deletions(-)
diff --git a/lldb/include/lldb/Target/Platform.h b/lldb/include/lldb/Target/Platform.h
index 48988b838f67a..5ed2fc33356d9 100644
--- a/lldb/include/lldb/Target/Platform.h
+++ b/lldb/include/lldb/Target/Platform.h
@@ -108,21 +108,6 @@ class Platform : public PluginInterface {
static ArchSpec GetAugmentedArchSpec(Platform *platform,
llvm::StringRef triple);
- /// Find a platform plugin for a given process.
- ///
- /// Scans the installed Platform plug-ins and tries to find an instance that
- /// can be used for \a process
- ///
- /// \param[in] process
- /// The process for which to try and locate a platform
- /// plug-in instance.
- ///
- /// \param[in] plugin_name
- /// An optional name of a specific platform plug-in that
- /// should be used. If nullptr, pick the best plug-in.
- // static lldb::PlatformSP
- // FindPlugin (Process *process, ConstString plugin_name);
-
/// Set the target's executable based off of the existing architecture
/// information in \a target given a path to an executable \a exe_file.
///
diff --git a/lldb/source/Target/Platform.cpp b/lldb/source/Target/Platform.cpp
index b3116545b91d1..c06abd3070f31 100644
--- a/lldb/source/Target/Platform.cpp
+++ b/lldb/source/Target/Platform.cpp
@@ -161,40 +161,6 @@ Platform::LocateExecutableScriptingResources(Target *target, Module &module,
return FileSpecList();
}
-// PlatformSP
-// Platform::FindPlugin (Process *process, ConstString plugin_name)
-//{
-// PlatformCreateInstance create_callback = nullptr;
-// if (plugin_name)
-// {
-// create_callback =
-// PluginManager::GetPlatformCreateCallbackForPluginName (plugin_name);
-// if (create_callback)
-// {
-// ArchSpec arch;
-// if (process)
-// {
-// arch = process->GetTarget().GetArchitecture();
-// }
-// PlatformSP platform_sp(create_callback(process, &arch));
-// if (platform_sp)
-// return platform_sp;
-// }
-// }
-// else
-// {
-// for (uint32_t idx = 0; (create_callback =
-// PluginManager::GetPlatformCreateCallbackAtIndex(idx)) != nullptr;
-// ++idx)
-// {
-// PlatformSP platform_sp(create_callback(process, nullptr));
-// if (platform_sp)
-// return platform_sp;
-// }
-// }
-// return PlatformSP();
-//}
-
Status Platform::GetSharedModule(
const ModuleSpec &module_spec, Process *process, ModuleSP &module_sp,
const FileSpecList *module_search_paths_ptr,
>From 3ab2247d10673419609333a33bca0eca8a56bf3d Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 15:51:39 +0200
Subject: [PATCH 148/246] [ConstantRange] Optimize icmp() implementation (NFC)
These are pretty hot code paths, so provide direct implementations
for them, instead of going through makeSatisfyingICmpRegion().
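As a sanity check of the identity these fast paths rely on (for nonempty sets, every element of L is unsigned-less-than every element of R exactly when the unsigned max of L is less than the unsigned min of R), here is a minimal brute-force sketch. It is standalone C++ over contiguous 8-bit intervals for simplicity; real ConstantRanges may wrap, but the extreme-value identity holds for any nonempty set.

```cpp
#include <cassert>
#include <cstdint>

struct Interval { uint8_t Lo, Hi; }; // inclusive, Lo <= Hi

// Brute force: does every element of L compare unsigned-less-than every
// element of R?
bool allULT(Interval L, Interval R) {
  for (unsigned A = L.Lo; A <= L.Hi; ++A)
    for (unsigned B = R.Lo; B <= R.Hi; ++B)
      if (!(A < B))
        return false;
  return true;
}

int main() {
  // Exhaustively check that the single extreme-value comparison used by
  // the ICMP_ULT fast path agrees with the brute-force answer.
  for (unsigned LLo = 0; LLo < 16; ++LLo)
    for (unsigned LHi = LLo; LHi < 16; ++LHi)
      for (unsigned RLo = 0; RLo < 16; ++RLo)
        for (unsigned RHi = RLo; RHi < 16; ++RHi) {
          Interval L{uint8_t(LLo), uint8_t(LHi)};
          Interval R{uint8_t(RLo), uint8_t(RHi)};
          assert(allULT(L, R) == (LHi < RLo));
        }
  return 0;
}
```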
---
llvm/lib/IR/ConstantRange.cpp | 31 ++++++++++++++++++++++++++++++-
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/IR/ConstantRange.cpp b/llvm/lib/IR/ConstantRange.cpp
index 50de975d83c0a..0ead677422803 100644
--- a/llvm/lib/IR/ConstantRange.cpp
+++ b/llvm/lib/IR/ConstantRange.cpp
@@ -241,7 +241,36 @@ bool ConstantRange::getEquivalentICmp(CmpInst::Predicate &Pred,
bool ConstantRange::icmp(CmpInst::Predicate Pred,
const ConstantRange &Other) const {
- return makeSatisfyingICmpRegion(Pred, Other).contains(*this);
+ if (isEmptySet() || Other.isEmptySet())
+ return true;
+
+ switch (Pred) {
+ case CmpInst::ICMP_EQ:
+ if (const APInt *L = getSingleElement())
+ if (const APInt *R = Other.getSingleElement())
+ return *L == *R;
+ return false;
+ case CmpInst::ICMP_NE:
+ return inverse().contains(Other);
+ case CmpInst::ICMP_ULT:
+ return getUnsignedMax().ult(Other.getUnsignedMin());
+ case CmpInst::ICMP_ULE:
+ return getUnsignedMax().ule(Other.getUnsignedMin());
+ case CmpInst::ICMP_UGT:
+ return getUnsignedMin().ugt(Other.getUnsignedMax());
+ case CmpInst::ICMP_UGE:
+ return getUnsignedMin().uge(Other.getUnsignedMax());
+ case CmpInst::ICMP_SLT:
+ return getSignedMax().slt(Other.getSignedMin());
+ case CmpInst::ICMP_SLE:
+ return getSignedMax().sle(Other.getSignedMin());
+ case CmpInst::ICMP_SGT:
+ return getSignedMin().sgt(Other.getSignedMax());
+ case CmpInst::ICMP_SGE:
+ return getSignedMin().sge(Other.getSignedMax());
+ default:
+ llvm_unreachable("Invalid ICmp predicate");
+ }
}
/// Exact mul nuw region for single element RHS.
>From 584e431a4b257098d1ff13a0e9926842222ba601 Mon Sep 17 00:00:00 2001
From: Krystian Stasiowski <sdkrystian at gmail.com>
Date: Wed, 3 Jul 2024 12:12:53 -0400
Subject: [PATCH 149/246] [Clang][Sema] Treat explicit specializations of
static data member templates declared without 'static' as static data members
when diagnosing uses of 'auto' (#97425)
After #93873, clang no longer permits declarations of explicit
specializations of static data member templates to use the `auto`
_placeholder-type-specifier_:
```
struct A {
template<int N>
static constexpr auto x = 0;
template<>
constexpr auto x<1> = 1; // error: 'auto' not allowed in non-static struct member
};
```
This patch fixes the issue.
---
clang/lib/Sema/SemaType.cpp | 3 +--
.../dcl.spec/dcl.type/dcl.spec.auto/p5-cxx14.cpp | 12 ++++++++++++
2 files changed, 13 insertions(+), 2 deletions(-)
create mode 100644 clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p5-cxx14.cpp
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 50c15a1aa89e8..066003c47eb43 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -3194,8 +3194,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
break;
}
case DeclaratorContext::Member: {
- if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
- D.isFunctionDeclarator())
+ if (D.isStaticMember() || D.isFunctionDeclarator())
break;
bool Cxx = SemaRef.getLangOpts().CPlusPlus;
if (isa<ObjCContainerDecl>(SemaRef.CurContext)) {
diff --git a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p5-cxx14.cpp b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p5-cxx14.cpp
new file mode 100644
index 0000000000000..8f3dac2426a30
--- /dev/null
+++ b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p5-cxx14.cpp
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++14
+
+struct A {
+ template<int N>
+ static constexpr auto x = N;
+
+ template<>
+ constexpr auto x<1> = 1;
+
+ template<>
+ static constexpr auto x<2> = 2; // expected-warning{{explicit specialization cannot have a storage class}}
+};
>From ab930ee7cad8b8bf7968bb8d0c0d72524e2313c4 Mon Sep 17 00:00:00 2001
From: Fred Grim <fgrim at apple.com>
Date: Wed, 3 Jul 2024 09:19:18 -0700
Subject: [PATCH 150/246] [llvm-readobj][ELF] Alter JSON/LLVM output on note
sections to allow for multiple notes per section in JSON (#96813)
It turns out that, for corefiles (or really any ELF file with multiple
notes in a section), the LLVM-formatted output is structured so that the
JSON equivalent only retains the last note: the notes are held in a
dictionary in which every key is "Note". This PR changes the layout to a
list of dictionaries to sidestep the issue for JSON output. Prior to this
PR, a note section in the output looked like this (LLVM output):
```
Notes [
NoteSection {
Name: <?>
Offset: 0x2148
Size: 0x1F864
Note {
Owner: CORE
Data size: 0x150
Type: NT_PRSTATUS (prstatus structure)
Description data (
0000: 06000000 00000000 00000000 06000000 |................|
...
)
}
Note {
Owner: CORE
Data size: 0x88
Type: NT_PRPSINFO (prpsinfo structure)
Description data (
0000: 02440000 00000000 04054040 00000000 |.D........@@....|
....
```
But is now:
```
NoteSections [
NoteSection {
Name: <?>
Offset: 0x2148
Size: 0x1F864
Notes [
{
Owner: CORE
Data size: 0x150
Type: NT_PRSTATUS (prstatus structure)
Description data (
0000: 06000000 00000000 00000000 06000000 |................|
...
)
}
{
Owner: CORE
Data size: 0x88
Type: NT_PRPSINFO (prpsinfo structure)
Description data (
0000: 02440000 00000000 04054040 00000000 |.D........@@....|
...
```
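The underlying failure mode is just key collision in a JSON object. A minimal sketch (standalone C++ with std::map standing in for a JSON object; not the llvm-readobj serialization code):

```cpp
#include <cassert>
#include <map>
#include <string>
#include <vector>

int main() {
  // A dictionary keyed by the literal string "Note" can hold only one
  // entry, so serializing several notes under the same key keeps just
  // the last one.
  std::map<std::string, std::string> AsDict;
  AsDict["Note"] = "NT_PRSTATUS";
  AsDict["Note"] = "NT_PRPSINFO"; // overwrites the first note
  assert(AsDict.size() == 1 && AsDict["Note"] == "NT_PRPSINFO");

  // The list-of-dictionaries shape used by the new NoteSections/Notes
  // layout has no such collision and keeps both notes.
  std::vector<std::string> AsList;
  AsList.push_back("NT_PRSTATUS");
  AsList.push_back("NT_PRPSINFO");
  assert(AsList.size() == 2);
  return 0;
}
```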
---
lld/test/ELF/gnu-property-align-32.s | 4 +-
lld/test/ELF/gnu-property-align.s | 4 +-
lld/test/ELF/partition-notes.s | 10 +-
.../test/tools/llvm-objcopy/ELF/add-note.test | 6 +-
.../ELF/AArch64/aarch64-feature-pauth.s | 12 +-
.../ELF/AArch64/aarch64-note-gnu-property.s | 6 +-
.../llvm-readobj/ELF/AArch64/memtag.test | 6 +-
.../ELF/AArch64/note-android-unknown.test | 6 +-
.../tools/llvm-readobj/ELF/gnu-note-size.test | 6 +-
.../tools/llvm-readobj/ELF/gnu-notes.test | 42 ++++--
.../llvm-readobj/ELF/note-amd-invalid-v2.test | 38 +++--
.../llvm-readobj/ELF/note-amd-invalid-v3.test | 6 +-
.../llvm-readobj/ELF/note-amd-valid-v2.test | 34 +++--
.../llvm-readobj/ELF/note-amd-valid-v3.s | 6 +-
llvm/test/tools/llvm-readobj/ELF/note-amd.s | 22 ++-
.../llvm-readobj/ELF/note-amdgpu-invalid.s | 12 +-
.../tools/llvm-readobj/ELF/note-amdgpu.test | 10 +-
.../llvm-readobj/ELF/note-core-ntfile.test | 108 +++++++-------
.../tools/llvm-readobj/ELF/note-core.test | 7 +-
.../llvm-readobj/ELF/note-freebsd-core.test | 132 +++++++++---------
.../tools/llvm-readobj/ELF/note-freebsd.test | 120 ++++++++--------
.../tools/llvm-readobj/ELF/note-generic.s | 50 ++++---
.../llvm-readobj/ELF/note-gnu-property.s | 52 +++----
.../llvm-readobj/ELF/note-gnu-property2.s | 20 +--
.../llvm-readobj/ELF/note-llvmompoffload.test | 40 +++---
.../ELF/note-multiple-sections.test | 39 ++++++
.../llvm-readobj/ELF/note-netbsd-core.test | 34 ++---
.../llvm-readobj/ELF/note-openbsd-core.test | 54 +++----
.../tools/llvm-readobj/ELF/note-unknown.s | 68 +++++----
llvm/test/tools/llvm-readobj/archive.test | 4 +-
llvm/tools/llvm-readobj/ELFDumper.cpp | 15 +-
31 files changed, 574 insertions(+), 399 deletions(-)
create mode 100644 llvm/test/tools/llvm-readobj/ELF/note-multiple-sections.test
diff --git a/lld/test/ELF/gnu-property-align-32.s b/lld/test/ELF/gnu-property-align-32.s
index 8022a49d34c6c..c9c64f930da96 100644
--- a/lld/test/ELF/gnu-property-align-32.s
+++ b/lld/test/ELF/gnu-property-align-32.s
@@ -17,7 +17,9 @@
# CHECK-NEXT: Info: 0
# CHECK-NEXT: AddressAlignment: 4
-# CHECK: Note {
+# CHECK: Size: 0x1C
+# CHECK-NEXT: Notes [
+# CHECK-NEXT: {
# CHECK-NEXT: Owner: GNU
# CHECK-NEXT: Data size: 0xC
# CHECK-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
diff --git a/lld/test/ELF/gnu-property-align.s b/lld/test/ELF/gnu-property-align.s
index b109c09039a2c..d920ea5d0e468 100644
--- a/lld/test/ELF/gnu-property-align.s
+++ b/lld/test/ELF/gnu-property-align.s
@@ -17,7 +17,9 @@
# CHECK-NEXT: Info: 0
# CHECK-NEXT: AddressAlignment: 8
-# CHECK: Note {
+# CHECK: Size: 0x20
+# CHECK-NEXT: Notes [
+# CHECK-NEXT: {
# CHECK-NEXT: Owner: GNU
# CHECK-NEXT: Data size: 0x10
# CHECK-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
diff --git a/lld/test/ELF/partition-notes.s b/lld/test/ELF/partition-notes.s
index c5ade3a47e052..e8bc32da93d90 100644
--- a/lld/test/ELF/partition-notes.s
+++ b/lld/test/ELF/partition-notes.s
@@ -15,12 +15,13 @@
// CHECK: Type: PT_NOTE
// CHECK-NEXT: Offset: 0x{{0*}}[[NOTE_OFFSET:[^ ]*]]
-// CHECK: Notes [
+// CHECK: NoteSections [
// CHECK-NEXT: NoteSection {
// CHECK-NEXT: Name: .note.obj
// CHECK-NEXT: Offset: 0x{{0*}}[[NOTE_OFFSET]]
// CHECK-NEXT: Size:
-// CHECK-NEXT: Note {
+// CHECK-NEXT: Notes [
+// CHECK-NEXT: {
// CHECK-NEXT: Owner: foo
// CHECK-NEXT: Data size: 0x4
// CHECK-NEXT: Type: NT_VERSION (version)
@@ -28,17 +29,20 @@
// CHECK-NEXT: 0000: 62617200 |bar.|
// CHECK-NEXT: )
// CHECK-NEXT: }
+// CHECK-NEXT: ]
// CHECK-NEXT: }
// CHECK-NEXT: NoteSection {
// CHECK-NEXT: Name: .note.gnu.build-id
// CHECK-NEXT: Offset:
// CHECK-NEXT: Size:
-// CHECK-NEXT: Note {
+// CHECK-NEXT: Notes [
+// CHECK-NEXT: {
// CHECK-NEXT: Owner: GNU
// CHECK-NEXT: Data size:
// CHECK-NEXT: Type: NT_GNU_BUILD_ID (unique build ID bitstring)
// CHECK-NEXT: Build ID: d5101cb9d03b7e836ba9b64f5768a0b31980920f{{$}}
// CHECK-NEXT: }
+// CHECK-NEXT: ]
// CHECK-NEXT: }
// CHECK-NEXT: ]
diff --git a/llvm/test/tools/llvm-objcopy/ELF/add-note.test b/llvm/test/tools/llvm-objcopy/ELF/add-note.test
index b68103b665db3..839378b2684ce 100644
--- a/llvm/test/tools/llvm-objcopy/ELF/add-note.test
+++ b/llvm/test/tools/llvm-objcopy/ELF/add-note.test
@@ -22,16 +22,18 @@ FileHeader:
Type: ET_REL
Machine: EM_X86_64
-# CHECK: Notes [
+# CHECK: NoteSections [
# CHECK-NEXT: NoteSection {
# CHECK-NEXT: Name: .note.gnu.build-id
# CHECK-NEXT: Offset:
# CHECK-NEXT: Size:
-# CHECK-NEXT: Note {
+# CHECK-NEXT: Notes [
+# CHECK-NEXT: {
# CHECK-NEXT: Owner: GNU
# CHECK-NEXT: Data size: 0x10
# CHECK-NEXT: Type: NT_GNU_BUILD_ID
# CHECK-NEXT: Build ID: 000102030405060708090a0b0c0d0e0f
# CHECK-NEXT: }
+# CHECK-NEXT: ]
# CHECK-NEXT: }
# CHECK-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s b/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s
index 512531748cd25..91cc8b0ed88fa 100644
--- a/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s
+++ b/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s
@@ -27,12 +27,13 @@ end:
# ELF-NEXT: GNU 0x00000018 NT_GNU_PROPERTY_TYPE_0 (property note)
# ELF-NEXT: AArch64 PAuth ABI core info: platform [[PLATFORM]], version [[VERSION]]
-# OBJ: Notes [
+# OBJ: NoteSections [
# OBJ-NEXT: NoteSection {
# OBJ-NEXT: Name: .note.gnu.property
# OBJ-NEXT: Offset: 0x40
# OBJ-NEXT: Size: 0x28
-# OBJ-NEXT: Note {
+# OBJ-NEXT: Notes [
+# OBJ-NEXT: {
# OBJ-NEXT: Owner: GNU
# OBJ-NEXT: Data size: 0x18
# OBJ-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
@@ -40,6 +41,7 @@ end:
# OBJ-NEXT: AArch64 PAuth ABI core info: platform [[PLATFORM]], version [[VERSION]]
# OBJ-NEXT: ]
# OBJ-NEXT: }
+# OBJ-NEXT: ]
# OBJ-NEXT: }
# OBJ-NEXT: ]
@@ -162,12 +164,13 @@ end:
# ELF-ERR-NEXT: GNU 0x000000[[DATASIZE]] NT_GNU_PROPERTY_TYPE_0 (property note)
# ELF-ERR-NEXT: AArch64 PAuth ABI core info: [[ERR]]
-# OBJ-ERR: Notes [
+# OBJ-ERR: NoteSections [
# OBJ-ERR-NEXT: NoteSection {
# OBJ-ERR-NEXT: Name: .note.gnu.property
# OBJ-ERR-NEXT: Offset: 0x40
# OBJ-ERR-NEXT: Size: 0x[[SIZE]]
-# OBJ-ERR-NEXT: Note {
+# OBJ-ERR-NEXT: Notes [
+# OBJ-ERR-NEXT: {
# OBJ-ERR-NEXT: Owner: GNU
# OBJ-ERR-NEXT: Data size: 0x[[DATASIZE]]
# OBJ-ERR-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
@@ -175,6 +178,7 @@ end:
# OBJ-ERR-NEXT: AArch64 PAuth ABI core info: [[ERR]]
# OBJ-ERR-NEXT: ]
# OBJ-ERR-NEXT: }
+# OBJ-ERR-NEXT: ]
# OBJ-ERR-NEXT: }
# OBJ-ERR-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-note-gnu-property.s b/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-note-gnu-property.s
index b517f0b381554..37dda7aaf31fa 100644
--- a/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-note-gnu-property.s
+++ b/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-note-gnu-property.s
@@ -9,12 +9,13 @@
// GNU-NEXT: GNU 0x00000010 NT_GNU_PROPERTY_TYPE_0 (property note)
// GNU-NEXT: Properties: aarch64 feature: BTI, PAC, GCS
-// LLVM: Notes [
+// LLVM: NoteSections [
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.gnu.property
// LLVM-NEXT: Offset: 0x40
// LLVM-NEXT: Size: 0x20
-// LLVM-NEXT: Note {
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
// LLVM-NEXT: Owner: GNU
// LLVM-NEXT: Data size: 0x10
// LLVM-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
@@ -22,6 +23,7 @@
// LLVM-NEXT: aarch64 feature: BTI, PAC, GCS
// LLVM-NEXT: ]
// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/AArch64/memtag.test b/llvm/test/tools/llvm-readobj/ELF/AArch64/memtag.test
index 4bc9308feda0b..213eeed9f3a0a 100644
--- a/llvm/test/tools/llvm-readobj/ELF/AArch64/memtag.test
+++ b/llvm/test/tools/llvm-readobj/ELF/AArch64/memtag.test
@@ -90,13 +90,14 @@
# GNU-OK-NEXT: Android 0x00000004 NT_ANDROID_TYPE_MEMTAG (Android memory tagging information)
# GNU-BAD-NEXT: Android 0x00000000 NT_ANDROID_TYPE_MEMTAG (Android memory tagging information)
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.android.memtag
# LLVM-NEXT: Offset: 0x40
# LLVM-OK-NEXT: Size: 0x18
# LLVM-BAD-NEXT: Size: 0x14
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: Android
# LLVM-OK-NEXT: Data size: 0x4
# LLVM-BAD-NEXT: Data size: 0x0
@@ -114,6 +115,7 @@
# NOSTACK-NEXT: Stack: Disabled
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/AArch64/note-android-unknown.test b/llvm/test/tools/llvm-readobj/ELF/AArch64/note-android-unknown.test
index 3e48086ba6a98..b6d4718d9d981 100644
--- a/llvm/test/tools/llvm-readobj/ELF/AArch64/note-android-unknown.test
+++ b/llvm/test/tools/llvm-readobj/ELF/AArch64/note-android-unknown.test
@@ -7,12 +7,13 @@
# GNU-NEXT: Android 0x00000005 Unknown note type: (0x00001337)
# GNU-NEXT: description data: 01 23 45 67 89
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.android.unknown
# LLVM-NEXT: Offset: 0x40
# LLVM-NEXT: Size: 0x1C
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: Android
# LLVM-NEXT: Data size: 0x5
# LLVM-NEXT: Type: Unknown (0x00001337)
@@ -20,6 +21,7 @@
# LLVM-NEXT: 0000: 01234567 89
# LLVM-NEXT: )
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/gnu-note-size.test b/llvm/test/tools/llvm-readobj/ELF/gnu-note-size.test
index 2f131a6cb347b..2b82230605ce5 100644
--- a/llvm/test/tools/llvm-readobj/ELF/gnu-note-size.test
+++ b/llvm/test/tools/llvm-readobj/ELF/gnu-note-size.test
@@ -11,12 +11,13 @@
# GNU-NEXT: <corrupt GNU_ABI_TAG>
# GNU-EMPTY:
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.ABI-tag
# LLVM-NEXT: Offset:
# LLVM-NEXT: Size: 0x14
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: GNU
# LLVM-NEXT: Data size: 0x4
# LLVM-NEXT: Type: NT_GNU_ABI_TAG (ABI version tag)
@@ -25,6 +26,7 @@
# LLVM-NEXT: 0000: 00000000 |....|
# LLVM-NEXT: )
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/gnu-notes.test b/llvm/test/tools/llvm-readobj/ELF/gnu-notes.test
index e73238e6093a8..49b8f0dd2c5d8 100644
--- a/llvm/test/tools/llvm-readobj/ELF/gnu-notes.test
+++ b/llvm/test/tools/llvm-readobj/ELF/gnu-notes.test
@@ -28,46 +28,53 @@
# GNU-NEXT: description data: 61 62 63 64
# GNU-EMPTY:
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.ABI-tag
# LLVM-NEXT: Offset: 0x78
# LLVM-NEXT: Size: 0x20
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: GNU
# LLVM-NEXT: Data size: 0x10
# LLVM-NEXT: Type: NT_GNU_ABI_TAG (ABI version tag)
# LLVM-NEXT: OS: Linux
# LLVM-NEXT: ABI: 2.6.32
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.gnu.build-id
# LLVM-NEXT: Offset: 0x98
# LLVM-NEXT: Size: 0x20
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: GNU
# LLVM-NEXT: Data size: 0x10
# LLVM-NEXT: Type: NT_GNU_BUILD_ID (unique build ID bitstring)
# LLVM-NEXT: Build ID: 4fcb712aa6387724a9f465a32cd8c14b
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.gnu.gold-version
# LLVM-NEXT: Offset: 0xB8
# LLVM-NEXT: Size: 0x1C
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: GNU
# LLVM-NEXT: Data size: 0x9
# LLVM-NEXT: Type: NT_GNU_GOLD_VERSION (gold version)
# LLVM-NEXT: Version: gold 1.11
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.gnu.unknown
# LLVM-NEXT: Offset: 0xD4
# LLVM-NEXT: Size: 0x14
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: GNU
# LLVM-NEXT: Data size: 0x4
# LLVM-NEXT: Type: Unknown (0x0000abcd)
@@ -75,21 +82,24 @@
# LLVM-NEXT: 0000: 61626364 |abcd|
# LLVM-NEXT: )
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
## Note: the section name is <?> here because the section header table is not present.
-# LLVM-STRIPPED: Notes [
+# LLVM-STRIPPED: NoteSections [
# LLVM-STRIPPED-NEXT: NoteSection {
# LLVM-STRIPPED-NEXT: Name: <?>
# LLVM-STRIPPED-NEXT: Offset: 0x78
# LLVM-STRIPPED-NEXT: Size: 0x20
-# LLVM-STRIPPED-NEXT: Note {
+# LLVM-STRIPPED-NEXT: Notes [
+# LLVM-STRIPPED-NEXT: {
# LLVM-STRIPPED-NEXT: Owner: GNU
# LLVM-STRIPPED-NEXT: Data size: 0x10
# LLVM-STRIPPED-NEXT: Type: NT_GNU_BUILD_ID (unique build ID bitstring)
# LLVM-STRIPPED-NEXT: Build ID: 4fcb712aa6387724a9f465a32cd8c14b
# LLVM-STRIPPED-NEXT: }
+# LLVM-STRIPPED-NEXT: ]
# LLVM-STRIPPED-NEXT: }
# LLVM-STRIPPED-NEXT: ]
@@ -144,12 +154,14 @@ ProgramHeaders:
# ERR1-GNU-NEXT: warning: '[[FILE]]': unable to read notes from the SHT_NOTE section with index 1: invalid offset (0xffff0000) or size (0x0)
# ERR1-GNU-EMPTY:
-# ERR1-LLVM: Notes [
+# ERR1-LLVM: NoteSections [
# ERR1-LLVM-NEXT: NoteSection {
# ERR1-LLVM-NEXT: Name: .note
# ERR1-LLVM-NEXT: Offset: 0xFFFF0000
# ERR1-LLVM-NEXT: Size: 0x0
+# ERR1-LLVM-NEXT: Notes [
# ERR1-LLVM-NEXT: warning: '[[FILE]]': unable to read notes from the SHT_NOTE section with index 1: invalid offset (0xffff0000) or size (0x0)
+# ERR1-LLVM-NEXT: ]
# ERR1-LLVM-NEXT: }
# ERR1-LLVM-NEXT: ]
@@ -177,12 +189,14 @@ Sections:
# ERR2-GNU-NEXT: warning: '[[FILE]]': unable to read notes from the SHT_NOTE section with index 1: invalid offset (0x40) or size (0xffff0000)
# ERR2-GNU-EMPTY:
-# ERR2-LLVM: Notes [
+# ERR2-LLVM: NoteSections [
# ERR2-LLVM-NEXT: NoteSection {
# ERR2-LLVM-NEXT: Name: .note
# ERR2-LLVM-NEXT: Offset: 0x40
# ERR2-LLVM-NEXT: Size: 0xFFFF0000
+# ERR2-LLVM-NEXT: Notes [
# ERR2-LLVM-NEXT: warning: '[[FILE]]': unable to read notes from the SHT_NOTE section with index 1: invalid offset (0x40) or size (0xffff0000)
+# ERR2-LLVM-NEXT: ]
# ERR2-LLVM-NEXT: }
# ERR2-LLVM-NEXT: ]
@@ -198,12 +212,14 @@ Sections:
# ERR3-GNU-NEXT: warning: '[[FILE]]': unable to read notes from the PT_NOTE segment with index 0: invalid offset (0xffff0000) or size (0x0)
# ERR3-GNU-NOT: {{.}}
-# ERR3-LLVM: Notes [
+# ERR3-LLVM: NoteSections [
# ERR3-LLVM-NEXT: NoteSection {
# ERR3-LLVM-NEXT: Name: <?>
# ERR3-LLVM-NEXT: Offset: 0xFFFF0000
# ERR3-LLVM-NEXT: Size: 0x0
+# ERR3-LLVM-NEXT: Notes [
# ERR3-LLVM-NEXT: warning: '[[FILE]]': unable to read notes from the PT_NOTE segment with index 0: invalid offset (0xffff0000) or size (0x0)
+# ERR3-LLVM-NEXT: ]
# ERR3-LLVM-NEXT: }
# ERR3-LLVM-NEXT: ]
@@ -229,12 +245,14 @@ ProgramHeaders:
# ERR4-GNU-NEXT: warning: '[[FILE]]': unable to read notes from the PT_NOTE segment with index 0: invalid offset (0x0) or size (0xffff0000)
# ERR4-GNU-EMPTY:
-# ERR4-LLVM: Notes [
+# ERR4-LLVM: NoteSections [
# ERR4-LLVM-NEXT: NoteSection {
# ERR4-LLVM-NEXT: Name: <?>
# ERR4-LLVM-NEXT: Offset: 0x0
# ERR4-LLVM-NEXT: Size: 0xFFFF0000
+# ERR4-LLVM-NEXT: Notes [
# ERR4-LLVM-NEXT: warning: '[[FILE]]': unable to read notes from the PT_NOTE segment with index 0: invalid offset (0x0) or size (0xffff0000)
+# ERR4-LLVM-NEXT: ]
# ERR4-LLVM-NEXT: }
# ERR4-LLVM-NEXT: ]
@@ -247,6 +265,6 @@ ProgramHeaders:
# PHENTSIZE-WARN-GNU: warning: '[[FILE]]': unable to read program headers to locate the PT_DYNAMIC segment: invalid e_phentsize: 1
# PHENTSIZE-WARN-GNU: warning: '[[FILE]]': unable to read program headers to locate the PT_NOTE segment: invalid e_phentsize: 1
-# PHENTSIZE-WARN-LLVM: Notes [
+# PHENTSIZE-WARN-LLVM: NoteSections [
# PHENTSIZE-WARN-LLVM-NEXT: warning: '[[FILE]]': unable to read program headers to locate the PT_NOTE segment: invalid e_phentsize: 1
# PHENTSIZE-WARN-LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-amd-invalid-v2.test b/llvm/test/tools/llvm-readobj/ELF/note-amd-invalid-v2.test
index 778724b8ab6ce..a9b099ca48ace 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-amd-invalid-v2.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-amd-invalid-v2.test
@@ -5,105 +5,123 @@
# RUN: llvm-readobj --notes %t.o | FileCheck %s --match-full-lines --check-prefix=LLVM
# RUN: llvm-readelf --notes %t.o | FileCheck %s --match-full-lines --check-prefix=GNU
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_code_object_version_0
# LLVM-NEXT: Offset: 0x40
# LLVM-NEXT: Size: 0x14
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x4
# LLVM-NEXT: Type: NT_AMD_HSA_CODE_OBJECT_VERSION (AMD HSA Code Object Version)
# LLVM-NEXT: AMD HSA Code Object Version: Invalid AMD HSA Code Object Version
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_code_object_version_1
# LLVM-NEXT: Offset: 0x54
# LLVM-NEXT: Size: 0x1C
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0xC
# LLVM-NEXT: Type: NT_AMD_HSA_CODE_OBJECT_VERSION (AMD HSA Code Object Version)
# LLVM-NEXT: AMD HSA Code Object Version: Invalid AMD HSA Code Object Version
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_hsail_0
# LLVM-NEXT: Offset: 0x70
# LLVM-NEXT: Size: 0x1C
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0xA
# LLVM-NEXT: Type: NT_AMD_HSA_HSAIL (AMD HSA HSAIL Properties)
# LLVM-NEXT: AMD HSA HSAIL Properties: Invalid AMD HSA HSAIL Properties
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_hsail_1
# LLVM-NEXT: Offset: 0x8C
# LLVM-NEXT: Size: 0x24
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x14
# LLVM-NEXT: Type: NT_AMD_HSA_HSAIL (AMD HSA HSAIL Properties)
# LLVM-NEXT: AMD HSA HSAIL Properties: Invalid AMD HSA HSAIL Properties
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_isa_version_0
# LLVM-NEXT: Offset: 0xB0
# LLVM-NEXT: Size: 0x18
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x8
# LLVM-NEXT: Type: NT_AMD_HSA_ISA_VERSION (AMD HSA ISA Version)
# LLVM-NEXT: AMD HSA ISA Version: Invalid AMD HSA ISA Version
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_isa_version_1
# LLVM-NEXT: Offset: 0xC8
# LLVM-NEXT: Size: 0x28
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x17
# LLVM-NEXT: Type: NT_AMD_HSA_ISA_VERSION (AMD HSA ISA Version)
# LLVM-NEXT: AMD HSA ISA Version: Invalid AMD HSA ISA Version
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_isa_version_2
# LLVM-NEXT: Offset: 0xF0
# LLVM-NEXT: Size: 0x28
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x17
# LLVM-NEXT: Type: NT_AMD_HSA_ISA_VERSION (AMD HSA ISA Version)
# LLVM-NEXT: AMD HSA ISA Version: Invalid AMD HSA ISA Version
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_isa_version_3
# LLVM-NEXT: Offset: 0x118
# LLVM-NEXT: Size: 0x28
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x17
# LLVM-NEXT: Type: NT_AMD_HSA_ISA_VERSION (AMD HSA ISA Version)
# LLVM-NEXT: AMD HSA ISA Version: Invalid AMD HSA ISA Version
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_pal_metadata
# LLVM-NEXT: Offset: 0x140
# LLVM-NEXT: Size: 0x14
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x4
# LLVM-NEXT: Type: NT_AMD_PAL_METADATA (AMD PAL Metadata)
# LLVM-NEXT: AMD PAL Metadata: Invalid AMD PAL Metadata
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-amd-invalid-v3.test b/llvm/test/tools/llvm-readobj/ELF/note-amd-invalid-v3.test
index dd090b9483e29..48d4f3ee4ce85 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-amd-invalid-v3.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-amd-invalid-v3.test
@@ -5,12 +5,13 @@
# RUN: llvm-readobj --notes %t.o | FileCheck %s --match-full-lines --check-prefix=LLVM
# RUN: llvm-readelf --notes %t.o | FileCheck %s --match-full-lines --check-prefix=GNU
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amdgpu_metadata
# LLVM-NEXT: Offset: 0x40
# LLVM-NEXT: Size: 0x38
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMDGPU
# LLVM-NEXT: Data size: 0x24
# LLVM-NEXT: Type: NT_AMDGPU_METADATA (AMDGPU Metadata)
@@ -21,6 +22,7 @@
# LLVM-NEXT: ...
# LLVM-EMPTY:
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-amd-valid-v2.test b/llvm/test/tools/llvm-readobj/ELF/note-amd-valid-v2.test
index 3af1bb2acafdf..894a8993f135e 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-amd-valid-v2.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-amd-valid-v2.test
@@ -5,94 +5,110 @@
# RUN: llvm-readobj --notes %t.o | FileCheck %s --match-full-lines --check-prefix=LLVM
# RUN: llvm-readelf --notes %t.o | FileCheck %s --match-full-lines --check-prefix=GNU
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_code_object_version
# LLVM-NEXT: Offset: 0x40
# LLVM-NEXT: Size: 0x18
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x8
# LLVM-NEXT: Type: NT_AMD_HSA_CODE_OBJECT_VERSION (AMD HSA Code Object Version)
# LLVM-NEXT: AMD HSA Code Object Version: [Major: 2, Minor: 1]
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_hsail
# LLVM-NEXT: Offset: 0x58
# LLVM-NEXT: Size: 0x1C
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0xC
# LLVM-NEXT: Type: NT_AMD_HSA_HSAIL (AMD HSA HSAIL Properties)
# LLVM-NEXT: AMD HSA HSAIL Properties: [HSAIL Major: 2, HSAIL Minor: 1, Profile: 1, Machine Model: 2, Default Float Round: 3]
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_isa_version
# LLVM-NEXT: Offset: 0x74
# LLVM-NEXT: Size: 0x2C
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x1B
# LLVM-NEXT: Type: NT_AMD_HSA_ISA_VERSION (AMD HSA ISA Version)
# LLVM-NEXT: AMD HSA ISA Version: [Vendor: AMD, Architecture: AMDGPU, Major: 8, Minor: 0, Stepping: 2]
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_metadata_0
# LLVM-NEXT: Offset: 0xA0
# LLVM-NEXT: Size: 0x10
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x0
# LLVM-NEXT: Type: NT_AMD_HSA_METADATA (AMD HSA Metadata)
# LLVM-NEXT: AMD HSA Metadata:
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_metadata_1
# LLVM-NEXT: Offset: 0xB0
# LLVM-NEXT: Size: 0x18
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x6
# LLVM-NEXT: Type: NT_AMD_HSA_METADATA (AMD HSA Metadata)
# LLVM-NEXT: AMD HSA Metadata: abcde
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_isa_name_0
# LLVM-NEXT: Offset: 0xC8
# LLVM-NEXT: Size: 0x10
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x0
# LLVM-NEXT: Type: NT_AMD_HSA_ISA_NAME (AMD HSA ISA Name)
# LLVM-NEXT: AMD HSA ISA Name:
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_hsa_isa_name_1
# LLVM-NEXT: Offset: 0xD8
# LLVM-NEXT: Size: 0x18
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x6
# LLVM-NEXT: Type: NT_AMD_HSA_ISA_NAME (AMD HSA ISA Name)
# LLVM-NEXT: AMD HSA ISA Name: abcdef
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.nt_amd_pal_metadata
# LLVM-NEXT: Offset: 0xF0
# LLVM-NEXT: Size: 0x28
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMD
# LLVM-NEXT: Data size: 0x18
# LLVM-NEXT: Type: NT_AMD_PAL_METADATA (AMD PAL Metadata)
# LLVM-NEXT: AMD PAL Metadata: [2: 1][4: 2][8: 4]
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-amd-valid-v3.s b/llvm/test/tools/llvm-readobj/ELF/note-amd-valid-v3.s
index 73022c3c89f78..2397bf7e43ec5 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-amd-valid-v3.s
+++ b/llvm/test/tools/llvm-readobj/ELF/note-amd-valid-v3.s
@@ -6,12 +6,13 @@
# RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=obj < %s | llvm-readobj --notes - | FileCheck %s --match-full-lines --check-prefix=LLVM
# RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=obj < %s | llvm-readelf --notes - | FileCheck %s --match-full-lines --check-prefix=GNU
-#LLVM: Notes [
+#LLVM: NoteSections [
#LLVM-NEXT: NoteSection {
#LLVM-NEXT: Name: .note
#LLVM-NEXT: Offset: 0x40
#LLVM-NEXT: Size: 0x110
-#LLVM-NEXT: Note {
+#LLVM-NEXT: Notes [
+#LLVM-NEXT: {
#LLVM-NEXT: Owner: AMDGPU
#LLVM-NEXT: Data size: 0xFC
#LLVM-NEXT: Type: NT_AMDGPU_METADATA (AMDGPU Metadata)
@@ -33,6 +34,7 @@
#LLVM-NEXT: ...
#LLVM-EMPTY:
#LLVM-NEXT: }
+#LLVM-NEXT: ]
#LLVM-NEXT: }
#LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-amd.s b/llvm/test/tools/llvm-readobj/ELF/note-amd.s
index 260be3a725af7..0649e839f880e 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-amd.s
+++ b/llvm/test/tools/llvm-readobj/ELF/note-amd.s
@@ -34,57 +34,64 @@
// GNU-NEXT: description data: 61 62 63 64 65 66 00
// GNU-EMPTY:
-// LLVM: Notes [
+// LLVM: NoteSections [
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.no.desc
// LLVM-NEXT: Offset:
// LLVM-NEXT: Size:
-// LLVM-NEXT: Note {
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
// LLVM-NEXT: Owner: AMD
// LLVM-NEXT: Data size: 0x0
// LLVM-NEXT: Type: NT_AMD_HSA_METADATA (AMD HSA Metadata)
// LLVM-NEXT: AMD HSA Metadata:
// LLVM-NEXT: }
-// LLVM-NEXT: Note {
+// LLVM-NEXT: {
// LLVM-NEXT: Owner: AMD
// LLVM-NEXT: Data size: 0x0
// LLVM-NEXT: Type: NT_AMD_HSA_ISA_NAME (AMD HSA ISA Name)
// LLVM-NEXT: AMD HSA ISA Name:
// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.desc
// LLVM-NEXT: Offset:
// LLVM-NEXT: Size:
-// LLVM-NEXT: Note {
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
// LLVM-NEXT: Owner: AMD
// LLVM-NEXT: Data size: 0xA
// LLVM-NEXT: Type: NT_AMD_HSA_METADATA (AMD HSA Metadata)
// LLVM-NEXT: AMD HSA Metadata: meta_blah
// LLVM-NEXT: }
-// LLVM-NEXT: Note {
+// LLVM-NEXT: {
// LLVM-NEXT: Owner: AMD
// LLVM-NEXT: Data size: 0x9
// LLVM-NEXT: Type: NT_AMD_HSA_ISA_NAME (AMD HSA ISA Name)
// LLVM-NEXT: AMD HSA ISA Name: isa_blah
// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.other
// LLVM-NEXT: Offset:
// LLVM-NEXT: Size:
-// LLVM-NEXT: Note {
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
// LLVM-NEXT: Owner: AMD
// LLVM-NEXT: Data size: 0x0
// LLVM-NEXT: Type: NT_AMD_PAL_METADATA (AMD PAL Metadata)
// LLVM-NEXT: AMD PAL Metadata:
// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.unknown
// LLVM-NEXT: Offset:
// LLVM-NEXT: Size:
-// LLVM-NEXT: Note {
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
// LLVM-NEXT: Owner: AMD
// LLVM-NEXT: Data size: 0x7
// LLVM-NEXT: Type: Unknown (0x000004d2)
@@ -92,6 +99,7 @@
// LLVM-NEXT: 0000: 61626364 656600 |abcdef.|
// LLVM-NEXT: )
// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-amdgpu-invalid.s b/llvm/test/tools/llvm-readobj/ELF/note-amdgpu-invalid.s
index 0ed791c30954e..39f3e4b4e4623 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-amdgpu-invalid.s
+++ b/llvm/test/tools/llvm-readobj/ELF/note-amdgpu-invalid.s
@@ -30,12 +30,13 @@
# GNU-NEXT: description data: ab cd ef
# GNU-EMPTY:
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.foo
# LLVM-NEXT: Offset: 0x40
# LLVM-NEXT: Size: 0xE8
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMDGPU
# LLVM-NEXT: Data size: 0xD4
# LLVM-NEXT: Type: NT_AMDGPU_METADATA (AMDGPU Metadata)
@@ -55,12 +56,14 @@
# LLVM-NEXT: ...
# LLVM-EMPTY:
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.bar
# LLVM-NEXT: Offset: 0x128
# LLVM-NEXT: Size: 0x30
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMDGPU
# LLVM-NEXT: Data size: 0x3
# LLVM-NEXT: Type: NT_AMDGPU_METADATA (AMDGPU Metadata)
@@ -68,7 +71,7 @@
# LLVM-NEXT: 0000: 123456 |.4V|
# LLVM-NEXT: )
# LLVM-NEXT: }
-# LLVM-NEXT: Note {
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMDGPU
# LLVM-NEXT: Data size: 0x3
# LLVM-NEXT: Type: NT_AMDGPU_METADATA (AMDGPU Metadata)
@@ -76,6 +79,7 @@
# LLVM-NEXT: 0000: ABCDEF |...|
# LLVM-NEXT: )
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT:]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-amdgpu.test b/llvm/test/tools/llvm-readobj/ELF/note-amdgpu.test
index 5d4f41ced9b16..9f86b07c40b4b 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-amdgpu.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-amdgpu.test
@@ -31,12 +31,13 @@
# GNU-NEXT: description data: ab cd
# GNU-EMPTY:
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.foo
# LLVM-NEXT: Offset:
# LLVM-NEXT: Size:
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMDGPU
# LLVM-NEXT: Data size: 0xFB
# LLVM-NEXT: Type: NT_AMDGPU_METADATA (AMDGPU Metadata)
@@ -59,12 +60,14 @@
# LLVM-NEXT: ...
# LLVM-EMPTY:
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.unknown
# LLVM-NEXT: Offset: 0x150
# LLVM-NEXT: Size: 0x18
-# LLVM-NEXT: Note {
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
# LLVM-NEXT: Owner: AMDGPU
# LLVM-NEXT: Data size: 0x2
# LLVM-NEXT: Type: Unknown (0x00000101)
@@ -72,6 +75,7 @@
# LLVM-NEXT: 0000: ABCD |..|
# LLVM-NEXT: )
# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test b/llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test
index f4957b42a8778..c318234c357c6 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test
@@ -63,72 +63,76 @@ ProgramHeaders:
# GNU-NEXT: [stack]
# GNU-EMPTY:
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: <?>
# LLVM-NEXT: Offset:
# LLVM-NEXT: Size:
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: CORE
-# LLVM-NEXT: Data size: 0x80
-# LLVM-NEXT: Type: NT_FILE (mapped files)
-# LLVM-NEXT: Page Size: 4096
-# LLVM-NEXT: Mappings [
+# LLVM-NEXT: Notes [
# LLVM-NEXT: {
-# LLVM-NEXT: Start: 0x1000
-# LLVM-NEXT: End: 0x2000
-# LLVM-NEXT: Offset: 0x3000
-# LLVM-NEXT: Filename: /path/to/a.out
+# LLVM-NEXT: Owner: CORE
+# LLVM-NEXT: Data size: 0x80
+# LLVM-NEXT: Type: NT_FILE (mapped files)
+# LLVM-NEXT: Page Size: 4096
+# LLVM-NEXT: Mappings [
+# LLVM-NEXT: {
+# LLVM-NEXT: Start: 0x1000
+# LLVM-NEXT: End: 0x2000
+# LLVM-NEXT: Offset: 0x3000
+# LLVM-NEXT: Filename: /path/to/a.out
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Start: 0x4000
+# LLVM-NEXT: End: 0x5000
+# LLVM-NEXT: Offset: 0x6000
+# LLVM-NEXT: Filename: /path/to/libc.so
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Start: 0x7000
+# LLVM-NEXT: End: 0x8000
+# LLVM-NEXT: Offset: 0x9000
+# LLVM-NEXT: Filename: [stack]
+# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
-# LLVM-NEXT: {
-# LLVM-NEXT: Start: 0x4000
-# LLVM-NEXT: End: 0x5000
-# LLVM-NEXT: Offset: 0x6000
-# LLVM-NEXT: Filename: /path/to/libc.so
-# LLVM-NEXT: }
-# LLVM-NEXT: {
-# LLVM-NEXT: Start: 0x7000
-# LLVM-NEXT: End: 0x8000
-# LLVM-NEXT: Offset: 0x9000
-# LLVM-NEXT: Filename: [stack]
-# LLVM-NEXT: }
-# LLVM-NEXT: ]
-# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
-# JSON: "Notes": [
+# JSON: "NoteSections": [
# JSON-NEXT: {
# JSON-NEXT: "NoteSection": {
# JSON-NEXT: "Name": "<?>",
# JSON-NEXT: "Offset": 120,
# JSON-NEXT: "Size": 148,
-# JSON-NEXT: "Note": {
-# JSON-NEXT: "Owner": "CORE",
-# JSON-NEXT: "Data size": 128,
-# JSON-NEXT: "Type": "NT_FILE (mapped files)",
-# JSON-NEXT: "Page Size": 4096,
-# JSON-NEXT: "Mappings": [
-# JSON-NEXT: {
-# JSON-NEXT: "Start": 4096,
-# JSON-NEXT: "End": 8192,
-# JSON-NEXT: "Offset": 12288,
-# JSON-NEXT: "Filename": "/path/to/a.out"
-# JSON-NEXT: },
-# JSON-NEXT: {
-# JSON-NEXT: "Start": 16384,
-# JSON-NEXT: "End": 20480,
-# JSON-NEXT: "Offset": 24576,
-# JSON-NEXT: "Filename": "/path/to/libc.so"
-# JSON-NEXT: },
-# JSON-NEXT: {
-# JSON-NEXT: "Start": 28672,
-# JSON-NEXT: "End": 32768,
-# JSON-NEXT: "Offset": 36864,
-# JSON-NEXT: "Filename": "[stack]"
-# JSON-NEXT: }
-# JSON-NEXT: ]
-# JSON-NEXT: }
+# JSON-NEXT: "Notes": [
+# JSON-NEXT: {
+# JSON-NEXT: "Owner": "CORE",
+# JSON-NEXT: "Data size": 128,
+# JSON-NEXT: "Type": "NT_FILE (mapped files)",
+# JSON-NEXT: "Page Size": 4096,
+# JSON-NEXT: "Mappings": [
+# JSON-NEXT: {
+# JSON-NEXT: "Start": 4096,
+# JSON-NEXT: "End": 8192,
+# JSON-NEXT: "Offset": 12288,
+# JSON-NEXT: "Filename": "/path/to/a.out"
+# JSON-NEXT: },
+# JSON-NEXT: {
+# JSON-NEXT: "Start": 16384,
+# JSON-NEXT: "End": 20480,
+# JSON-NEXT: "Offset": 24576,
+# JSON-NEXT: "Filename": "/path/to/libc.so"
+# JSON-NEXT: },
+# JSON-NEXT: {
+# JSON-NEXT: "Start": 28672,
+# JSON-NEXT: "End": 32768,
+# JSON-NEXT: "Offset": 36864,
+# JSON-NEXT: "Filename": "[stack]"
+# JSON-NEXT: }
+# JSON-NEXT: ]
+# JSON-NEXT: }
+# JSON-NEXT: ]
# JSON-NEXT: }
# JSON-NEXT: }
# JSON-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-core.test b/llvm/test/tools/llvm-readobj/ELF/note-core.test
index 84ec96b1702a9..395b3fecccf5f 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-core.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-core.test
@@ -284,11 +284,14 @@
# CHECK-GNU-NEXT: CORE 0x00000000 [[DESC]]
# CHECK-GNU-EMPTY:
-# CHECK-LLVM: Note {
+# CHECK-LLVM: Size: 0x14
+# CHECK-LLVM-NEXT: Notes [
+# CHECK-LLVM-NEXT: {
# CHECK-LLVM-NEXT: Owner: CORE
# CHECK-LLVM-NEXT: Data size: 0x0
# CHECK-LLVM-NEXT: Type: [[DESC]]
-# CHECK-LLVM-NEXT: }
+# CHECK-LLVM-NEXT: }
+# CHECK-LLVM-NEXT: ]
--- !ELF
FileHeader:
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-freebsd-core.test b/llvm/test/tools/llvm-readobj/ELF/note-freebsd-core.test
index 438c278de68ad..200ac1eac9936 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-freebsd-core.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-freebsd-core.test
@@ -68,78 +68,82 @@ ProgramHeaders:
# GNU-NEXT: description data: aa bb cc dd ee ff
# GNU-EMPTY:
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: <?>
# LLVM-NEXT: Offset: 0xB0
# LLVM-NEXT: Size: 0xDC
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_THRMISC (thrmisc structure)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_PROC (proc data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_FILES (files data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_VMMAP (vmmap data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_GROUPS (groups data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_UMASK (umask data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_RLIMIT (rlimit data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_OSREL (osreldate data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_PSSTRINGS (ps_strings data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_PROCSTAT_AUXV (auxv data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: Unknown (0x00012345)
-# LLVM-NEXT: }
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_THRMISC (thrmisc structure)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_PROC (proc data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_FILES (files data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_VMMAP (vmmap data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_GROUPS (groups data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_UMASK (umask data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_RLIMIT (rlimit data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_OSREL (osreldate data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_PSSTRINGS (ps_strings data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_PROCSTAT_AUXV (auxv data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: Unknown (0x00012345)
+# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: <?>
# LLVM-NEXT: Offset: 0x18C
# LLVM-NEXT: Size: 0x1C
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x6
-# LLVM-NEXT: Type: NT_PRPSINFO (prpsinfo structure)
-# LLVM-NEXT: Description data (
-# LLVM-NEXT: 0000: AABBCCDD EEFF |......|
-# LLVM-NEXT: )
-# LLVM-NEXT: }
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x6
+# LLVM-NEXT: Type: NT_PRPSINFO (prpsinfo structure)
+# LLVM-NEXT: Description data (
+# LLVM-NEXT: 0000: AABBCCDD EEFF |......|
+# LLVM-NEXT: )
+# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-freebsd.test b/llvm/test/tools/llvm-readobj/ELF/note-freebsd.test
index 937cb6f426b1a..f079ac8b4c387 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-freebsd.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-freebsd.test
@@ -61,68 +61,70 @@ Sections:
# GNU-NEXT: description data: 61 62 63 64 65 66
# GNU-EMPTY:
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: .note.tag
# LLVM-NEXT: Offset: 0x40
# LLVM-NEXT: Size: 0xCC
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x4
-# LLVM-NEXT: Type: NT_FREEBSD_ABI_TAG (ABI version tag)
-# LLVM-NEXT: ABI tag: 1300076
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x1
-# LLVM-NEXT: Type: NT_FREEBSD_ABI_TAG (ABI version tag)
-# LLVM-NEXT: Description data (
-# LLVM-NEXT: 0000: 6C |l|
-# LLVM-NEXT: )
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x7
-# LLVM-NEXT: Type: NT_FREEBSD_ARCH_TAG (architecture tag)
-# LLVM-NEXT: Arch tag: aarch64
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x4
-# LLVM-NEXT: Type: NT_FREEBSD_FEATURE_CTL (FreeBSD feature control)
-# LLVM-NEXT: Feature flags: ASLR_DISABLE PROTMAX_DISABLE STKGAP_DISABLE WXNEEDED LA48 ASG_DISABLE (0xFFFFFFFF)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x1
-# LLVM-NEXT: Type: NT_FREEBSD_FEATURE_CTL (FreeBSD feature control)
-# LLVM-NEXT: Description data (
-# LLVM-NEXT: 0000: 00 |.|
-# LLVM-NEXT: )
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x4
-# LLVM-NEXT: Type: NT_FREEBSD_NOINIT_TAG (no .init tag)
-# LLVM-NEXT: Description data (
-# LLVM-NEXT: 0000: 00000000 |....|
-# LLVM-NEXT: )
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x6
-# LLVM-NEXT: Type: Unknown (0x00abcdef)
-# LLVM-NEXT: Description data (
-# LLVM-NEXT: 0000: 61626364 6566 |abcdef|
-# LLVM-NEXT: )
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: FreeBSD
-# LLVM-NEXT: Data size: 0x6
-# LLVM-NEXT: Type: Unknown (0x0000000d)
-# LLVM-NEXT: Description data (
-# LLVM-NEXT: 0000: 61626364 6566 |abcdef|
-# LLVM-NEXT: )
-# LLVM-NEXT: }
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x4
+# LLVM-NEXT: Type: NT_FREEBSD_ABI_TAG (ABI version tag)
+# LLVM-NEXT: ABI tag: 1300076
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x1
+# LLVM-NEXT: Type: NT_FREEBSD_ABI_TAG (ABI version tag)
+# LLVM-NEXT: Description data (
+# LLVM-NEXT: 0000: 6C |l|
+# LLVM-NEXT: )
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x7
+# LLVM-NEXT: Type: NT_FREEBSD_ARCH_TAG (architecture tag)
+# LLVM-NEXT: Arch tag: aarch64
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x4
+# LLVM-NEXT: Type: NT_FREEBSD_FEATURE_CTL (FreeBSD feature control)
+# LLVM-NEXT: Feature flags: ASLR_DISABLE PROTMAX_DISABLE STKGAP_DISABLE WXNEEDED LA48 ASG_DISABLE (0xFFFFFFFF)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x1
+# LLVM-NEXT: Type: NT_FREEBSD_FEATURE_CTL (FreeBSD feature control)
+# LLVM-NEXT: Description data (
+# LLVM-NEXT: 0000: 00 |.|
+# LLVM-NEXT: )
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x4
+# LLVM-NEXT: Type: NT_FREEBSD_NOINIT_TAG (no .init tag)
+# LLVM-NEXT: Description data (
+# LLVM-NEXT: 0000: 00000000 |....|
+# LLVM-NEXT: )
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x6
+# LLVM-NEXT: Type: Unknown (0x00abcdef)
+# LLVM-NEXT: Description data (
+# LLVM-NEXT: 0000: 61626364 6566 |abcdef|
+# LLVM-NEXT: )
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: FreeBSD
+# LLVM-NEXT: Data size: 0x6
+# LLVM-NEXT: Type: Unknown (0x0000000d)
+# LLVM-NEXT: Description data (
+# LLVM-NEXT: 0000: 61626364 6566 |abcdef|
+# LLVM-NEXT: )
+# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT:]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-generic.s b/llvm/test/tools/llvm-readobj/ELF/note-generic.s
index 59df75e3ee19c..9f06d3b267d3e 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-generic.s
+++ b/llvm/test/tools/llvm-readobj/ELF/note-generic.s
@@ -20,46 +20,54 @@
// GNU-NEXT: Owner Data size Description
// GNU-NEXT: XYZ 0x00000000 func
-// LLVM: Notes [
+// LLVM: NoteSections [
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.version
// LLVM-NEXT: Offset: 0x40
// LLVM-NEXT: Size: 0x10
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: XYZ
-// LLVM-NEXT: Data size: 0x0
-// LLVM-NEXT: Type: NT_VERSION (version)
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: XYZ
+// LLVM-NEXT: Data size: 0x0
+// LLVM-NEXT: Type: NT_VERSION (version)
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.arch
// LLVM-NEXT: Offset: 0x50
// LLVM-NEXT: Size: 0x10
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: XYZ
-// LLVM-NEXT: Data size: 0x0
-// LLVM-NEXT: Type: NT_ARCH (architecture)
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: XYZ
+// LLVM-NEXT: Data size: 0x0
+// LLVM-NEXT: Type: NT_ARCH (architecture)
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.open
// LLVM-NEXT: Offset: 0x60
// LLVM-NEXT: Size: 0x10
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: XYZ
-// LLVM-NEXT: Data size: 0x0
-// LLVM-NEXT: Type: OPEN
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: XYZ
+// LLVM-NEXT: Data size: 0x0
+// LLVM-NEXT: Type: OPEN
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.func
// LLVM-NEXT: Offset: 0x70
// LLVM-NEXT: Size: 0x10
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: XYZ
-// LLVM-NEXT: Data size: 0x0
-// LLVM-NEXT: Type: func
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: XYZ
+// LLVM-NEXT: Data size: 0x0
+// LLVM-NEXT: Type: func
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-gnu-property.s b/llvm/test/tools/llvm-readobj/ELF/note-gnu-property.s
index 2d0d00f606391..59825266b48f2 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-gnu-property.s
+++ b/llvm/test/tools/llvm-readobj/ELF/note-gnu-property.s
@@ -24,35 +24,37 @@
// GNU-NEXT: x86 feature: IBT, <unknown flags: 0xf000f000>
// GNU-NEXT: <corrupt type (0x2) datasz: 0x1>
-// LLVM: Notes [
+// LLVM: NoteSections [
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.gnu.property
// LLVM-NEXT: Offset: 0x40
// LLVM-NEXT: Size: 0xF8
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: GNU
-// LLVM-NEXT: Data size: 0xE8
-// LLVM-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
-// LLVM-NEXT: Property [
-// LLVM-NEXT: stack size: 0x100
-// LLVM-NEXT: stack size: 0x100
-// LLVM-NEXT: no copy on protected
-// LLVM-NEXT: x86 feature: SHSTK
-// LLVM-NEXT: x86 feature: IBT, SHSTK
-// LLVM-NEXT: x86 feature: <None>
-// LLVM-NEXT: x86 feature needed: x86, x87, MMX, XMM, YMM
-// LLVM-NEXT: x86 feature used: ZMM, FXSR, XSAVE, XSAVEOPT, XSAVEC
-// LLVM-NEXT: x86 ISA needed: x86-64-baseline, x86-64-v2, x86-64-v3, x86-64-v4
-// LLVM-NEXT: x86 ISA used: x86-64-baseline, x86-64-v2, x86-64-v3, x86-64-v4
-// LLVM-NEXT: <application-specific type 0xfefefefe>
-// LLVM-NEXT: stack size: <corrupt length: 0x0>
-// LLVM-NEXT: stack size: <corrupt length: 0x4>
-// LLVM-NEXT: no copy on protected <corrupt length: 0x1>
-// LLVM-NEXT: x86 feature: <corrupt length: 0x0>
-// LLVM-NEXT: x86 feature: IBT, <unknown flags: 0xf000f000>
-// LLVM-NEXT: <corrupt type (0x2) datasz: 0x1>
-// LLVM-NEXT: ]
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: GNU
+// LLVM-NEXT: Data size: 0xE8
+// LLVM-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
+// LLVM-NEXT: Property [
+// LLVM-NEXT: stack size: 0x100
+// LLVM-NEXT: stack size: 0x100
+// LLVM-NEXT: no copy on protected
+// LLVM-NEXT: x86 feature: SHSTK
+// LLVM-NEXT: x86 feature: IBT, SHSTK
+// LLVM-NEXT: x86 feature: <None>
+// LLVM-NEXT: x86 feature needed: x86, x87, MMX, XMM, YMM
+// LLVM-NEXT: x86 feature used: ZMM, FXSR, XSAVE, XSAVEOPT, XSAVEC
+// LLVM-NEXT: x86 ISA needed: x86-64-baseline, x86-64-v2, x86-64-v3, x86-64-v4
+// LLVM-NEXT: x86 ISA used: x86-64-baseline, x86-64-v2, x86-64-v3, x86-64-v4
+// LLVM-NEXT: <application-specific type 0xfefefefe>
+// LLVM-NEXT: stack size: <corrupt length: 0x0>
+// LLVM-NEXT: stack size: <corrupt length: 0x4>
+// LLVM-NEXT: no copy on protected <corrupt length: 0x1>
+// LLVM-NEXT: x86 feature: <corrupt length: 0x0>
+// LLVM-NEXT: x86 feature: IBT, <unknown flags: 0xf000f000>
+// LLVM-NEXT: <corrupt type (0x2) datasz: 0x1>
+// LLVM-NEXT: ]
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-gnu-property2.s b/llvm/test/tools/llvm-readobj/ELF/note-gnu-property2.s
index 5ac35d45725c0..2e342a05ca28f 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-gnu-property2.s
+++ b/llvm/test/tools/llvm-readobj/ELF/note-gnu-property2.s
@@ -8,19 +8,21 @@
// GNU-NEXT: GNU 0x00000004 NT_GNU_PROPERTY_TYPE_0 (property note)
// GNU-NEXT: Properties: <corrupted GNU_PROPERTY_TYPE_0>
-// LLVM: Notes [
+// LLVM: NoteSections [
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.gnu.property
// LLVM-NEXT: Offset: 0x40
// LLVM-NEXT: Size: 0x18
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: GNU
-// LLVM-NEXT: Data size: 0x4
-// LLVM-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
-// LLVM-NEXT: Property [
-// LLVM-NEXT: <corrupted GNU_PROPERTY_TYPE_0>
-// LLVM-NEXT: ]
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: GNU
+// LLVM-NEXT: Data size: 0x4
+// LLVM-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note)
+// LLVM-NEXT: Property [
+// LLVM-NEXT: <corrupted GNU_PROPERTY_TYPE_0>
+// LLVM-NEXT: ]
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-llvmompoffload.test b/llvm/test/tools/llvm-readobj/ELF/note-llvmompoffload.test
index 050ef9a3f465c..d00b5b4b333d0 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-llvmompoffload.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-llvmompoffload.test
@@ -13,29 +13,31 @@
# RUN: llvm-readobj --notes %t.32be | FileCheck %s --match-full-lines --check-prefix=NOTES
# RUN: llvm-readelf --notes %t.32be | FileCheck %s --match-full-lines --check-prefix=NOTES-GNU
-# NOTES: Notes [
+# NOTES: NoteSections [
# NOTES-NEXT: NoteSection {
# NOTES-NEXT: Name: .note.openmp
# NOTES-NEXT: Offset: {{.*}}
# NOTES-NEXT: Size: {{.*}}
-# NOTES-NEXT: Note {
-# NOTES-NEXT: Owner: LLVMOMPOFFLOAD
-# NOTES-NEXT: Data size: 0x3
-# NOTES-NEXT: Type: NT_LLVM_OPENMP_OFFLOAD_VERSION (image format version)
-# NOTES-NEXT: Version: 1.0
-# NOTES-NEXT: }
-# NOTES-NEXT: Note {
-# NOTES-NEXT: Owner: LLVMOMPOFFLOAD
-# NOTES-NEXT: Data size: 0x4
-# NOTES-NEXT: Type: NT_LLVM_OPENMP_OFFLOAD_PRODUCER (producing toolchain)
-# NOTES-NEXT: Producer: LLVM
-# NOTES-NEXT: }
-# NOTES-NEXT: Note {
-# NOTES-NEXT: Owner: LLVMOMPOFFLOAD
-# NOTES-NEXT: Data size: 0x9
-# NOTES-NEXT: Type: NT_LLVM_OPENMP_OFFLOAD_PRODUCER_VERSION (producing toolchain version)
-# NOTES-NEXT: Producer version: 13.0.0git
-# NOTES-NEXT: }
+# NOTES-NEXT: Notes [
+# NOTES-NEXT: {
+# NOTES-NEXT: Owner: LLVMOMPOFFLOAD
+# NOTES-NEXT: Data size: 0x3
+# NOTES-NEXT: Type: NT_LLVM_OPENMP_OFFLOAD_VERSION (image format version)
+# NOTES-NEXT: Version: 1.0
+# NOTES-NEXT: }
+# NOTES-NEXT: {
+# NOTES-NEXT: Owner: LLVMOMPOFFLOAD
+# NOTES-NEXT: Data size: 0x4
+# NOTES-NEXT: Type: NT_LLVM_OPENMP_OFFLOAD_PRODUCER (producing toolchain)
+# NOTES-NEXT: Producer: LLVM
+# NOTES-NEXT: }
+# NOTES-NEXT: {
+# NOTES-NEXT: Owner: LLVMOMPOFFLOAD
+# NOTES-NEXT: Data size: 0x9
+# NOTES-NEXT: Type: NT_LLVM_OPENMP_OFFLOAD_PRODUCER_VERSION (producing toolchain version)
+# NOTES-NEXT: Producer version: 13.0.0git
+# NOTES-NEXT: }
+# NOTES-NEXT: ]
# NOTES-NEXT: }
# NOTES-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-multiple-sections.test b/llvm/test/tools/llvm-readobj/ELF/note-multiple-sections.test
new file mode 100644
index 0000000000000..868ccac7fc59c
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/ELF/note-multiple-sections.test
@@ -0,0 +1,39 @@
+## Test that note values are interpreted correctly for files with multiple sections.
+
+## Check NT_PRSTATUS + NT_PRPSINFO.
+# RUN: yaml2obj %s -DTYPE1=0x1 -DTYPE2=0x3 -o %t1.o
+# RUN: llvm-readelf --elf-output-style=JSON --pretty-print --notes %t1.o | FileCheck %s --check-prefix=CHECK-JSON -DDESC1="NT_PRSTATUS (prstatus structure)" -DDESC2="NT_PRPSINFO (prpsinfo structure)"
+# CHECK-JSON: "Size": 40,
+# CHECK-JSON-NEXT: "Notes": [
+# CHECK-JSON-NEXT: {
+# CHECK-JSON-NEXT: "Owner": "CORE",
+# CHECK-JSON-NEXT: "Data size": 0,
+# CHECK-JSON-NEXT: "Type": "[[DESC1]]"
+# CHECK-JSON-NEXT: },
+# CHECK-JSON-NEXT: {
+# CHECK-JSON-NEXT: "Owner": "CORE",
+# CHECK-JSON-NEXT: "Data size": 0,
+# CHECK-JSON-NEXT: "Type": "[[DESC2]]"
+# CHECK-JSON-NEXT: }
+# CHECK-JSON-NEXT: ]
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_CORE
+Sections:
+ - Name: .note.first
+ Type: SHT_NOTE
+ Notes:
+ - Name: CORE
+ Type: [[TYPE1]]
+ - Name: .note.second
+ Type: SHT_NOTE
+ Notes:
+ - Name: CORE
+ Type: [[TYPE2]]
+ProgramHeaders:
+ - Type: PT_NOTE
+ FirstSec: .note.first
+ LastSec: .note.second
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-netbsd-core.test b/llvm/test/tools/llvm-readobj/ELF/note-netbsd-core.test
index aec85aa513946..aae0870d5e21f 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-netbsd-core.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-netbsd-core.test
@@ -30,25 +30,27 @@ ProgramHeaders:
# GNU-NEXT: NetBSD-CORE 0x00000000 NT_NETBSDCORE_AUXV (ELF auxiliary vector data)
# GNU-NEXT: NetBSD-CORE at 3615 0x00000000 PT_LWPSTATUS (ptrace_lwpstatus structure)
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: <?>
# LLVM-NEXT: Offset: 0x78
# LLVM-NEXT: Size: 0x50
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: NetBSD-CORE
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_NETBSDCORE_PROCINFO (procinfo structure)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: NetBSD-CORE
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_NETBSDCORE_AUXV (ELF auxiliary vector data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: NetBSD-CORE at 3615
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: PT_LWPSTATUS (ptrace_lwpstatus structure)
-# LLVM-NEXT: }
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: NetBSD-CORE
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_NETBSDCORE_PROCINFO (procinfo structure)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: NetBSD-CORE
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_NETBSDCORE_AUXV (ELF auxiliary vector data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: NetBSD-CORE at 3615
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: PT_LWPSTATUS (ptrace_lwpstatus structure)
+# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-openbsd-core.test b/llvm/test/tools/llvm-readobj/ELF/note-openbsd-core.test
index 0ccb55a8d6f33..3044fded0d6fa 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-openbsd-core.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-openbsd-core.test
@@ -35,35 +35,37 @@ ProgramHeaders:
# GNU-NEXT: OpenBSD at 31337 0x00000000 NT_OPENBSD_REGS (regular registers)
# GNU-NEXT: OpenBSD at 31337 0x00000000 NT_OPENBSD_FPREGS (floating point registers)
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM-NEXT: NoteSection {
# LLVM-NEXT: Name: <?>
# LLVM-NEXT: Offset: 0x78
# LLVM-NEXT: Size: 0x74
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: OpenBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_OPENBSD_PROCINFO (procinfo structure)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: OpenBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_OPENBSD_AUXV (ELF auxiliary vector data)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: OpenBSD
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_OPENBSD_WCOOKIE (window cookie)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: OpenBSD at 31337
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_OPENBSD_REGS (regular registers)
-# LLVM-NEXT: }
-# LLVM-NEXT: Note {
-# LLVM-NEXT: Owner: OpenBSD at 31337
-# LLVM-NEXT: Data size: 0x0
-# LLVM-NEXT: Type: NT_OPENBSD_FPREGS (floating point registers)
-# LLVM-NEXT: }
+# LLVM-NEXT: Notes [
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: OpenBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_OPENBSD_PROCINFO (procinfo structure)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: OpenBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_OPENBSD_AUXV (ELF auxiliary vector data)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: OpenBSD
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_OPENBSD_WCOOKIE (window cookie)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: OpenBSD at 31337
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_OPENBSD_REGS (regular registers)
+# LLVM-NEXT: }
+# LLVM-NEXT: {
+# LLVM-NEXT: Owner: OpenBSD at 31337
+# LLVM-NEXT: Data size: 0x0
+# LLVM-NEXT: Type: NT_OPENBSD_FPREGS (floating point registers)
+# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
# LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-unknown.s b/llvm/test/tools/llvm-readobj/ELF/note-unknown.s
index aa74b51a3541b..38a3bdc52b37e 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-unknown.s
+++ b/llvm/test/tools/llvm-readobj/ELF/note-unknown.s
@@ -14,51 +14,57 @@
// GNU-NEXT: description data: 4c 6f 72 65 6d 20 69 70 73 75 6d 20 64 6f 6c 6f 72 20 73 69 74 20 61 6d 65 74 00 00
// GNU-EMPTY:
-// LLVM: Notes [
+// LLVM: NoteSections [
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.foo
// LLVM-NEXT: Offset: 0x40
// LLVM-NEXT: Size: 0x10
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: XYZ
-// LLVM-NEXT: Data size: 0x0
-// LLVM-NEXT: Type: Unknown (0x00000003)
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: XYZ
+// LLVM-NEXT: Data size: 0x0
+// LLVM-NEXT: Type: Unknown (0x00000003)
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.bar
// LLVM-NEXT: Offset: 0x50
// LLVM-NEXT: Size: 0x2C
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: XYZ
-// LLVM-NEXT: Data size: 0x1C
-// LLVM-NEXT: Type: Unknown (0x00000003)
-// LLVM-NEXT: Description data (
-// LLVM-NEXT: 0000: 4C6F7265 6D206970 73756D20 646F6C6F |Lorem ipsum dolo|
-// LLVM-NEXT: 0010: 72207369 7420616D 65740000 |r sit amet..|
-// LLVM-NEXT: )
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: XYZ
+// LLVM-NEXT: Data size: 0x1C
+// LLVM-NEXT: Type: Unknown (0x00000003)
+// LLVM-NEXT: Description data (
+// LLVM-NEXT: 0000: 4C6F7265 6D206970 73756D20 646F6C6F |Lorem ipsum dolo|
+// LLVM-NEXT: 0010: 72207369 7420616D 65740000 |r sit amet..|
+// LLVM-NEXT: )
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: NoteSection {
// LLVM-NEXT: Name: .note.8
// LLVM-NEXT: Offset: 0x80
// LLVM-NEXT: Size: 0x40
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: WXYZ
-// LLVM-NEXT: Data size: 0x8
-// LLVM-NEXT: Type: Unknown (0x00000006)
-// LLVM-NEXT: Description data (
-// LLVM-NEXT: 0000: 4C6F7265 6D000000 |Lorem...|
-// LLVM-NEXT: )
-// LLVM-NEXT: }
-// LLVM-NEXT: Note {
-// LLVM-NEXT: Owner: VWXYZ
-// LLVM-NEXT: Data size: 0x8
-// LLVM-NEXT: Type: Unknown (0x00000006)
-// LLVM-NEXT: Description data (
-// LLVM-NEXT: 0000: 78787800 00000000 |xxx.....|
-// LLVM-NEXT: )
-// LLVM-NEXT: }
+// LLVM-NEXT: Notes [
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: WXYZ
+// LLVM-NEXT: Data size: 0x8
+// LLVM-NEXT: Type: Unknown (0x00000006)
+// LLVM-NEXT: Description data (
+// LLVM-NEXT: 0000: 4C6F7265 6D000000 |Lorem...|
+// LLVM-NEXT: )
+// LLVM-NEXT: }
+// LLVM-NEXT: {
+// LLVM-NEXT: Owner: VWXYZ
+// LLVM-NEXT: Data size: 0x8
+// LLVM-NEXT: Type: Unknown (0x00000006)
+// LLVM-NEXT: Description data (
+// LLVM-NEXT: 0000: 78787800 00000000 |xxx.....|
+// LLVM-NEXT: )
+// LLVM-NEXT: }
+// LLVM-NEXT: ]
// LLVM-NEXT: }
// LLVM-NEXT: ]
diff --git a/llvm/test/tools/llvm-readobj/archive.test b/llvm/test/tools/llvm-readobj/archive.test
index 3e6d74fb6608f..ce9e9fb77e39f 100644
--- a/llvm/test/tools/llvm-readobj/archive.test
+++ b/llvm/test/tools/llvm-readobj/archive.test
@@ -21,7 +21,7 @@
# LLVM: Sections [
# LLVM: Relocations [
# LLVM: Symbols [
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM: ]
# LLVM: StackSizes [
# LLVM: ]
@@ -41,7 +41,7 @@
# LLVM: Sections [
# LLVM: Relocations [
# LLVM: Symbols [
-# LLVM: Notes [
+# LLVM: NoteSections [
# LLVM: ]
# LLVM: StackSizes [
# LLVM: ]
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index 7596d90b4fcd2..f0a22f1568bef 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -7989,24 +7989,29 @@ static void printCoreNoteLLVMStyle(const CoreNote &Note, ScopedPrinter &W) {
}
template <class ELFT> void LLVMELFDumper<ELFT>::printNotes() {
- ListScope L(W, "Notes");
+ ListScope L(W, "NoteSections");
- std::unique_ptr<DictScope> NoteScope;
+ std::unique_ptr<DictScope> NoteSectionScope;
+ std::unique_ptr<ListScope> NotesScope;
size_t Align = 0;
auto StartNotes = [&](std::optional<StringRef> SecName,
const typename ELFT::Off Offset,
const typename ELFT::Addr Size, size_t Al) {
Align = std::max<size_t>(Al, 4);
- NoteScope = std::make_unique<DictScope>(W, "NoteSection");
+ NoteSectionScope = std::make_unique<DictScope>(W, "NoteSection");
W.printString("Name", SecName ? *SecName : "<?>");
W.printHex("Offset", Offset);
W.printHex("Size", Size);
+ NotesScope = std::make_unique<ListScope>(W, "Notes");
};
- auto EndNotes = [&] { NoteScope.reset(); };
+ auto EndNotes = [&] {
+ NotesScope.reset();
+ NoteSectionScope.reset();
+ };
auto ProcessNote = [&](const Elf_Note &Note, bool IsCore) -> Error {
- DictScope D2(W, "Note");
+ DictScope D2(W);
StringRef Name = Note.getName();
ArrayRef<uint8_t> Descriptor = Note.getDesc(Align);
Elf_Word Type = Note.getType();
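
For readers tracking the output change: the dumper now opens one extra
ListScope between the per-section DictScope and the per-note dictionaries,
and the per-note DictScope becomes unnamed. A minimal sketch of that scope
nesting, assuming only the llvm::ScopedPrinter API already used in this
patch (the section and note values below are hypothetical):

#include "llvm/Support/ScopedPrinter.h"
using namespace llvm;

static void printExampleNoteSection(ScopedPrinter &W) {
  ListScope Sections(W, "NoteSections"); // top-level list, formerly "Notes"
  DictScope Section(W, "NoteSection");   // one dictionary per SHT_NOTE section
  W.printString("Name", ".note.foo");    // hypothetical section name
  W.printHex("Offset", 0x40);
  W.printHex("Size", 0xE8);
  ListScope Notes(W, "Notes");           // new: notes grouped under the section
  DictScope Note(W);                     // unnamed dictionary, one per note
  W.printString("Owner", "AMDGPU");
  W.printHex("Data size", 0xD4);
}                                        // scopes close in reverse order here

Because the scopes are destroyed in reverse declaration order, this emits
exactly the nesting the updated tests above expect: NoteSections [
NoteSection { ... Notes [ { ... } ] } ].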
>From 3386d24ff4e5dae07e33326c12c8d7dfd29de06a Mon Sep 17 00:00:00 2001
From: Kamau Bridgeman <kamau.bridgeman.ibm at gmail.com>
Date: Wed, 3 Jul 2024 12:22:26 -0400
Subject: [PATCH 151/246] Revert "Reduce llvm-gsymutil memory usage" (#97603)
Reverts llvm/llvm-project#91023
Build break found in clang-ppc64le-linux-multistage build no. 583.
---
llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h | 23 +-----
llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp | 81 +++----------------
llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp | 15 +---
3 files changed, 14 insertions(+), 105 deletions(-)
diff --git a/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
index 26ef7db718dd5..80c27aea89312 100644
--- a/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
+++ b/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -22,7 +22,6 @@
#include "llvm/DebugInfo/DWARF/DWARFLocationExpression.h"
#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include "llvm/Support/DataExtractor.h"
-#include "llvm/Support/RWMutex.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -258,10 +257,6 @@ class DWARFUnit {
std::shared_ptr<DWARFUnit> DWO;
- mutable llvm::sys::RWMutex FreeDIEsMutex;
- mutable llvm::sys::RWMutex ExtractCUDieMutex;
- mutable llvm::sys::RWMutex ExtractNonCUDIEsMutex;
-
protected:
friend dwarf_linker::parallel::CompileUnit;
@@ -571,9 +566,6 @@ class DWARFUnit {
Error tryExtractDIEsIfNeeded(bool CUDieOnly);
- /// clearDIEs - Clear parsed DIEs to keep memory usage low.
- void clearDIEs(bool KeepCUDie);
-
private:
/// Size in bytes of the .debug_info data associated with this compile unit.
size_t getDebugInfoSize() const {
@@ -585,22 +577,13 @@ class DWARFUnit {
/// hasn't already been done
void extractDIEsIfNeeded(bool CUDieOnly);
- /// extracCUDieIfNeeded - Parse CU DIE if it hasn't already been done.
- /// Only to be used from extractDIEsIfNeeded, which holds the correct locks.
- bool extractCUDieIfNeeded(bool CUDieOnly, bool &HasCUDie);
-
- /// extractNonCUDIEsIfNeeded - Parses non-CU DIE's for a given CU if needed.
- /// Only to be used from extractDIEsIfNeeded, which holds the correct locks.
- Error extractNonCUDIEsIfNeeded(bool HasCUDie);
-
- /// extractNonCUDIEsHelper - helper to be invoked *only* from inside
- /// tryExtractDIEsIfNeeded, which holds the correct locks.
- Error extractNonCUDIEsHelper();
-
/// extractDIEsToVector - Appends all parsed DIEs to a vector.
void extractDIEsToVector(bool AppendCUDie, bool AppendNonCUDIEs,
std::vector<DWARFDebugInfoEntry> &DIEs) const;
+ /// clearDIEs - Clear parsed DIEs to keep memory usage low.
+ void clearDIEs(bool KeepCUDie);
+
/// parseDWO - Parses .dwo file for current compile unit. Returns true if
/// it was actually constructed.
/// The \p AlternativeLocation specifies an alternative location to get
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp b/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
index 2760cef7edfdb..bdd04b00f557b 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
@@ -495,78 +495,21 @@ void DWARFUnit::extractDIEsIfNeeded(bool CUDieOnly) {
Context.getRecoverableErrorHandler()(std::move(e));
}
-static bool DoubleCheckedRWLocker(llvm::sys::RWMutex &Mutex,
- const std::function<bool()> &reader,
- const std::function<void()> &writer) {
- {
- llvm::sys::ScopedReader Lock(Mutex);
- if (reader())
- return true;
- }
- llvm::sys::ScopedWriter Lock(Mutex);
- if (reader())
- return true;
- // If we get here, then the reader function returned false. This means that
- // no one else is currently writing to this data structure and it's safe for
- // us to write to it now. The scoped writer lock guarantees there are no
- // other readers or writers at this point.
- writer();
- return false;
-}
+Error DWARFUnit::tryExtractDIEsIfNeeded(bool CUDieOnly) {
+ if ((CUDieOnly && !DieArray.empty()) ||
+ DieArray.size() > 1)
+ return Error::success(); // Already parsed.
-// Helper to safely check if the Compile-Unit DIE has been extracted already.
-// If not, then extract it, and return false, indicating that it was *not*
-// already extracted.
-bool DWARFUnit::extractCUDieIfNeeded(bool CUDieOnly, bool &HasCUDie) {
- return DoubleCheckedRWLocker(
- ExtractCUDieMutex,
- // Calculate if the CU DIE has been extracted already.
- [&]() {
- return ((CUDieOnly && !DieArray.empty()) || DieArray.size() > 1);
- },
- // Lambda to extract the CU DIE.
- [&]() {
- HasCUDie = !DieArray.empty();
- extractDIEsToVector(!HasCUDie, !CUDieOnly, DieArray);
- });
-}
+ bool HasCUDie = !DieArray.empty();
+ extractDIEsToVector(!HasCUDie, !CUDieOnly, DieArray);
-// Helper to safely check if the non-Compile-Unit DIEs have been parsed
-// already. If they haven't been parsed, go ahead and parse them.
-Error DWARFUnit::extractNonCUDIEsIfNeeded(bool HasCUDie) {
- Error Result = Error::success();
- DoubleCheckedRWLocker(
- ExtractNonCUDIEsMutex,
- // Lambda to check if all DIEs have been extracted already.
- [=]() { return (DieArray.empty() || HasCUDie); },
- // Lambda to extract all the DIEs using the helper function
- [&]() {
- if (Error E = extractNonCUDIEsHelper()) {
- // Consume the success placeholder and save the actual error
- consumeError(std::move(Result));
- Result = std::move(E);
- }
- });
- return Result;
-}
-
-Error DWARFUnit::tryExtractDIEsIfNeeded(bool CUDieOnly) {
- // Acquire the FreeDIEsMutex lock (in read-mode) to prevent the Compile Unit
- // DIE from being freed by a thread calling clearDIEs() after the CU DIE was
- // parsed, but before the rest of the DIEs are parsed, as there are no other
- // locks held during that brief period.
- llvm::sys::ScopedReader FreeLock(FreeDIEsMutex);
- bool HasCUDie = false;
- if (extractCUDieIfNeeded(CUDieOnly, HasCUDie))
+ if (DieArray.empty())
return Error::success();
- // Right here is where the above-mentioned race condition exists.
- return extractNonCUDIEsIfNeeded(HasCUDie);
-}
-// Helper used from the tryExtractDIEsIfNeeded function: it must already have
-// acquired the ExtractNonCUDIEsMutex for writing.
-Error DWARFUnit::extractNonCUDIEsHelper() {
// If CU DIE was just parsed, copy several attribute values from it.
+ if (HasCUDie)
+ return Error::success();
+
DWARFDie UnitDie(this, &DieArray[0]);
if (std::optional<uint64_t> DWOId =
toUnsigned(UnitDie.find(DW_AT_GNU_dwo_id)))
@@ -710,10 +653,6 @@ bool DWARFUnit::parseDWO(StringRef DWOAlternativeLocation) {
}
void DWARFUnit::clearDIEs(bool KeepCUDie) {
- // We need to acquire the FreeDIEsMutex lock in write-mode, because we are
- // going to free the DIEs, when other threads might be trying to create them.
- llvm::sys::ScopedWriter FreeLock(FreeDIEsMutex);
-
// Do not use resize() + shrink_to_fit() to free memory occupied by dies.
// shrink_to_fit() is a *non-binding* request to reduce capacity() to size().
// It depends on the implementation whether the request is fulfilled.
diff --git a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
index e1b30648b6a77..601686fdd3dd5 100644
--- a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
+++ b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
@@ -587,11 +587,6 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) {
DWARFDie Die = getDie(*CU);
CUInfo CUI(DICtx, dyn_cast<DWARFCompileUnit>(CU.get()));
handleDie(Out, CUI, Die);
- // Release the line table, once we're done.
- DICtx.clearLineTableForUnit(CU.get());
- // Free any DIEs that were allocated by the DWARF parser.
- // If/when they're needed by other CU's, they'll be recreated.
- CU->clearDIEs(/*KeepCUDie=*/false);
}
} else {
// LLVM Dwarf parser is not thread-safe and we need to parse all DWARF up
@@ -617,16 +612,11 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) {
DWARFDie Die = getDie(*CU);
if (Die) {
CUInfo CUI(DICtx, dyn_cast<DWARFCompileUnit>(CU.get()));
- pool.async([this, CUI, &CU, &LogMutex, &Out, Die]() mutable {
+ pool.async([this, CUI, &LogMutex, &Out, Die]() mutable {
std::string storage;
raw_string_ostream StrStream(storage);
OutputAggregator ThreadOut(Out.GetOS() ? &StrStream : nullptr);
handleDie(ThreadOut, CUI, Die);
- // Release the line table once we're done.
- DICtx.clearLineTableForUnit(CU.get());
- // Free any DIEs that were allocated by the DWARF parser.
- // If/when they're needed by other CU's, they'll be recreated.
- CU->clearDIEs(/*KeepCUDie=*/false);
// Print ThreadLogStorage lines into an actual stream under a lock
std::lock_guard<std::mutex> guard(LogMutex);
if (Out.GetOS()) {
@@ -639,9 +629,6 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) {
}
pool.wait();
}
- // Now get rid of all the DIEs that may have been recreated
- for (const auto &CU : DICtx.compile_units())
- CU->clearDIEs(/*KeepCUDie=*/false);
size_t FunctionsAddedCount = Gsym.getNumFunctionInfos() - NumBefore;
Out << "Loaded " << FunctionsAddedCount << " functions from DWARF.\n";
return Error::success();
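
For context on what this revert removes: DoubleCheckedRWLocker implemented
the classic double-checked idiom over an RWMutex, i.e. take the cheap shared
lock to test whether the work is already done, and only on a miss take the
exclusive lock, re-test, and then do the work. A standalone sketch of that
idiom, assuming only llvm::sys::RWMutex (an illustration of the pattern,
not the reverted code itself):

#include "llvm/Support/RWMutex.h"
#include <functional>

// Returns true if the work had already been done, false if DoWork ran.
static bool doubleCheckedRW(llvm::sys::RWMutex &Mutex,
                            const std::function<bool()> &AlreadyDone,
                            const std::function<void()> &DoWork) {
  {
    llvm::sys::ScopedReader ReadLock(Mutex); // fast path: shared check
    if (AlreadyDone())
      return true;
  }
  llvm::sys::ScopedWriter WriteLock(Mutex); // slow path: exclusive access
  if (AlreadyDone()) // re-check: another writer may have finished first
    return true;
  DoWork(); // the write lock guarantees no concurrent readers or writers
  return false;
}

The re-check under the write lock closes the window between releasing the
read lock and acquiring the write lock; the revert drops this machinery,
along with the thread-safe DIE extraction built on it, so clearDIEs() and
DIE extraction are no longer safe to call concurrently.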
>From 5da7179cb3ff80203f58ddea71562816b2ae4ff6 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Thu, 14 Sep 2023 12:20:06 -0700
Subject: [PATCH 152/246] [AMDGPU] Reland: Add IR LiveReg type-based
optimization
---
.../AMDGPU/AMDGPULateCodeGenPrepare.cpp | 302 ++-
.../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 4 +-
.../AMDGPU/GlobalISel/vni8-across-blocks.ll | 636 +++++
.../amdgpu-codegenprepare-break-large-phis.ll | 8 +-
...dagcomb-extract-vec-elt-different-sizes.ll | 39 +-
.../CodeGen/AMDGPU/extract-subvector-16bit.ll | 359 ++-
llvm/test/CodeGen/AMDGPU/extract-subvector.ll | 498 +++-
llvm/test/CodeGen/AMDGPU/llc-pipeline.ll | 24 +-
llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll | 24 +-
.../test/CodeGen/AMDGPU/vni8-across-blocks.ll | 2396 +++++------------
llvm/test/CodeGen/AMDGPU/vni8-live-reg-opt.ll | 352 +++
11 files changed, 2606 insertions(+), 2036 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/vni8-live-reg-opt.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
index 69fdeaebe0a01..2cc95f81d2f94 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
@@ -50,6 +50,8 @@ class AMDGPULateCodeGenPrepare
AssumptionCache *AC = nullptr;
UniformityInfo *UA = nullptr;
+ SmallVector<WeakTrackingVH, 8> DeadInsts;
+
public:
static char ID;
@@ -81,6 +83,69 @@ class AMDGPULateCodeGenPrepare
bool visitLoadInst(LoadInst &LI);
};
+using ValueToValueMap = DenseMap<const Value *, Value *>;
+
+class LiveRegOptimizer {
+private:
+ Module *Mod = nullptr;
+ const DataLayout *DL = nullptr;
+ const GCNSubtarget *ST;
+ /// The scalar type to convert to
+ Type *ConvertToScalar;
+ /// The set of visited Instructions
+ SmallPtrSet<Instruction *, 4> Visited;
+ /// Map of Value -> Converted Value
+ ValueToValueMap ValMap;
+ /// Map of containing conversions from Optimal Type -> Original Type per BB.
+ DenseMap<BasicBlock *, ValueToValueMap> BBUseValMap;
+
+public:
+  /// Calculate and \p return the type to convert to, given a problematic \p
+ /// OriginalType. In some instances, we may widen the type (e.g. v2i8 -> i32).
+ Type *calculateConvertType(Type *OriginalType);
+ /// Convert the virtual register defined by \p V to the compatible vector of
+ /// legal type
+ Value *convertToOptType(Instruction *V, BasicBlock::iterator &InstPt);
+ /// Convert the virtual register defined by \p V back to the original type \p
+ /// ConvertType, stripping away the MSBs in cases where there was an imperfect
+ /// fit (e.g. v2i32 -> v7i8)
+ Value *convertFromOptType(Type *ConvertType, Instruction *V,
+ BasicBlock::iterator &InstPt,
+ BasicBlock *InsertBlock);
+ /// Check for problematic PHI nodes or cross-bb values based on the value
+  /// defined by \p I, and coerce to legal types if necessary. For a problematic
+  /// PHI node, we coerce all incoming values in a single invocation.
+ bool optimizeLiveType(Instruction *I,
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts);
+
+ // Whether or not the type should be replaced to avoid inefficient
+ // legalization code
+ bool shouldReplace(Type *ITy) {
+ FixedVectorType *VTy = dyn_cast<FixedVectorType>(ITy);
+ if (!VTy)
+ return false;
+
+ auto TLI = ST->getTargetLowering();
+
+ Type *EltTy = VTy->getElementType();
+    // If the element size is not less than the convert-to-scalar size, then we
+    // can't do any bit packing.
+ if (!EltTy->isIntegerTy() ||
+ EltTy->getScalarSizeInBits() > ConvertToScalar->getScalarSizeInBits())
+ return false;
+
+ // Only coerce illegal types
+ TargetLoweringBase::LegalizeKind LK =
+ TLI->getTypeConversion(EltTy->getContext(), EVT::getEVT(EltTy, false));
+ return LK.first != TargetLoweringBase::TypeLegal;
+ }
+
+ LiveRegOptimizer(Module *Mod, const GCNSubtarget *ST) : Mod(Mod), ST(ST) {
+ DL = &Mod->getDataLayout();
+ ConvertToScalar = Type::getInt32Ty(Mod->getContext());
+ }
+};
+
} // end anonymous namespace
bool AMDGPULateCodeGenPrepare::doInitialization(Module &M) {
@@ -96,20 +161,243 @@ bool AMDGPULateCodeGenPrepare::runOnFunction(Function &F) {
const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
const TargetMachine &TM = TPC.getTM<TargetMachine>();
const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
- if (ST.hasScalarSubwordLoads())
- return false;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
UA = &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
+ // "Optimize" the virtual regs that cross basic block boundaries. When
+ // building the SelectionDAG, vectors of illegal types that cross basic blocks
+ // will be scalarized and widened, with each scalar living in its
+  // own register. To work around this, the optimization converts the
+ // vectors to equivalent vectors of legal type (which are converted back
+ // before uses in subsequent blocks), to pack the bits into fewer physical
+ // registers (used in CopyToReg/CopyFromReg pairs).
+ LiveRegOptimizer LRO(Mod, &ST);
+
bool Changed = false;
- for (auto &BB : F)
- for (Instruction &I : llvm::make_early_inc_range(BB))
- Changed |= visit(I);
+ bool HasScalarSubwordLoads = ST.hasScalarSubwordLoads();
+
+ for (auto &BB : reverse(F))
+ for (Instruction &I : make_early_inc_range(reverse(BB))) {
+ Changed |= !HasScalarSubwordLoads && visit(I);
+ Changed |= LRO.optimizeLiveType(&I, DeadInsts);
+ }
+
+ RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts);
return Changed;
}
+Type *LiveRegOptimizer::calculateConvertType(Type *OriginalType) {
+ assert(OriginalType->getScalarSizeInBits() <=
+ ConvertToScalar->getScalarSizeInBits());
+
+ FixedVectorType *VTy = cast<FixedVectorType>(OriginalType);
+
+ TypeSize OriginalSize = DL->getTypeSizeInBits(VTy);
+ TypeSize ConvertScalarSize = DL->getTypeSizeInBits(ConvertToScalar);
+ unsigned ConvertEltCount =
+ (OriginalSize + ConvertScalarSize - 1) / ConvertScalarSize;
+
+ if (OriginalSize <= ConvertScalarSize)
+ return IntegerType::get(Mod->getContext(), ConvertScalarSize);
+
+ return VectorType::get(Type::getIntNTy(Mod->getContext(), ConvertScalarSize),
+ ConvertEltCount, false);
+}
+
+Value *LiveRegOptimizer::convertToOptType(Instruction *V,
+ BasicBlock::iterator &InsertPt) {
+ FixedVectorType *VTy = cast<FixedVectorType>(V->getType());
+ Type *NewTy = calculateConvertType(V->getType());
+
+ TypeSize OriginalSize = DL->getTypeSizeInBits(VTy);
+ TypeSize NewSize = DL->getTypeSizeInBits(NewTy);
+
+ IRBuilder<> Builder(V->getParent(), InsertPt);
+ // If there is a bitsize match, we can fit the old vector into a new vector of
+ // desired type.
+ if (OriginalSize == NewSize)
+ return Builder.CreateBitCast(V, NewTy, V->getName() + ".bc");
+
+ // If there is a bitsize mismatch, we must use a wider vector.
+ assert(NewSize > OriginalSize);
+ uint64_t ExpandedVecElementCount = NewSize / VTy->getScalarSizeInBits();
+
+ SmallVector<int, 8> ShuffleMask;
+ uint64_t OriginalElementCount = VTy->getElementCount().getFixedValue();
+ for (unsigned I = 0; I < OriginalElementCount; I++)
+ ShuffleMask.push_back(I);
+
+ for (uint64_t I = OriginalElementCount; I < ExpandedVecElementCount; I++)
+ ShuffleMask.push_back(OriginalElementCount);
+
+ Value *ExpandedVec = Builder.CreateShuffleVector(V, ShuffleMask);
+ return Builder.CreateBitCast(ExpandedVec, NewTy, V->getName() + ".bc");
+}
+
+Value *LiveRegOptimizer::convertFromOptType(Type *ConvertType, Instruction *V,
+ BasicBlock::iterator &InsertPt,
+ BasicBlock *InsertBB) {
+ FixedVectorType *NewVTy = cast<FixedVectorType>(ConvertType);
+
+ TypeSize OriginalSize = DL->getTypeSizeInBits(V->getType());
+ TypeSize NewSize = DL->getTypeSizeInBits(NewVTy);
+
+ IRBuilder<> Builder(InsertBB, InsertPt);
+ // If there is a bitsize match, we simply convert back to the original type.
+ if (OriginalSize == NewSize)
+ return Builder.CreateBitCast(V, NewVTy, V->getName() + ".bc");
+
+ // If there is a bitsize mismatch, then we must have used a wider value to
+ // hold the bits.
+ assert(OriginalSize > NewSize);
+ // For wide scalars, we can just truncate the value.
+ if (!V->getType()->isVectorTy()) {
+ Instruction *Trunc = cast<Instruction>(
+ Builder.CreateTrunc(V, IntegerType::get(Mod->getContext(), NewSize)));
+ return cast<Instruction>(Builder.CreateBitCast(Trunc, NewVTy));
+ }
+
+ // For wider vectors, we must strip the MSBs to convert back to the original
+ // type.
+ VectorType *ExpandedVT = VectorType::get(
+ Type::getIntNTy(Mod->getContext(), NewVTy->getScalarSizeInBits()),
+ (OriginalSize / NewVTy->getScalarSizeInBits()), false);
+ Instruction *Converted =
+ cast<Instruction>(Builder.CreateBitCast(V, ExpandedVT));
+
+ unsigned NarrowElementCount = NewVTy->getElementCount().getFixedValue();
+ SmallVector<int, 8> ShuffleMask(NarrowElementCount);
+ std::iota(ShuffleMask.begin(), ShuffleMask.end(), 0);
+
+ return Builder.CreateShuffleVector(Converted, ShuffleMask);
+}
+
+bool LiveRegOptimizer::optimizeLiveType(
+ Instruction *I, SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
+ SmallVector<Instruction *, 4> Worklist;
+ SmallPtrSet<PHINode *, 4> PhiNodes;
+ SmallPtrSet<Instruction *, 4> Defs;
+ SmallPtrSet<Instruction *, 4> Uses;
+
+ Worklist.push_back(cast<Instruction>(I));
+ while (!Worklist.empty()) {
+ Instruction *II = Worklist.pop_back_val();
+
+ if (!Visited.insert(II).second)
+ continue;
+
+ if (!shouldReplace(II->getType()))
+ continue;
+
+ if (PHINode *Phi = dyn_cast<PHINode>(II)) {
+ PhiNodes.insert(Phi);
+ // Collect all the incoming values of problematic PHI nodes.
+ for (Value *V : Phi->incoming_values()) {
+ // Repeat the collection process for newly found PHI nodes.
+ if (PHINode *OpPhi = dyn_cast<PHINode>(V)) {
+ if (!PhiNodes.count(OpPhi) && !Visited.count(OpPhi))
+ Worklist.push_back(OpPhi);
+ continue;
+ }
+
+ Instruction *IncInst = dyn_cast<Instruction>(V);
+ // Other incoming value types (e.g. vector literals) are unhandled
+ if (!IncInst && !isa<ConstantAggregateZero>(V))
+ return false;
+
+ // Collect all other incoming values for coercion.
+ if (IncInst)
+ Defs.insert(IncInst);
+ }
+ }
+
+ // Collect all relevant uses.
+ for (User *V : II->users()) {
+ // Repeat the collection process for problematic PHI nodes.
+ if (PHINode *OpPhi = dyn_cast<PHINode>(V)) {
+ if (!PhiNodes.count(OpPhi) && !Visited.count(OpPhi))
+ Worklist.push_back(OpPhi);
+ continue;
+ }
+
+ Instruction *UseInst = cast<Instruction>(V);
+      // Collect all uses of PHINodes and any use that crosses BB boundaries.
+ if (UseInst->getParent() != II->getParent() || isa<PHINode>(II)) {
+ Uses.insert(UseInst);
+ if (!Defs.count(II) && !isa<PHINode>(II)) {
+ Defs.insert(II);
+ }
+ }
+ }
+ }
+
+ // Coerce and track the defs.
+ for (Instruction *D : Defs) {
+ if (!ValMap.contains(D)) {
+ BasicBlock::iterator InsertPt = std::next(D->getIterator());
+ Value *ConvertVal = convertToOptType(D, InsertPt);
+ assert(ConvertVal);
+ ValMap[D] = ConvertVal;
+ }
+ }
+
+ // Construct new-typed PHI nodes.
+ for (PHINode *Phi : PhiNodes) {
+ ValMap[Phi] = PHINode::Create(calculateConvertType(Phi->getType()),
+ Phi->getNumIncomingValues(),
+ Phi->getName() + ".tc", Phi->getIterator());
+ }
+
+ // Connect all the PHI nodes with their new incoming values.
+ for (PHINode *Phi : PhiNodes) {
+ PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
+ bool MissingIncVal = false;
+ for (int I = 0, E = Phi->getNumIncomingValues(); I < E; I++) {
+ Value *IncVal = Phi->getIncomingValue(I);
+ if (isa<ConstantAggregateZero>(IncVal)) {
+ Type *NewType = calculateConvertType(Phi->getType());
+ NewPhi->addIncoming(ConstantInt::get(NewType, 0, false),
+ Phi->getIncomingBlock(I));
+ } else if (ValMap.contains(IncVal))
+ NewPhi->addIncoming(ValMap[IncVal], Phi->getIncomingBlock(I));
+ else
+ MissingIncVal = true;
+ }
+ Instruction *DeadInst = Phi;
+ if (MissingIncVal) {
+ DeadInst = cast<Instruction>(ValMap[Phi]);
+ // Do not use the dead phi
+ ValMap[Phi] = Phi;
+ }
+ DeadInsts.emplace_back(DeadInst);
+ }
+ // Coerce back to the original type and replace the uses.
+ for (Instruction *U : Uses) {
+ // Replace all converted operands for a use.
+ for (auto [OpIdx, Op] : enumerate(U->operands())) {
+ if (ValMap.contains(Op)) {
+ Value *NewVal = nullptr;
+ if (BBUseValMap.contains(U->getParent()) &&
+ BBUseValMap[U->getParent()].contains(ValMap[Op]))
+ NewVal = BBUseValMap[U->getParent()][ValMap[Op]];
+ else {
+ BasicBlock::iterator InsertPt = U->getParent()->getFirstNonPHIIt();
+ NewVal =
+ convertFromOptType(Op->getType(), cast<Instruction>(ValMap[Op]),
+ InsertPt, U->getParent());
+ BBUseValMap[U->getParent()][ValMap[Op]] = NewVal;
+ }
+ assert(NewVal);
+ U->setOperand(OpIdx, NewVal);
+ }
+ }
+ }
+
+ return true;
+}
+
bool AMDGPULateCodeGenPrepare::canWidenScalarExtLoad(LoadInst &LI) const {
unsigned AS = LI.getPointerAddressSpace();
// Skip non-constant address space.
@@ -119,7 +407,7 @@ bool AMDGPULateCodeGenPrepare::canWidenScalarExtLoad(LoadInst &LI) const {
// Skip non-simple loads.
if (!LI.isSimple())
return false;
- auto *Ty = LI.getType();
+ Type *Ty = LI.getType();
// Skip aggregate types.
if (Ty->isAggregateType())
return false;
@@ -181,7 +469,7 @@ bool AMDGPULateCodeGenPrepare::visitLoadInst(LoadInst &LI) {
auto *NewVal = IRB.CreateBitCast(
IRB.CreateTrunc(IRB.CreateLShr(NewLd, ShAmt), IntNTy), LI.getType());
LI.replaceAllUsesWith(NewVal);
- RecursivelyDeleteTriviallyDeadInstructions(&LI);
+ DeadInsts.emplace_back(&LI);
return true;
}
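
To make the packing rule concrete: with i32 as the convert-to scalar,
calculateConvertType rounds the original vector's bit width up to a whole
number of 32-bit lanes, yielding a plain i32 when one lane suffices and an
<N x i32> otherwise. A standalone sketch of just that shape computation
(an illustrative helper, not part of the pass's API):

#include <cstdint>
#include <utility>

// Returns {NumLanes, FitsInScalar} for packing OriginalBits into 32-bit
// lanes. E.g. v3i8 (24 bits) -> {1, true}, a single i32; v7i8 (56 bits)
// -> {2, false}, a <2 x i32> whose top 8 bits are padding that
// convertFromOptType later strips again with a shuffle.
static std::pair<uint64_t, bool> packedI32Shape(uint64_t OriginalBits) {
  const uint64_t ScalarBits = 32;
  uint64_t Lanes = (OriginalBits + ScalarBits - 1) / ScalarBits; // round up
  return {Lanes, OriginalBits <= ScalarBits};
}

Fewer, wider registers for values that live across blocks is the point of
the pass: a <3 x i8> PHI that would otherwise be scalarized into three
registers travels as a single i32 instead.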
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 9162e110aa10b..f50a18ccc2188 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1197,10 +1197,10 @@ bool GCNPassConfig::addPreISel() {
AMDGPUPassConfig::addPreISel();
if (TM->getOptLevel() > CodeGenOptLevel::None)
- addPass(createAMDGPULateCodeGenPreparePass());
+ addPass(createSinkingPass());
if (TM->getOptLevel() > CodeGenOptLevel::None)
- addPass(createSinkingPass());
+ addPass(createAMDGPULateCodeGenPreparePass());
// Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
// regions formed by them.
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
new file mode 100644
index 0000000000000..83cb92210ec84
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
@@ -0,0 +1,636 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx906 < %s | FileCheck --check-prefix=GFX906 %s
+
+define amdgpu_kernel void @v3i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v3i8_liveout:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX906-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX906-NEXT: v_mov_b32_e32 v3, 8
+; GFX906-NEXT: v_mov_b32_e32 v5, 16
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dword v4, v2, s[4:5]
+; GFX906-NEXT: v_mov_b32_e32 v1, 0xff
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_and_b32_e32 v6, 0xff, v4
+; GFX906-NEXT: v_lshlrev_b32_sdwa v7, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX906-NEXT: v_lshlrev_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX906-NEXT: v_or3_b32 v4, v6, v7, v4
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB0_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dword v0, v2, s[6:7]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX906-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX906-NEXT: v_lshlrev_b32_sdwa v0, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX906-NEXT: v_or3_b32 v4, v2, v3, v0
+; GFX906-NEXT: .LBB0_2: ; %bb.2
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v4
+; GFX906-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX906-NEXT: v_or_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_and_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX906-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX906-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX906-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX906-NEXT: v_mov_b32_e32 v1, 0
+; GFX906-NEXT: global_store_short v1, v0, s[2:3]
+; GFX906-NEXT: global_store_byte_d16_hi v1, v0, s[2:3] offset:2
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <3 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <3 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <3 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <3 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <3 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <3 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v4i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v4i8_liveout:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX906-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dword v1, v2, s[4:5]
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB1_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dword v1, v2, s[6:7]
+; GFX906-NEXT: .LBB1_2: ; %bb.2
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <4 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <4 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <4 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <4 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <4 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v5i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v5i8_liveout:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 3, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[4:5]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB2_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[6:7]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX906-NEXT: .LBB2_2: ; %bb.2
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: v_mov_b32_e32 v4, 0
+; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 24, v1
+; GFX906-NEXT: global_store_byte v4, v1, s[2:3]
+; GFX906-NEXT: global_store_byte v4, v0, s[2:3] offset:1
+; GFX906-NEXT: global_store_byte_d16_hi v4, v1, s[2:3] offset:2
+; GFX906-NEXT: global_store_byte v4, v3, s[2:3] offset:3
+; GFX906-NEXT: global_store_byte v4, v2, s[2:3] offset:4
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <5 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <5 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <5 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <5 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <5 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <5 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v8i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v8i8_liveout:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 3, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[4:5]
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB3_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[6:7]
+; GFX906-NEXT: .LBB3_2: ; %bb.2
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx2 v0, v[1:2], s[2:3]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v16i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v16i8_liveout:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX906-NEXT: v_lshlrev_b32_e32 v5, 4, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v5, s[4:5]
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB4_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v5, s[6:7]
+; GFX906-NEXT: .LBB4_2: ; %bb.2
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx4 v0, v[1:4], s[2:3]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <16 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <16 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <16 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <16 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <16 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <16 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v32i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v32i8_liveout:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX906-NEXT: v_lshlrev_b32_e32 v9, 5, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v9, s[4:5]
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v9, s[4:5] offset:16
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB5_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v9, s[6:7]
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v9, s[6:7] offset:16
+; GFX906-NEXT: .LBB5_2: ; %bb.2
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt vmcnt(1)
+; GFX906-NEXT: global_store_dwordx4 v0, v[1:4], s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(1)
+; GFX906-NEXT: global_store_dwordx4 v0, v[5:8], s[2:3] offset:16
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <32 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <32 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <32 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <32 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <32 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <32 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v256i8_liveout:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX906-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX906-NEXT: s_mov_b32 s10, -1
+; GFX906-NEXT: s_mov_b32 s11, 0xe00000
+; GFX906-NEXT: s_add_u32 s8, s8, s3
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX906-NEXT: v_lshlrev_b32_e32 v4, 3, v0
+; GFX906-NEXT: s_addc_u32 s9, s9, 0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v4, s[4:5]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: buffer_store_dword v5, off, s[8:11], 0 ; 4-byte Folded Spill
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: buffer_store_dword v6, off, s[8:11], 0 offset:4 ; 4-byte Folded Spill
+; GFX906-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:8 ; 4-byte Folded Spill
+; GFX906-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:12 ; 4-byte Folded Spill
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v4, s[4:5] offset:16
+; GFX906-NEXT: s_nop 0
+; GFX906-NEXT: global_load_dwordx4 v[9:12], v4, s[4:5] offset:32
+; GFX906-NEXT: global_load_dwordx4 v[13:16], v4, s[4:5] offset:48
+; GFX906-NEXT: global_load_dwordx4 v[17:20], v4, s[4:5] offset:64
+; GFX906-NEXT: global_load_dwordx4 v[21:24], v4, s[4:5] offset:80
+; GFX906-NEXT: global_load_dwordx4 v[25:28], v4, s[4:5] offset:96
+; GFX906-NEXT: global_load_dwordx4 v[29:32], v4, s[4:5] offset:112
+; GFX906-NEXT: global_load_dwordx4 v[33:36], v4, s[4:5] offset:128
+; GFX906-NEXT: global_load_dwordx4 v[37:40], v4, s[4:5] offset:144
+; GFX906-NEXT: global_load_dwordx4 v[41:44], v4, s[4:5] offset:160
+; GFX906-NEXT: global_load_dwordx4 v[45:48], v4, s[4:5] offset:176
+; GFX906-NEXT: global_load_dwordx4 v[49:52], v4, s[4:5] offset:192
+; GFX906-NEXT: global_load_dwordx4 v[53:56], v4, s[4:5] offset:208
+; GFX906-NEXT: global_load_dwordx4 v[57:60], v4, s[4:5] offset:224
+; GFX906-NEXT: global_load_dwordx4 v[0:3], v4, s[4:5] offset:240
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB6_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx4 v[0:3], v4, s[6:7]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 ; 4-byte Folded Spill
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:4 ; 4-byte Folded Spill
+; GFX906-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:8 ; 4-byte Folded Spill
+; GFX906-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:12 ; 4-byte Folded Spill
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v4, s[6:7] offset:16
+; GFX906-NEXT: global_load_dwordx4 v[9:12], v4, s[6:7] offset:32
+; GFX906-NEXT: global_load_dwordx4 v[13:16], v4, s[6:7] offset:48
+; GFX906-NEXT: global_load_dwordx4 v[17:20], v4, s[6:7] offset:64
+; GFX906-NEXT: global_load_dwordx4 v[21:24], v4, s[6:7] offset:80
+; GFX906-NEXT: global_load_dwordx4 v[25:28], v4, s[6:7] offset:96
+; GFX906-NEXT: global_load_dwordx4 v[29:32], v4, s[6:7] offset:112
+; GFX906-NEXT: global_load_dwordx4 v[33:36], v4, s[6:7] offset:128
+; GFX906-NEXT: global_load_dwordx4 v[37:40], v4, s[6:7] offset:144
+; GFX906-NEXT: global_load_dwordx4 v[41:44], v4, s[6:7] offset:160
+; GFX906-NEXT: global_load_dwordx4 v[45:48], v4, s[6:7] offset:176
+; GFX906-NEXT: global_load_dwordx4 v[49:52], v4, s[6:7] offset:192
+; GFX906-NEXT: global_load_dwordx4 v[53:56], v4, s[6:7] offset:208
+; GFX906-NEXT: global_load_dwordx4 v[57:60], v4, s[6:7] offset:224
+; GFX906-NEXT: global_load_dwordx4 v[0:3], v4, s[6:7] offset:240
+; GFX906-NEXT: .LBB6_2: ; %bb.2
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:16 ; 4-byte Folded Spill
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:20 ; 4-byte Folded Spill
+; GFX906-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:24 ; 4-byte Folded Spill
+; GFX906-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:28 ; 4-byte Folded Spill
+; GFX906-NEXT: v_mov_b32_e32 v0, v57
+; GFX906-NEXT: v_mov_b32_e32 v1, v58
+; GFX906-NEXT: v_mov_b32_e32 v2, v59
+; GFX906-NEXT: v_mov_b32_e32 v3, v60
+; GFX906-NEXT: v_mov_b32_e32 v60, v56
+; GFX906-NEXT: v_mov_b32_e32 v59, v55
+; GFX906-NEXT: v_mov_b32_e32 v58, v54
+; GFX906-NEXT: v_mov_b32_e32 v57, v53
+; GFX906-NEXT: v_mov_b32_e32 v56, v52
+; GFX906-NEXT: v_mov_b32_e32 v55, v51
+; GFX906-NEXT: v_mov_b32_e32 v54, v50
+; GFX906-NEXT: v_mov_b32_e32 v53, v49
+; GFX906-NEXT: v_mov_b32_e32 v52, v48
+; GFX906-NEXT: v_mov_b32_e32 v51, v47
+; GFX906-NEXT: v_mov_b32_e32 v50, v46
+; GFX906-NEXT: v_mov_b32_e32 v49, v45
+; GFX906-NEXT: v_mov_b32_e32 v48, v44
+; GFX906-NEXT: v_mov_b32_e32 v47, v43
+; GFX906-NEXT: v_mov_b32_e32 v46, v42
+; GFX906-NEXT: v_mov_b32_e32 v45, v41
+; GFX906-NEXT: v_mov_b32_e32 v44, v40
+; GFX906-NEXT: v_mov_b32_e32 v43, v39
+; GFX906-NEXT: v_mov_b32_e32 v42, v38
+; GFX906-NEXT: v_mov_b32_e32 v41, v37
+; GFX906-NEXT: v_mov_b32_e32 v40, v36
+; GFX906-NEXT: v_mov_b32_e32 v39, v35
+; GFX906-NEXT: v_mov_b32_e32 v38, v34
+; GFX906-NEXT: v_mov_b32_e32 v37, v33
+; GFX906-NEXT: v_mov_b32_e32 v36, v32
+; GFX906-NEXT: v_mov_b32_e32 v35, v31
+; GFX906-NEXT: v_mov_b32_e32 v34, v30
+; GFX906-NEXT: v_mov_b32_e32 v33, v29
+; GFX906-NEXT: v_mov_b32_e32 v32, v28
+; GFX906-NEXT: v_mov_b32_e32 v31, v27
+; GFX906-NEXT: v_mov_b32_e32 v30, v26
+; GFX906-NEXT: v_mov_b32_e32 v29, v25
+; GFX906-NEXT: v_mov_b32_e32 v28, v24
+; GFX906-NEXT: v_mov_b32_e32 v27, v23
+; GFX906-NEXT: v_mov_b32_e32 v26, v22
+; GFX906-NEXT: v_mov_b32_e32 v25, v21
+; GFX906-NEXT: v_mov_b32_e32 v24, v20
+; GFX906-NEXT: v_mov_b32_e32 v23, v19
+; GFX906-NEXT: v_mov_b32_e32 v22, v18
+; GFX906-NEXT: v_mov_b32_e32 v21, v17
+; GFX906-NEXT: v_mov_b32_e32 v20, v16
+; GFX906-NEXT: v_mov_b32_e32 v19, v15
+; GFX906-NEXT: v_mov_b32_e32 v18, v14
+; GFX906-NEXT: v_mov_b32_e32 v17, v13
+; GFX906-NEXT: v_mov_b32_e32 v16, v12
+; GFX906-NEXT: v_mov_b32_e32 v15, v11
+; GFX906-NEXT: v_mov_b32_e32 v14, v10
+; GFX906-NEXT: v_mov_b32_e32 v13, v9
+; GFX906-NEXT: v_mov_b32_e32 v12, v8
+; GFX906-NEXT: v_mov_b32_e32 v11, v7
+; GFX906-NEXT: v_mov_b32_e32 v10, v6
+; GFX906-NEXT: v_mov_b32_e32 v9, v5
+; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 ; 4-byte Folded Reload
+; GFX906-NEXT: buffer_load_dword v6, off, s[8:11], 0 offset:4 ; 4-byte Folded Reload
+; GFX906-NEXT: buffer_load_dword v7, off, s[8:11], 0 offset:8 ; 4-byte Folded Reload
+; GFX906-NEXT: buffer_load_dword v8, off, s[8:11], 0 offset:12 ; 4-byte Folded Reload
+; GFX906-NEXT: v_mov_b32_e32 v4, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx4 v4, v[5:8], s[2:3]
+; GFX906-NEXT: global_store_dwordx4 v4, v[9:12], s[2:3] offset:16
+; GFX906-NEXT: global_store_dwordx4 v4, v[13:16], s[2:3] offset:32
+; GFX906-NEXT: global_store_dwordx4 v4, v[17:20], s[2:3] offset:48
+; GFX906-NEXT: global_store_dwordx4 v4, v[21:24], s[2:3] offset:64
+; GFX906-NEXT: global_store_dwordx4 v4, v[25:28], s[2:3] offset:80
+; GFX906-NEXT: global_store_dwordx4 v4, v[29:32], s[2:3] offset:96
+; GFX906-NEXT: global_store_dwordx4 v4, v[33:36], s[2:3] offset:112
+; GFX906-NEXT: global_store_dwordx4 v4, v[37:40], s[2:3] offset:128
+; GFX906-NEXT: global_store_dwordx4 v4, v[41:44], s[2:3] offset:144
+; GFX906-NEXT: global_store_dwordx4 v4, v[45:48], s[2:3] offset:160
+; GFX906-NEXT: global_store_dwordx4 v4, v[49:52], s[2:3] offset:176
+; GFX906-NEXT: global_store_dwordx4 v4, v[53:56], s[2:3] offset:192
+; GFX906-NEXT: global_store_dwordx4 v4, v[57:60], s[2:3] offset:208
+; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:224
+; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:16 ; 4-byte Folded Reload
+; GFX906-NEXT: s_nop 0
+; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:20 ; 4-byte Folded Reload
+; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:24 ; 4-byte Folded Reload
+; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:28 ; 4-byte Folded Reload
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:240
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <256 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <256 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <256 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <256 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @repeat_successor(i32 %in, ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: repeat_successor:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dword s2, s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: s_cmp_lt_i32 s2, 3
+; GFX906-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX906-NEXT: ; %bb.1: ; %LeafBlock
+; GFX906-NEXT: s_cmp_ge_i32 s2, 1
+; GFX906-NEXT: s_cbranch_scc0 .LBB7_6
+; GFX906-NEXT: ; %bb.2:
+; GFX906-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX906-NEXT: global_load_dword v0, v0, s[4:5]
+; GFX906-NEXT: s_branch .LBB7_5
+; GFX906-NEXT: .LBB7_3: ; %LeafBlock5
+; GFX906-NEXT: s_cmp_eq_u32 s2, 3
+; GFX906-NEXT: s_cbranch_scc0 .LBB7_6
+; GFX906-NEXT: ; %bb.4: ; %sw.bb5
+; GFX906-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX906-NEXT: global_load_dword v0, v0, s[6:7]
+; GFX906-NEXT: .LBB7_5: ; %return.sink.split
+; GFX906-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x3c
+; GFX906-NEXT: v_mov_b32_e32 v1, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX906-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX906-NEXT: .LBB7_6: ; %return
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <4 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <4 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <4 x i8>, ptr addrspace(1) %gep2
+ switch i32 %in, label %return [
+ i32 1, label %return.sink.split
+ i32 2, label %return.sink.split
+ i32 3, label %sw.bb5
+ ]
+
+sw.bb5:
+ br label %return.sink.split
+
+return.sink.split:
+ %tmp5 = phi <4 x i8> [ %vec2, %sw.bb5 ], [ %vec1, %entry ], [ %vec1, %entry ]
+ store <4 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+
+return:
+ ret void
+}
+
+define amdgpu_kernel void @v8i8_phi_chain(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst0, ptr addrspace(1) nocapture %dst1) {
+; GFX906-LABEL: v8i8_phi_chain:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 3, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[0:1]
+; GFX906-NEXT: s_xor_b64 s[0:1], vcc, -1
+; GFX906-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB8_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[2:3]
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
+; GFX906-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
+; GFX906-NEXT: s_and_b64 s[2:3], exec, vcc
+; GFX906-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX906-NEXT: .LBB8_2: ; %Flow
+; GFX906-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX906-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GFX906-NEXT: s_cbranch_execz .LBB8_4
+; GFX906-NEXT: ; %bb.3: ; %bb.2
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx2 v0, v[1:2], s[4:5]
+; GFX906-NEXT: .LBB8_4: ; %bb.3
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx2 v0, v[1:2], s[6:7]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
+
+bb.2:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst0, align 4
+ br label %bb.3
+
+bb.3:
+ %tmp7 = phi <8 x i8> [ %vec2, %bb.1 ], [ %tmp5, %bb.2 ]
+ store <8 x i8> %tmp7, ptr addrspace(1) %dst1, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v8i8_multi_block(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst0, ptr addrspace(1) nocapture %dst1) {
+; GFX906-LABEL: v8i8_multi_block:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v5, 3, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[3:4], v5, s[0:1]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_mov_b32_e32 v1, v3
+; GFX906-NEXT: v_mov_b32_e32 v2, v4
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB9_4
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v5, s[2:3]
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
+; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB9_3
+; GFX906-NEXT: ; %bb.2: ; %bb.2
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: global_store_dwordx2 v0, v[3:4], s[4:5]
+; GFX906-NEXT: .LBB9_3: ; %Flow
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: .LBB9_4: ; %bb.3
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx2 v0, v[1:2], s[6:7]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.3
+bb.1:
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
+
+bb.2:
+ store <8 x i8> %vec1, ptr addrspace(1) %dst0, align 4
+ br label %bb.3
+
+bb.3:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ], [ %vec2, %bb.2 ]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst1, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v32i8_loop_carried(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v32i8_loop_carried:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v1, 5, v0
+; GFX906-NEXT: v_mov_b32_e32 v3, 8
+; GFX906-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX906-NEXT: v_cmp_le_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dword v1, v1, s[2:3]
+; GFX906-NEXT: s_mov_b64 s[2:3], 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshlrev_b32_sdwa v0, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX906-NEXT: v_and_or_b32 v0, v1, v2, v0
+; GFX906-NEXT: v_mov_b32_e32 v2, 24
+; GFX906-NEXT: .LBB10_1: ; %bb.1
+; GFX906-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX906-NEXT: v_and_b32_e32 v3, 0xff, v1
+; GFX906-NEXT: s_and_b64 s[4:5], exec, vcc
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX906-NEXT: v_lshlrev_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX906-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3]
+; GFX906-NEXT: v_or3_b32 v1, v0, v3, v1
+; GFX906-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX906-NEXT: s_cbranch_execnz .LBB10_1
+; GFX906-NEXT: ; %bb.2: ; %bb.2.loopexit
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <32 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ br label %bb.1
+
+bb.1:
+ %temp = phi <4 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ %vec2 = shufflevector <4 x i8> %vec1, <4 x i8> %temp, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+
+bb.2:
+ store <4 x i8> %vec2, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
index 93b9aeac3cd3f..11772d252a16f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
@@ -987,8 +987,8 @@ define amdgpu_kernel void @phi_v7i16_switch(<7 x i16> %in, ptr %out, i8 %cond) {
; OPT-NEXT: entry:
; OPT-NEXT: [[X:%.*]] = insertelement <7 x i16> [[IN:%.*]], i16 3, i32 3
; OPT-NEXT: switch i8 [[COND:%.*]], label [[ELSE:%.*]] [
-; OPT-NEXT: i8 0, label [[THEN_1:%.*]]
-; OPT-NEXT: i8 3, label [[THEN_2:%.*]]
+; OPT-NEXT: i8 0, label [[THEN_1:%.*]]
+; OPT-NEXT: i8 3, label [[THEN_2:%.*]]
; OPT-NEXT: ]
; OPT: then.1:
; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE0:%.*]] = shufflevector <7 x i16> [[X]], <7 x i16> poison, <2 x i32> <i32 0, i32 1>
@@ -1025,8 +1025,8 @@ define amdgpu_kernel void @phi_v7i16_switch(<7 x i16> %in, ptr %out, i8 %cond) {
; NOOPT-NEXT: entry:
; NOOPT-NEXT: [[X:%.*]] = insertelement <7 x i16> [[IN:%.*]], i16 3, i32 3
; NOOPT-NEXT: switch i8 [[COND:%.*]], label [[ELSE:%.*]] [
-; NOOPT-NEXT: i8 0, label [[THEN_1:%.*]]
-; NOOPT-NEXT: i8 3, label [[THEN_2:%.*]]
+; NOOPT-NEXT: i8 0, label [[THEN_1:%.*]]
+; NOOPT-NEXT: i8 3, label [[THEN_2:%.*]]
; NOOPT-NEXT: ]
; NOOPT: then.1:
; NOOPT-NEXT: br label [[FINALLY:%.*]]
diff --git a/llvm/test/CodeGen/AMDGPU/dagcomb-extract-vec-elt-different-sizes.ll b/llvm/test/CodeGen/AMDGPU/dagcomb-extract-vec-elt-different-sizes.ll
index 53acbb6a7bceb..1e5ec361d154c 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcomb-extract-vec-elt-different-sizes.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcomb-extract-vec-elt-different-sizes.ll
@@ -8,29 +8,30 @@ define amdgpu_kernel void @eggs(i1 %arg, ptr addrspace(1) %arg1, ptr %arg2, ptr
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_dword s0, s[4:5], 0x0
; CHECK-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x8
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_bitcmp0_b32 s0, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB0_2
; CHECK-NEXT: ; %bb.1: ; %bb10
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: global_load_dwordx2 v[0:1], v0, s[8:9]
+; CHECK-NEXT: global_load_dwordx2 v[8:9], v0, s[8:9]
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_lshrrev_b32_e32 v7, 8, v0
-; CHECK-NEXT: v_lshrrev_b32_e32 v6, 16, v0
-; CHECK-NEXT: v_lshrrev_b32_e32 v5, 24, v0
-; CHECK-NEXT: v_lshrrev_b32_e32 v4, 8, v1
-; CHECK-NEXT: v_lshrrev_b32_e32 v3, 16, v1
-; CHECK-NEXT: v_lshrrev_b32_e32 v2, 24, v1
+; CHECK-NEXT: v_and_b32_e32 v7, 0xff, v8
+; CHECK-NEXT: v_bfe_u32 v6, v8, 8, 8
+; CHECK-NEXT: v_bfe_u32 v5, v8, 16, 8
+; CHECK-NEXT: v_lshrrev_b32_e32 v4, 24, v8
+; CHECK-NEXT: v_and_b32_e32 v3, 0xff, v9
+; CHECK-NEXT: v_bfe_u32 v2, v9, 8, 8
+; CHECK-NEXT: v_bfe_u32 v1, v9, 16, 8
+; CHECK-NEXT: v_lshrrev_b32_e32 v0, 24, v9
; CHECK-NEXT: s_branch .LBB0_3
; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v5, 0
; CHECK-NEXT: v_mov_b32_e32 v6, 0
; CHECK-NEXT: v_mov_b32_e32 v7, 0
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: .LBB0_3: ; %bb41
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x48
; CHECK-NEXT: v_mov_b32_e32 v8, s10
@@ -47,16 +48,16 @@ define amdgpu_kernel void @eggs(i1 %arg, ptr addrspace(1) %arg1, ptr %arg2, ptr
; CHECK-NEXT: v_mov_b32_e32 v19, s21
; CHECK-NEXT: v_mov_b32_e32 v20, s22
; CHECK-NEXT: v_mov_b32_e32 v21, s23
-; CHECK-NEXT: flat_store_byte v[8:9], v0
-; CHECK-NEXT: flat_store_byte v[10:11], v7
-; CHECK-NEXT: flat_store_byte v[12:13], v6
-; CHECK-NEXT: flat_store_byte v[14:15], v5
-; CHECK-NEXT: flat_store_byte v[16:17], v1
-; CHECK-NEXT: flat_store_byte v[18:19], v4
-; CHECK-NEXT: flat_store_byte v[20:21], v3
+; CHECK-NEXT: flat_store_byte v[8:9], v7
+; CHECK-NEXT: flat_store_byte v[10:11], v6
+; CHECK-NEXT: flat_store_byte v[12:13], v5
+; CHECK-NEXT: flat_store_byte v[14:15], v4
+; CHECK-NEXT: flat_store_byte v[16:17], v3
+; CHECK-NEXT: flat_store_byte v[18:19], v2
+; CHECK-NEXT: flat_store_byte v[20:21], v1
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
-; CHECK-NEXT: flat_store_byte v[0:1], v2
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; CHECK-NEXT: flat_store_byte v[2:3], v0
; CHECK-NEXT: s_endpgm
bb:
br i1 %arg, label %bb10, label %bb41
diff --git a/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll b/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
index 6dabd8c0b83ea..efbbe2b27f10f 100644
--- a/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
@@ -13,9 +13,9 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_ushort v5, v[2:3], s[4:7], 0 addr64 glc
+; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 offset:2 glc
+; SI-NEXT: buffer_load_ushort v5, v[2:3], s[4:7], 0 addr64 offset:2 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v6, v[2:3], s[4:7], 0 addr64 offset:4 glc
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -30,27 +30,25 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; SI-NEXT: buffer_load_ushort v2, v[2:3], s[4:7], 0 addr64 offset:14 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v7
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT: v_or_b32_e32 v2, v6, v2
-; SI-NEXT: v_or_b32_e32 v3, v5, v3
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_or_b32_e32 v3, v6, v2
+; SI-NEXT: v_or_b32_e32 v2, v4, v5
; SI-NEXT: s_mov_b64 vcc, exec
; SI-NEXT: s_cbranch_execz .LBB0_3
; SI-NEXT: s_branch .LBB0_4
; SI-NEXT: .LBB0_2:
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr2
+; SI-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
; SI-NEXT: s_mov_b64 vcc, 0
; SI-NEXT: .LBB0_3: ; %T
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:2 glc
+; SI-NEXT: buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64 offset:2 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:4 glc
+; SI-NEXT: buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:4 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v5, v[0:1], s[4:7], 0 addr64 offset:6 glc
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -63,29 +61,29 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[4:7], 0 addr64 offset:14 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT: v_or_b32_e32 v2, v2, v0
-; SI-NEXT: v_or_b32_e32 v3, v3, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT: v_or_b32_e32 v3, v4, v0
+; SI-NEXT: v_or_b32_e32 v2, v2, v1
; SI-NEXT: .LBB0_4: ; %exit
-; SI-NEXT: v_bfe_i32 v0, v3, 0, 16
-; SI-NEXT: v_bfe_i32 v1, v4, 0, 16
-; SI-NEXT: v_bfe_i32 v2, v2, 0, 16
-; SI-NEXT: v_mov_b32_e32 v3, 0xffff
-; SI-NEXT: v_mov_b32_e32 v4, 0x8000
-; SI-NEXT: v_mov_b32_e32 v5, 0xffff0000
-; SI-NEXT: v_bfrev_b32_e32 v6, 1
+; SI-NEXT: v_ashrrev_i32_e32 v0, 16, v2
+; SI-NEXT: v_bfe_i32 v1, v2, 0, 16
+; SI-NEXT: v_bfe_i32 v2, v3, 0, 16
+; SI-NEXT: v_mov_b32_e32 v3, 0xffff0000
+; SI-NEXT: v_bfrev_b32_e32 v4, 1
+; SI-NEXT: v_mov_b32_e32 v5, 0xffff
+; SI-NEXT: v_mov_b32_e32 v6, 0x8000
; SI-NEXT: v_mov_b32_e32 v7, 0xffff8000
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v0
-; SI-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; SI-NEXT: v_cndmask_b32_e32 v4, v3, v4, vcc
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v1
-; SI-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; SI-NEXT: v_cndmask_b32_e32 v0, v5, v6, vcc
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v2
-; SI-NEXT: v_cndmask_b32_e32 v2, -1, v7, vcc
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; SI-NEXT: v_and_b32_e32 v3, 0xffff, v2
-; SI-NEXT: v_or_b32_e32 v2, v3, v4
-; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; SI-NEXT: v_cndmask_b32_e32 v1, -1, v7, vcc
+; SI-NEXT: v_or_b32_e32 v0, v0, v4
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: v_alignbit_b32 v1, v2, v4, 16
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: vec_8xi16_extract_4xi16:
@@ -180,26 +178,23 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 offset:6 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v5, v[2:3], s[4:7], 0 addr64 offset:8 glc
+; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 offset:8 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 offset:10 glc
+; SI-NEXT: buffer_load_ushort v5, v[2:3], s[4:7], 0 addr64 offset:10 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v6, v[2:3], s[4:7], 0 addr64 offset:12 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v2, v[2:3], s[4:7], 0 addr64 offset:14 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v4
-; SI-NEXT: v_or_b32_e32 v3, v6, v3
-; SI-NEXT: v_or_b32_e32 v5, v5, v7
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5
+; SI-NEXT: v_or_b32_e32 v5, v6, v2
+; SI-NEXT: v_or_b32_e32 v4, v4, v3
; SI-NEXT: s_mov_b64 vcc, exec
; SI-NEXT: s_cbranch_execz .LBB1_3
; SI-NEXT: s_branch .LBB1_4
; SI-NEXT: .LBB1_2:
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr2
+; SI-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
; SI-NEXT: s_mov_b64 vcc, 0
; SI-NEXT: .LBB1_3: ; %T
; SI-NEXT: s_mov_b32 s6, 0
@@ -214,39 +209,39 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:6 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v5, v[0:1], s[4:7], 0 addr64 offset:8 glc
+; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:8 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:10 glc
+; SI-NEXT: buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64 offset:10 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64 offset:12 glc
+; SI-NEXT: buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:12 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:14 glc
+; SI-NEXT: buffer_load_ushort v0, v[0:1], s[4:7], 0 addr64 offset:14 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT: v_or_b32_e32 v3, v3, v0
-; SI-NEXT: v_or_b32_e32 v5, v5, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT: v_or_b32_e32 v5, v4, v0
+; SI-NEXT: v_or_b32_e32 v4, v2, v1
; SI-NEXT: .LBB1_4: ; %exit
-; SI-NEXT: v_bfe_i32 v0, v5, 0, 16
+; SI-NEXT: v_ashrrev_i32_e32 v2, 16, v4
+; SI-NEXT: v_ashr_i64 v[0:1], v[4:5], 48
; SI-NEXT: v_bfe_i32 v1, v4, 0, 16
-; SI-NEXT: v_bfe_i32 v3, v3, 0, 16
-; SI-NEXT: v_bfe_i32 v2, v2, 0, 16
-; SI-NEXT: v_mov_b32_e32 v4, 0xffff
-; SI-NEXT: v_mov_b32_e32 v5, 0x8000
-; SI-NEXT: v_mov_b32_e32 v6, 0xffff0000
-; SI-NEXT: v_bfrev_b32_e32 v7, 1
-; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v0
-; SI-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; SI-NEXT: v_bfe_i32 v3, v5, 0, 16
+; SI-NEXT: v_mov_b32_e32 v4, 0xffff0000
+; SI-NEXT: v_bfrev_b32_e32 v5, 1
+; SI-NEXT: v_mov_b32_e32 v6, 0xffff
+; SI-NEXT: v_mov_b32_e32 v7, 0x8000
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v2
+; SI-NEXT: v_cndmask_b32_e32 v8, v4, v5, vcc
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v1
; SI-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v3
+; SI-NEXT: v_cndmask_b32_e32 v2, v6, v7, vcc
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v0
; SI-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
-; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v2
-; SI-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_or_b32_e32 v2, v3, v4
-; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; SI-NEXT: v_or_b32_e32 v0, v1, v8
+; SI-NEXT: v_or_b32_e32 v2, v2, v3
+; SI-NEXT: v_alignbit_b32 v1, v2, v8, 16
+; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: vec_8xi16_extract_4xi16_2:
@@ -499,9 +494,9 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_ushort v5, v[2:3], s[4:7], 0 addr64 glc
+; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 offset:2 glc
+; SI-NEXT: buffer_load_ushort v5, v[2:3], s[4:7], 0 addr64 offset:2 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v6, v[2:3], s[4:7], 0 addr64 offset:4 glc
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -532,27 +527,25 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; SI-NEXT: buffer_load_ushort v2, v[2:3], s[4:7], 0 addr64 offset:30 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v7
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT: v_or_b32_e32 v2, v6, v2
-; SI-NEXT: v_or_b32_e32 v3, v5, v3
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_or_b32_e32 v3, v6, v2
+; SI-NEXT: v_or_b32_e32 v2, v4, v5
; SI-NEXT: s_mov_b64 vcc, exec
; SI-NEXT: s_cbranch_execz .LBB3_3
; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_2:
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr2
+; SI-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
; SI-NEXT: s_mov_b64 vcc, 0
; SI-NEXT: .LBB3_3: ; %T
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:2 glc
+; SI-NEXT: buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64 offset:2 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:4 glc
+; SI-NEXT: buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:4 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v5, v[0:1], s[4:7], 0 addr64 offset:6 glc
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -581,29 +574,29 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[4:7], 0 addr64 offset:30 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT: v_or_b32_e32 v2, v2, v0
-; SI-NEXT: v_or_b32_e32 v3, v3, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT: v_or_b32_e32 v3, v4, v0
+; SI-NEXT: v_or_b32_e32 v2, v2, v1
; SI-NEXT: .LBB3_4: ; %exit
-; SI-NEXT: v_bfe_i32 v0, v3, 0, 16
-; SI-NEXT: v_bfe_i32 v1, v4, 0, 16
-; SI-NEXT: v_bfe_i32 v2, v2, 0, 16
-; SI-NEXT: v_mov_b32_e32 v3, 0xffff
-; SI-NEXT: v_mov_b32_e32 v4, 0x8000
-; SI-NEXT: v_mov_b32_e32 v5, 0xffff0000
-; SI-NEXT: v_bfrev_b32_e32 v6, 1
+; SI-NEXT: v_ashrrev_i32_e32 v0, 16, v2
+; SI-NEXT: v_bfe_i32 v1, v2, 0, 16
+; SI-NEXT: v_bfe_i32 v2, v3, 0, 16
+; SI-NEXT: v_mov_b32_e32 v3, 0xffff0000
+; SI-NEXT: v_bfrev_b32_e32 v4, 1
+; SI-NEXT: v_mov_b32_e32 v5, 0xffff
+; SI-NEXT: v_mov_b32_e32 v6, 0x8000
; SI-NEXT: v_mov_b32_e32 v7, 0xffff8000
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v0
-; SI-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; SI-NEXT: v_cndmask_b32_e32 v4, v3, v4, vcc
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v1
-; SI-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; SI-NEXT: v_cndmask_b32_e32 v0, v5, v6, vcc
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v2
-; SI-NEXT: v_cndmask_b32_e32 v2, -1, v7, vcc
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; SI-NEXT: v_and_b32_e32 v3, 0xffff, v2
-; SI-NEXT: v_or_b32_e32 v2, v3, v4
-; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; SI-NEXT: v_cndmask_b32_e32 v1, -1, v7, vcc
+; SI-NEXT: v_or_b32_e32 v0, v0, v4
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: v_alignbit_b32 v1, v2, v4, 16
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: vec_16xi16_extract_4xi16:
@@ -710,13 +703,13 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 offset:6 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v6, v[2:3], s[4:7], 0 addr64 offset:8 glc
+; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 offset:8 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[2:3], s[4:7], 0 addr64 offset:10 glc
+; SI-NEXT: buffer_load_ushort v5, v[2:3], s[4:7], 0 addr64 offset:10 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v7, v[2:3], s[4:7], 0 addr64 offset:12 glc
+; SI-NEXT: buffer_load_ushort v6, v[2:3], s[4:7], 0 addr64 offset:12 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v5, v[2:3], s[4:7], 0 addr64 offset:14 glc
+; SI-NEXT: buffer_load_ushort v7, v[2:3], s[4:7], 0 addr64 offset:14 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v8, v[2:3], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -734,18 +727,15 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v2, v[2:3], s[4:7], 0 addr64 offset:30 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT: v_or_b32_e32 v2, v7, v2
-; SI-NEXT: v_or_b32_e32 v3, v6, v3
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5
+; SI-NEXT: v_or_b32_e32 v5, v6, v2
+; SI-NEXT: v_or_b32_e32 v4, v4, v3
; SI-NEXT: s_mov_b64 vcc, exec
; SI-NEXT: s_cbranch_execz .LBB4_3
; SI-NEXT: s_branch .LBB4_4
; SI-NEXT: .LBB4_2:
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
; SI-NEXT: s_mov_b64 vcc, 0
; SI-NEXT: .LBB4_3: ; %T
; SI-NEXT: s_mov_b32 s6, 0
@@ -760,11 +750,11 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:6 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64 offset:8 glc
+; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:8 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:10 glc
+; SI-NEXT: buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64 offset:10 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:12 glc
+; SI-NEXT: buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:12 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v5, v[0:1], s[4:7], 0 addr64 offset:14 glc
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -785,29 +775,29 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[4:7], 0 addr64 offset:30 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT: v_or_b32_e32 v2, v2, v0
-; SI-NEXT: v_or_b32_e32 v3, v3, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT: v_or_b32_e32 v5, v4, v0
+; SI-NEXT: v_or_b32_e32 v4, v2, v1
; SI-NEXT: .LBB4_4: ; %exit
-; SI-NEXT: v_bfe_i32 v0, v3, 0, 16
+; SI-NEXT: v_ashrrev_i32_e32 v2, 16, v4
+; SI-NEXT: v_ashr_i64 v[0:1], v[4:5], 48
; SI-NEXT: v_bfe_i32 v1, v4, 0, 16
-; SI-NEXT: v_bfe_i32 v2, v2, 0, 16
; SI-NEXT: v_bfe_i32 v3, v5, 0, 16
-; SI-NEXT: v_mov_b32_e32 v4, 0xffff
-; SI-NEXT: v_mov_b32_e32 v5, 0x8000
-; SI-NEXT: v_mov_b32_e32 v6, 0xffff0000
-; SI-NEXT: v_bfrev_b32_e32 v7, 1
-; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v0
-; SI-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; SI-NEXT: v_mov_b32_e32 v4, 0xffff0000
+; SI-NEXT: v_bfrev_b32_e32 v5, 1
+; SI-NEXT: v_mov_b32_e32 v6, 0xffff
+; SI-NEXT: v_mov_b32_e32 v7, 0x8000
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v2
+; SI-NEXT: v_cndmask_b32_e32 v8, v4, v5, vcc
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v1
; SI-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc
-; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v2
-; SI-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v3
-; SI-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_cndmask_b32_e32 v2, v6, v7, vcc
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, -1, v0
+; SI-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
+; SI-NEXT: v_or_b32_e32 v0, v1, v8
; SI-NEXT: v_or_b32_e32 v2, v2, v3
-; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; SI-NEXT: v_alignbit_b32 v1, v2, v8, 16
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1205,21 +1195,21 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; SI-NEXT: s_mov_b32 s39, 0xf000
; SI-NEXT: s_mov_b32 s36, s38
; SI-NEXT: s_mov_b32 s37, s38
-; SI-NEXT: buffer_load_ushort v9, v[2:3], s[36:39], 0 addr64 glc
+; SI-NEXT: buffer_load_ushort v6, v[2:3], s[36:39], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v5, v[2:3], s[36:39], 0 addr64 offset:2 glc
+; SI-NEXT: buffer_load_ushort v4, v[2:3], s[36:39], 0 addr64 offset:2 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v10, v[2:3], s[36:39], 0 addr64 offset:4 glc
+; SI-NEXT: buffer_load_ushort v7, v[2:3], s[36:39], 0 addr64 offset:4 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v4, v[2:3], s[36:39], 0 addr64 offset:6 glc
+; SI-NEXT: buffer_load_ushort v5, v[2:3], s[36:39], 0 addr64 offset:6 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v8, v[2:3], s[36:39], 0 addr64 offset:8 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v6, v[2:3], s[36:39], 0 addr64 offset:10 glc
+; SI-NEXT: buffer_load_ushort v9, v[2:3], s[36:39], 0 addr64 offset:10 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v11, v[2:3], s[36:39], 0 addr64 offset:12 glc
+; SI-NEXT: buffer_load_ushort v10, v[2:3], s[36:39], 0 addr64 offset:12 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v7, v[2:3], s[36:39], 0 addr64 offset:14 glc
+; SI-NEXT: buffer_load_ushort v11, v[2:3], s[36:39], 0 addr64 offset:14 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -1237,46 +1227,39 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v2, v[2:3], s[36:39], 0 addr64 offset:30 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v7
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v6
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v5
-; SI-NEXT: v_or_b32_e32 v3, v11, v2
-; SI-NEXT: v_or_b32_e32 v8, v8, v12
-; SI-NEXT: v_or_b32_e32 v2, v10, v13
-; SI-NEXT: v_or_b32_e32 v9, v9, v14
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v11
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v9
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v4
+; SI-NEXT: v_or_b32_e32 v5, v10, v2
+; SI-NEXT: v_or_b32_e32 v4, v8, v3
+; SI-NEXT: v_or_b32_e32 v3, v7, v9
+; SI-NEXT: v_or_b32_e32 v2, v6, v11
; SI-NEXT: s_mov_b64 vcc, exec
; SI-NEXT: s_cbranch_execz .LBB7_3
; SI-NEXT: s_branch .LBB7_4
; SI-NEXT: .LBB7_2:
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr6
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr7
+; SI-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
; SI-NEXT: s_mov_b64 vcc, 0
; SI-NEXT: .LBB7_3: ; %T
; SI-NEXT: s_mov_b32 s39, 0xf000
; SI-NEXT: s_mov_b32 s36, s38
; SI-NEXT: s_mov_b32 s37, s38
-; SI-NEXT: buffer_load_ushort v9, v[0:1], s[36:39], 0 addr64 glc
+; SI-NEXT: buffer_load_ushort v2, v[0:1], s[36:39], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v5, v[0:1], s[36:39], 0 addr64 offset:2 glc
+; SI-NEXT: buffer_load_ushort v3, v[0:1], s[36:39], 0 addr64 offset:2 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v2, v[0:1], s[36:39], 0 addr64 offset:4 glc
+; SI-NEXT: buffer_load_ushort v6, v[0:1], s[36:39], 0 addr64 offset:4 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v4, v[0:1], s[36:39], 0 addr64 offset:6 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v8, v[0:1], s[36:39], 0 addr64 offset:8 glc
+; SI-NEXT: buffer_load_ushort v7, v[0:1], s[36:39], 0 addr64 offset:8 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v6, v[0:1], s[36:39], 0 addr64 offset:10 glc
+; SI-NEXT: buffer_load_ushort v5, v[0:1], s[36:39], 0 addr64 offset:10 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v3, v[0:1], s[36:39], 0 addr64 offset:12 glc
+; SI-NEXT: buffer_load_ushort v8, v[0:1], s[36:39], 0 addr64 offset:12 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_load_ushort v7, v[0:1], s[36:39], 0 addr64 offset:14 glc
+; SI-NEXT: buffer_load_ushort v9, v[0:1], s[36:39], 0 addr64 offset:14 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -1294,52 +1277,52 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[36:39], 0 addr64 offset:30 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v5
-; SI-NEXT: v_or_b32_e32 v3, v3, v0
-; SI-NEXT: v_or_b32_e32 v8, v8, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v4
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v3
+; SI-NEXT: v_or_b32_e32 v5, v8, v0
+; SI-NEXT: v_or_b32_e32 v4, v7, v1
+; SI-NEXT: v_or_b32_e32 v3, v6, v9
; SI-NEXT: v_or_b32_e32 v2, v2, v10
-; SI-NEXT: v_or_b32_e32 v9, v9, v11
; SI-NEXT: .LBB7_4: ; %exit
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v9
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v5
-; SI-NEXT: v_and_b32_e32 v5, 0xffff, v8
-; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4
; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5
+; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: s_movk_i32 s34, 0x3800
-; SI-NEXT: v_mov_b32_e32 v8, 0x3d00
-; SI-NEXT: v_mov_b32_e32 v9, 0x3900
-; SI-NEXT: v_mov_b32_e32 v10, 0x3d000000
-; SI-NEXT: v_mov_b32_e32 v11, 0x39000000
+; SI-NEXT: v_mov_b32_e32 v8, 0x3d000000
+; SI-NEXT: v_mov_b32_e32 v9, 0x39000000
+; SI-NEXT: v_mov_b32_e32 v10, 0x3d00
+; SI-NEXT: v_mov_b32_e32 v11, 0x3900
; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v0
-; SI-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; SI-NEXT: v_cndmask_b32_e32 v12, v8, v9, vcc
; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v1
+; SI-NEXT: v_cndmask_b32_e32 v0, v10, v11, vcc
+; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v2
+; SI-NEXT: v_cndmask_b32_e32 v13, v8, v9, vcc
+; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v4
; SI-NEXT: v_cndmask_b32_e32 v1, v10, v11, vcc
+; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v6
+; SI-NEXT: v_cndmask_b32_e32 v14, v8, v9, vcc
; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v5
+; SI-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc
+; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v7
; SI-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v6
-; SI-NEXT: v_cndmask_b32_e32 v12, v10, v11, vcc
; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v3
-; SI-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc
-; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v7
-; SI-NEXT: v_cndmask_b32_e32 v7, v10, v11, vcc
-; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v2
-; SI-NEXT: v_cndmask_b32_e32 v2, v8, v9, vcc
-; SI-NEXT: v_cmp_lt_u32_e32 vcc, s34, v4
-; SI-NEXT: v_cndmask_b32_e32 v8, v10, v11, vcc
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_or_b32_e32 v4, v5, v12
-; SI-NEXT: v_or_b32_e32 v6, v3, v7
-; SI-NEXT: v_or_b32_e32 v2, v2, v8
-; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v8
-; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: v_alignbit_b32 v5, v6, v12, 16
-; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; SI-NEXT: v_cndmask_b32_e32 v3, v10, v11, vcc
+; SI-NEXT: v_or_b32_e32 v0, v0, v12
+; SI-NEXT: v_or_b32_e32 v4, v1, v13
+; SI-NEXT: v_or_b32_e32 v6, v2, v14
+; SI-NEXT: v_or_b32_e32 v2, v3, v5
+; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; SI-NEXT: v_alignbit_b32 v1, v2, v12, 16
+; SI-NEXT: v_alignbit_b32 v5, v6, v13, 16
+; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v14
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: vec_16xi16_extract_8xi16_0:
diff --git a/llvm/test/CodeGen/AMDGPU/extract-subvector.ll b/llvm/test/CodeGen/AMDGPU/extract-subvector.ll
index 15abf44f3a0ea..36a93bd2511ce 100644
--- a/llvm/test/CodeGen/AMDGPU/extract-subvector.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract-subvector.ll
@@ -1,26 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn-- -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
-; GCN-LABEL: extract_2xi16
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: v_bfe_i32
-; GCN: v_bfe_i32
-
define <2 x i16> @extract_2xi16(ptr addrspace(1) %p0, ptr addrspace(1) %p1, i1 %c0) {
+; GCN-LABEL: extract_2xi16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v4, 1, v4
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
+; GCN-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7
+; GCN-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
+; GCN-NEXT: s_cbranch_execz .LBB0_2
+; GCN-NEXT: ; %bb.1: ; %F
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_ushort v0, v[2:3], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v1, v[2:3], s[8:11], 0 addr64 offset:2 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[2:3], s[8:11], 0 addr64 offset:4 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[2:3], s[8:11], 0 addr64 offset:6 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[2:3], s[8:11], 0 addr64 offset:8 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[2:3], s[8:11], 0 addr64 offset:10 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[2:3], s[8:11], 0 addr64 offset:12 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v2, v[2:3], s[8:11], 0 addr64 offset:14 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT: v_or_b32_e32 v4, v0, v1
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: .LBB0_2: ; %Flow
+; GCN-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB0_4
+; GCN-NEXT: ; %bb.3: ; %T
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_ushort v2, v[0:1], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v3, v[0:1], s[8:11], 0 addr64 offset:2 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[0:1], s[8:11], 0 addr64 offset:4 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[0:1], s[8:11], 0 addr64 offset:6 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[0:1], s[8:11], 0 addr64 offset:8 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[0:1], s[8:11], 0 addr64 offset:10 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v4, v[0:1], s[8:11], 0 addr64 offset:12 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_ushort v0, v[0:1], s[8:11], 0 addr64 offset:14 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v3
+; GCN-NEXT: v_or_b32_e32 v4, v2, v0
+; GCN-NEXT: .LBB0_4: ; %exit
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: v_ashrrev_i32_e32 v0, 16, v4
+; GCN-NEXT: v_bfe_i32 v1, v4, 0, 16
+; GCN-NEXT: v_mov_b32_e32 v2, 0xffff
+; GCN-NEXT: v_mov_b32_e32 v3, 0x8000
+; GCN-NEXT: v_mov_b32_e32 v4, 0xffff8000
+; GCN-NEXT: v_cmp_lt_i32_e32 vcc, -1, v1
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_cmp_lt_i32_e32 vcc, -1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v2, -1, v4, vcc
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v2
+; GCN-NEXT: v_or_b32_e32 v0, v1, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GCN-NEXT: s_setpc_b64 s[30:31]
br i1 %c0, label %T, label %F
T:
@@ -39,9 +95,59 @@ exit:
ret <2 x i16> %r2
}
-; GCN-LABEL: extract_2xi64
-; GCN-COUNT-2: v_cndmask_b32
define <2 x i64> @extract_2xi64(ptr addrspace(1) %p0, ptr addrspace(1) %p1, i1 %c0) {
+; GCN-LABEL: extract_2xi64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v4, 1, v4
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
+; GCN-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19
+; GCN-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
+; GCN-NEXT: s_cbranch_execz .LBB1_2
+; GCN-NEXT: ; %bb.1: ; %F
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[2:3], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[2:3], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[2:3], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: .LBB1_2: ; %Flow
+; GCN-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB1_4
+; GCN-NEXT: ; %bb.3: ; %T
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[0:1], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: .LBB1_4: ; %exit
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, 0xffff8000
+; GCN-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v0, -1, v1, vcc
+; GCN-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[6:7]
+; GCN-NEXT: v_cndmask_b32_e32 v2, -1, v1, vcc
+; GCN-NEXT: v_mov_b32_e32 v1, -1
+; GCN-NEXT: v_mov_b32_e32 v3, -1
+; GCN-NEXT: s_setpc_b64 s[30:31]
br i1 %c0, label %T, label %F
T:
@@ -60,9 +166,65 @@ exit:
ret <2 x i64> %r2
}
-; GCN-LABEL: extract_4xi64
-; GCN-COUNT-4: v_cndmask_b32
define <4 x i64> @extract_4xi64(ptr addrspace(1) %p0, ptr addrspace(1) %p1, i1 %c0) {
+; GCN-LABEL: extract_4xi64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v4, 1, v4
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
+; GCN-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19
+; GCN-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
+; GCN-NEXT: s_cbranch_execz .LBB2_2
+; GCN-NEXT: ; %bb.1: ; %F
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[2:3], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[12:15], v[2:3], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[2:3], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: .LBB2_2: ; %Flow
+; GCN-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB2_4
+; GCN-NEXT: ; %bb.3: ; %T
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[0:1], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[12:15], v[0:1], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: .LBB2_4: ; %exit
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, 0xffff8000
+; GCN-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, v1, -1, vcc
+; GCN-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[6:7]
+; GCN-NEXT: v_cndmask_b32_e64 v2, v1, -1, vcc
+; GCN-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[8:9]
+; GCN-NEXT: v_cndmask_b32_e64 v4, v1, -1, vcc
+; GCN-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[10:11]
+; GCN-NEXT: v_cndmask_b32_e64 v6, v1, -1, vcc
+; GCN-NEXT: v_mov_b32_e32 v1, -1
+; GCN-NEXT: v_mov_b32_e32 v3, -1
+; GCN-NEXT: v_mov_b32_e32 v5, -1
+; GCN-NEXT: v_mov_b32_e32 v7, -1
+; GCN-NEXT: s_setpc_b64 s[30:31]
br i1 %c0, label %T, label %F
T:
@@ -81,9 +243,92 @@ exit:
ret <4 x i64> %r2
}
-; GCN-LABEL: extract_8xi64
-; GCN-COUNT-8: v_cndmask_b32
define <8 x i64> @extract_8xi64(ptr addrspace(1) %p0, ptr addrspace(1) %p1, i1 %c0) {
+; GCN-LABEL: extract_8xi64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v4, 1, v4
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
+; GCN-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+; GCN-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
+; GCN-NEXT: s_cbranch_execz .LBB3_2
+; GCN-NEXT: ; %bb.1: ; %F
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 offset:112 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 offset:96 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 offset:80 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 offset:64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[2:3], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[12:15], v[2:3], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[16:19], v[2:3], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: .LBB3_2: ; %Flow
+; GCN-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB3_4
+; GCN-NEXT: ; %bb.3: ; %T
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dwordx4 v[2:5], v[0:1], s[8:11], 0 addr64 offset:112 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[2:5], v[0:1], s[8:11], 0 addr64 offset:96 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[2:5], v[0:1], s[8:11], 0 addr64 offset:80 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[2:5], v[0:1], s[8:11], 0 addr64 offset:64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[0:1], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[12:15], v[0:1], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[16:19], v[0:1], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: .LBB3_4: ; %exit
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: v_mov_b32_e32 v1, 0xffff8000
+; GCN-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[6:7]
+; GCN-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[8:9]
+; GCN-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[10:11]
+; GCN-NEXT: v_cmp_gt_i64_e64 s[8:9], 0, v[12:13]
+; GCN-NEXT: v_cmp_gt_i64_e64 s[10:11], 0, v[14:15]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_cmp_gt_i64_e64 s[12:13], 0, v[16:17]
+; GCN-NEXT: v_cmp_gt_i64_e64 s[14:15], 0, v[18:19]
+; GCN-NEXT: v_cmp_gt_i64_e64 s[16:17], 0, v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, v1, -1, s[16:17]
+; GCN-NEXT: v_cndmask_b32_e64 v2, v1, -1, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v4, v1, -1, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v6, v1, -1, s[6:7]
+; GCN-NEXT: v_cndmask_b32_e64 v8, v1, -1, s[8:9]
+; GCN-NEXT: v_cndmask_b32_e64 v10, v1, -1, s[10:11]
+; GCN-NEXT: v_cndmask_b32_e64 v12, v1, -1, s[12:13]
+; GCN-NEXT: v_cndmask_b32_e64 v14, v1, -1, s[14:15]
+; GCN-NEXT: v_mov_b32_e32 v1, -1
+; GCN-NEXT: v_mov_b32_e32 v3, -1
+; GCN-NEXT: v_mov_b32_e32 v5, -1
+; GCN-NEXT: v_mov_b32_e32 v7, -1
+; GCN-NEXT: v_mov_b32_e32 v9, -1
+; GCN-NEXT: v_mov_b32_e32 v11, -1
+; GCN-NEXT: v_mov_b32_e32 v13, -1
+; GCN-NEXT: v_mov_b32_e32 v15, -1
+; GCN-NEXT: s_setpc_b64 s[30:31]
br i1 %c0, label %T, label %F
T:
@@ -102,9 +347,59 @@ exit:
ret <8 x i64> %r2
}
-; GCN-LABEL: extract_2xf64
-; GCN-COUNT-2: v_cndmask_b32
define <2 x double> @extract_2xf64(ptr addrspace(1) %p0, ptr addrspace(1) %p1, i1 %c0) {
+; GCN-LABEL: extract_2xf64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v4, 1, v4
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
+; GCN-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19
+; GCN-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
+; GCN-NEXT: s_cbranch_execz .LBB4_2
+; GCN-NEXT: ; %bb.1: ; %F
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[2:3], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[2:3], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[2:3], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: .LBB4_2: ; %Flow
+; GCN-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB4_4
+; GCN-NEXT: ; %bb.3: ; %T
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[0:1], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: .LBB4_4: ; %exit
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, 0xbff00000
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc, -1.0, v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v1, v0, -2.0, vcc
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc, -1.0, v[6:7]
+; GCN-NEXT: v_cndmask_b32_e64 v3, v0, -2.0, vcc
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: s_setpc_b64 s[30:31]
br i1 %c0, label %T, label %F
T:
@@ -123,9 +418,65 @@ exit:
ret <2 x double> %r2
}
-; GCN-LABEL: extract_4xf64
-; GCN-COUNT-4: v_cndmask_b32
define <4 x double> @extract_4xf64(ptr addrspace(1) %p0, ptr addrspace(1) %p1, i1 %c0) {
+; GCN-LABEL: extract_4xf64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v4, 1, v4
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
+; GCN-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19
+; GCN-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
+; GCN-NEXT: s_cbranch_execz .LBB5_2
+; GCN-NEXT: ; %bb.1: ; %F
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[2:3], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[12:15], v[2:3], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[2:3], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: .LBB5_2: ; %Flow
+; GCN-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB5_4
+; GCN-NEXT: ; %bb.3: ; %T
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[0:1], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[12:15], v[0:1], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: .LBB5_4: ; %exit
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, 0xbff00000
+; GCN-NEXT: v_cmp_nlt_f64_e32 vcc, -1.0, v[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v1, -2.0, v0, vcc
+; GCN-NEXT: v_cmp_nlt_f64_e32 vcc, -1.0, v[6:7]
+; GCN-NEXT: v_cndmask_b32_e32 v3, -2.0, v0, vcc
+; GCN-NEXT: v_cmp_nlt_f64_e32 vcc, -1.0, v[8:9]
+; GCN-NEXT: v_cndmask_b32_e32 v5, -2.0, v0, vcc
+; GCN-NEXT: v_cmp_nlt_f64_e32 vcc, -1.0, v[10:11]
+; GCN-NEXT: v_cndmask_b32_e32 v7, -2.0, v0, vcc
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: v_mov_b32_e32 v4, 0
+; GCN-NEXT: v_mov_b32_e32 v6, 0
+; GCN-NEXT: s_setpc_b64 s[30:31]
br i1 %c0, label %T, label %F
T:
@@ -144,9 +495,92 @@ exit:
ret <4 x double> %r2
}
-; GCN-LABEL: extract_8xf64
-; GCN-COUNT-8: v_cndmask_b32
define <8 x double> @extract_8xf64(ptr addrspace(1) %p0, ptr addrspace(1) %p1, i1 %c0) {
+; GCN-LABEL: extract_8xf64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v4, 1, v4
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
+; GCN-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+; GCN-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
+; GCN-NEXT: s_cbranch_execz .LBB6_2
+; GCN-NEXT: ; %bb.1: ; %F
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 offset:112 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 offset:96 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 offset:80 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 offset:64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[2:3], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[2:3], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[12:15], v[2:3], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[16:19], v[2:3], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: .LBB6_2: ; %Flow
+; GCN-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB6_4
+; GCN-NEXT: ; %bb.3: ; %T
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dwordx4 v[2:5], v[0:1], s[8:11], 0 addr64 offset:112 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[2:5], v[0:1], s[8:11], 0 addr64 offset:96 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[2:5], v[0:1], s[8:11], 0 addr64 offset:80 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[2:5], v[0:1], s[8:11], 0 addr64 offset:64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], v[0:1], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:16 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[12:15], v[0:1], s[8:11], 0 addr64 offset:32 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_dwordx4 v[16:19], v[0:1], s[8:11], 0 addr64 offset:48 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: .LBB6_4: ; %exit
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: v_mov_b32_e32 v0, 0xbff00000
+; GCN-NEXT: v_cmp_nlt_f64_e32 vcc, -1.0, v[6:7]
+; GCN-NEXT: v_cmp_nlt_f64_e64 s[4:5], -1.0, v[8:9]
+; GCN-NEXT: v_cmp_nlt_f64_e64 s[6:7], -1.0, v[10:11]
+; GCN-NEXT: v_cmp_nlt_f64_e64 s[8:9], -1.0, v[12:13]
+; GCN-NEXT: v_cmp_nlt_f64_e64 s[10:11], -1.0, v[14:15]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_cmp_nlt_f64_e64 s[12:13], -1.0, v[16:17]
+; GCN-NEXT: v_cmp_nlt_f64_e64 s[14:15], -1.0, v[18:19]
+; GCN-NEXT: v_cmp_nlt_f64_e64 s[16:17], -1.0, v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v1, -2.0, v0, s[16:17]
+; GCN-NEXT: v_cndmask_b32_e32 v3, -2.0, v0, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v5, -2.0, v0, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v7, -2.0, v0, s[6:7]
+; GCN-NEXT: v_cndmask_b32_e64 v9, -2.0, v0, s[8:9]
+; GCN-NEXT: v_cndmask_b32_e64 v11, -2.0, v0, s[10:11]
+; GCN-NEXT: v_cndmask_b32_e64 v13, -2.0, v0, s[12:13]
+; GCN-NEXT: v_cndmask_b32_e64 v15, -2.0, v0, s[14:15]
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: v_mov_b32_e32 v4, 0
+; GCN-NEXT: v_mov_b32_e32 v6, 0
+; GCN-NEXT: v_mov_b32_e32 v8, 0
+; GCN-NEXT: v_mov_b32_e32 v10, 0
+; GCN-NEXT: v_mov_b32_e32 v12, 0
+; GCN-NEXT: v_mov_b32_e32 v14, 0
+; GCN-NEXT: s_setpc_b64 s[30:31]
br i1 %c0, label %T, label %F
T:
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 08cf83fd2bd0f..952e89edeb799 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -255,13 +255,13 @@
; GCN-O1-NEXT: Function Alias Analysis Results
; GCN-O1-NEXT: Flatten the CFG
; GCN-O1-NEXT: Dominator Tree Construction
-; GCN-O1-NEXT: Cycle Info Analysis
-; GCN-O1-NEXT: Uniformity Analysis
-; GCN-O1-NEXT: AMDGPU IR late optimizations
; GCN-O1-NEXT: Basic Alias Analysis (stateless AA impl)
; GCN-O1-NEXT: Function Alias Analysis Results
; GCN-O1-NEXT: Natural Loop Information
; GCN-O1-NEXT: Code sinking
+; GCN-O1-NEXT: Cycle Info Analysis
+; GCN-O1-NEXT: Uniformity Analysis
+; GCN-O1-NEXT: AMDGPU IR late optimizations
; GCN-O1-NEXT: Post-Dominator Tree Construction
; GCN-O1-NEXT: Unify divergent function exit nodes
; GCN-O1-NEXT: Dominator Tree Construction
@@ -552,13 +552,13 @@
; GCN-O1-OPTS-NEXT: Function Alias Analysis Results
; GCN-O1-OPTS-NEXT: Flatten the CFG
; GCN-O1-OPTS-NEXT: Dominator Tree Construction
-; GCN-O1-OPTS-NEXT: Cycle Info Analysis
-; GCN-O1-OPTS-NEXT: Uniformity Analysis
-; GCN-O1-OPTS-NEXT: AMDGPU IR late optimizations
; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl)
; GCN-O1-OPTS-NEXT: Function Alias Analysis Results
; GCN-O1-OPTS-NEXT: Natural Loop Information
; GCN-O1-OPTS-NEXT: Code sinking
+; GCN-O1-OPTS-NEXT: Cycle Info Analysis
+; GCN-O1-OPTS-NEXT: Uniformity Analysis
+; GCN-O1-OPTS-NEXT: AMDGPU IR late optimizations
; GCN-O1-OPTS-NEXT: Post-Dominator Tree Construction
; GCN-O1-OPTS-NEXT: Unify divergent function exit nodes
; GCN-O1-OPTS-NEXT: Dominator Tree Construction
@@ -861,13 +861,13 @@
; GCN-O2-NEXT: Function Alias Analysis Results
; GCN-O2-NEXT: Flatten the CFG
; GCN-O2-NEXT: Dominator Tree Construction
-; GCN-O2-NEXT: Cycle Info Analysis
-; GCN-O2-NEXT: Uniformity Analysis
-; GCN-O2-NEXT: AMDGPU IR late optimizations
; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl)
; GCN-O2-NEXT: Function Alias Analysis Results
; GCN-O2-NEXT: Natural Loop Information
; GCN-O2-NEXT: Code sinking
+; GCN-O2-NEXT: Cycle Info Analysis
+; GCN-O2-NEXT: Uniformity Analysis
+; GCN-O2-NEXT: AMDGPU IR late optimizations
; GCN-O2-NEXT: Post-Dominator Tree Construction
; GCN-O2-NEXT: Unify divergent function exit nodes
; GCN-O2-NEXT: Dominator Tree Construction
@@ -1184,13 +1184,13 @@
; GCN-O3-NEXT: Function Alias Analysis Results
; GCN-O3-NEXT: Flatten the CFG
; GCN-O3-NEXT: Dominator Tree Construction
-; GCN-O3-NEXT: Cycle Info Analysis
-; GCN-O3-NEXT: Uniformity Analysis
-; GCN-O3-NEXT: AMDGPU IR late optimizations
; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl)
; GCN-O3-NEXT: Function Alias Analysis Results
; GCN-O3-NEXT: Natural Loop Information
; GCN-O3-NEXT: Code sinking
+; GCN-O3-NEXT: Cycle Info Analysis
+; GCN-O3-NEXT: Uniformity Analysis
+; GCN-O3-NEXT: AMDGPU IR late optimizations
; GCN-O3-NEXT: Post-Dominator Tree Construction
; GCN-O3-NEXT: Unify divergent function exit nodes
; GCN-O3-NEXT: Dominator Tree Construction
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll b/llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll
index 0f2eedb1923d6..911bb44078d51 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll
@@ -2101,10 +2101,7 @@ define void @crash_lshlrevb16_not_reg_op() {
; NOSDWA: ; %bb.0: ; %bb0
; NOSDWA-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; NOSDWA-NEXT: s_mov_b64 s[4:5], 0
-; NOSDWA-NEXT: v_mov_b32_e32 v0, 0xff
-; NOSDWA-NEXT: v_and_b32_e32 v0, s4, v0
-; NOSDWA-NEXT: v_lshlrev_b16_e64 v1, 8, 1
-; NOSDWA-NEXT: v_or_b32_e32 v0, v0, v1
+; NOSDWA-NEXT: v_mov_b32_e32 v0, 0x100
; NOSDWA-NEXT: s_and_b64 vcc, exec, -1
; NOSDWA-NEXT: .LBB22_1: ; %bb1
; NOSDWA-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -2124,9 +2121,7 @@ define void @crash_lshlrevb16_not_reg_op() {
; GFX89: ; %bb.0: ; %bb0
; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX89-NEXT: s_mov_b64 s[4:5], 0
-; GFX89-NEXT: v_lshlrev_b16_e64 v0, 8, 1
-; GFX89-NEXT: v_mov_b32_e32 v1, s4
-; GFX89-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX89-NEXT: v_mov_b32_e32 v0, 0x100
; GFX89-NEXT: s_and_b64 vcc, exec, -1
; GFX89-NEXT: .LBB22_1: ; %bb1
; GFX89-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -2146,8 +2141,7 @@ define void @crash_lshlrevb16_not_reg_op() {
; GFX9: ; %bb.0: ; %bb0
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: v_lshlrev_b16_e64 v0, 8, 1
-; GFX9-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x100
; GFX9-NEXT: s_and_b64 vcc, exec, -1
; GFX9-NEXT: .LBB22_1: ; %bb1
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -2166,18 +2160,16 @@ define void @crash_lshlrevb16_not_reg_op() {
; GFX10-LABEL: crash_lshlrevb16_not_reg_op:
; GFX10: ; %bb.0: ; %bb0
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b16 v0, 8, 1
-; GFX10-NEXT: s_mov_b32 vcc_lo, exec_lo
-; GFX10-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX10-NEXT: s_mov_b64 s[4:5], 0
+; GFX10-NEXT: s_mov_b32 vcc_lo, exec_lo
; GFX10-NEXT: .LBB22_1: ; %bb1
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_lshl_b32 s6, s4, 3
-; GFX10-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-NEXT: v_lshrrev_b16 v3, s6, v0
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-NEXT: v_lshrrev_b16 v2, s6, 0x100
; GFX10-NEXT: s_mov_b64 s[4:5], 1
-; GFX10-NEXT: flat_store_byte v[1:2], v3
+; GFX10-NEXT: flat_store_byte v[0:1], v2
; GFX10-NEXT: s_cbranch_vccnz .LBB22_1
; GFX10-NEXT: ; %bb.2: ; %DummyReturnBlock
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
index f78b408d78255..441f00faf329e 100644
--- a/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn -mcpu=gfx906 < %s | FileCheck --check-prefix=GFX906 %s
define amdgpu_kernel void @v3i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
@@ -6,27 +6,31 @@ define amdgpu_kernel void @v3i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v5, 2, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX906-NEXT: v_mov_b32_e32 v3, 8
; GFX906-NEXT: v_mov_b32_e32 v1, 0
-; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dword v2, v5, s[4:5]
+; GFX906-NEXT: global_load_dword v4, v2, s[4:5]
+; GFX906-NEXT: s_mov_b32 s4, 0xff0000
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v4, 8, v2
+; GFX906-NEXT: v_lshrrev_b32_sdwa v5, v3, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v5, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX906-NEXT: v_and_or_b32 v4, v4, s4, v5
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB0_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dword v2, v5, s[6:7]
+; GFX906-NEXT: global_load_dword v0, v2, s[6:7]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v4, 8, v2
+; GFX906-NEXT: v_lshrrev_b32_sdwa v2, v3, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX906-NEXT: v_and_or_b32 v4, v0, s4, v2
; GFX906-NEXT: .LBB0_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v4
-; GFX906-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_byte v1, v3, s[2:3] offset:2
-; GFX906-NEXT: global_store_short v1, v0, s[2:3]
+; GFX906-NEXT: global_store_byte_d16_hi v1, v4, s[2:3] offset:2
+; GFX906-NEXT: global_store_short v1, v4, s[2:3]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -50,31 +54,19 @@ define amdgpu_kernel void @v4i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 2, v0
; GFX906-NEXT: v_mov_b32_e32 v1, 0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dword v2, v6, s[4:5]
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v3, 24, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX906-NEXT: global_load_dword v2, v3, s[4:5]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB1_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dword v2, v6, s[6:7]
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v3, 24, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX906-NEXT: global_load_dword v2, v3, s[6:7]
; GFX906-NEXT: .LBB1_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v5
-; GFX906-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v2, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dword v1, v0, s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dword v1, v2, s[2:3]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -98,32 +90,23 @@ define amdgpu_kernel void @v5i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v7, 3, v0
-; GFX906-NEXT: v_mov_b32_e32 v5, 0
+; GFX906-NEXT: v_lshlrev_b32_e32 v4, 3, v0
+; GFX906-NEXT: v_mov_b32_e32 v3, 0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx2 v[1:2], v7, s[4:5]
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v4, s[4:5]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
-; GFX906-NEXT: v_lshrrev_b32_e32 v4, 16, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v6, 8, v1
+; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB2_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dwordx2 v[1:2], v7, s[6:7]
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v4, s[6:7]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
-; GFX906-NEXT: v_lshrrev_b32_e32 v4, 16, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v6, 8, v1
+; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX906-NEXT: .LBB2_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v6
-; GFX906-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_byte v5, v2, s[2:3] offset:4
-; GFX906-NEXT: global_store_dword v5, v0, s[2:3]
+; GFX906-NEXT: global_store_byte v3, v2, s[2:3] offset:4
+; GFX906-NEXT: global_store_dword v3, v1, s[2:3]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -147,42 +130,19 @@ define amdgpu_kernel void @v8i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v10, 3, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v4, 3, v0
; GFX906-NEXT: v_mov_b32_e32 v3, 0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx2 v[1:2], v10, s[4:5]
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v4, 24, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v5, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v6, 8, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v7, 24, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v8, 16, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v9, 8, v1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v4, s[4:5]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB3_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dwordx2 v[1:2], v10, s[6:7]
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v4, 24, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v5, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v6, 8, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v7, 24, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v8, 16, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v9, 8, v1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v4, s[6:7]
; GFX906-NEXT: .LBB3_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v9
-; GFX906-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v7
-; GFX906-NEXT: v_or_b32_sdwa v1, v8, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v6
-; GFX906-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v4
-; GFX906-NEXT: v_or_b32_sdwa v2, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx2 v3, v[0:1], s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx2 v3, v[1:2], s[2:3]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -206,64 +166,19 @@ define amdgpu_kernel void @v16i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v18, 4, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v6, 4, v0
; GFX906-NEXT: v_mov_b32_e32 v5, 0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx4 v[1:4], v18, s[4:5]
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v6, 24, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v7, 16, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v8, 8, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v9, 24, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v10, 16, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v11, 8, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v12, 24, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v14, 8, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v15, 24, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v16, 16, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 8, v1
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v6, s[4:5]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB4_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dwordx4 v[1:4], v18, s[6:7]
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v6, 24, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v7, 16, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v8, 8, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v9, 24, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v10, 16, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v11, 8, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v12, 24, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v14, 8, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v15, 24, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v16, 16, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 8, v1
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v6, s[6:7]
; GFX906-NEXT: .LBB4_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v17
-; GFX906-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v15
-; GFX906-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v14
-; GFX906-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v12
-; GFX906-NEXT: v_or_b32_sdwa v2, v13, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v11
-; GFX906-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v9
-; GFX906-NEXT: v_or_b32_sdwa v3, v10, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v8
-; GFX906-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v4, 8, v6
-; GFX906-NEXT: v_or_b32_sdwa v4, v7, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v5, v[0:3], s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx4 v5, v[1:4], s[2:3]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -286,114 +201,24 @@ define amdgpu_kernel void @v32i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906-LABEL: v32i8_liveout:
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX906-NEXT: v_lshlrev_b32_e32 v31, 5, v0
-; GFX906-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX906-NEXT: v_lshlrev_b32_e32 v10, 5, v0
; GFX906-NEXT: v_mov_b32_e32 v9, 0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx4 v[1:4], v31, s[4:5] offset:16
-; GFX906-NEXT: global_load_dwordx4 v[5:8], v31, s[4:5]
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v10, 16, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v11, 8, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v12, 24, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v13, 16, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v14, 8, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v15, 24, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v16, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 8, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v18, 24, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v20, 8, v1
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v21, 24, v8
-; GFX906-NEXT: v_lshrrev_b32_e32 v22, 16, v8
-; GFX906-NEXT: v_lshrrev_b32_e32 v23, 8, v8
-; GFX906-NEXT: v_lshrrev_b32_e32 v24, 24, v7
-; GFX906-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX906-NEXT: v_lshrrev_b32_e32 v26, 8, v7
-; GFX906-NEXT: v_lshrrev_b32_e32 v27, 24, v6
-; GFX906-NEXT: v_lshrrev_b32_e32 v28, 16, v6
-; GFX906-NEXT: v_lshrrev_b32_e32 v29, 8, v6
-; GFX906-NEXT: v_lshrrev_b32_e32 v30, 24, v5
-; GFX906-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX906-NEXT: v_lshrrev_b32_e32 v33, 8, v5
-; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v10, s[4:5] offset:16
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v10, s[4:5]
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB5_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dwordx4 v[1:4], v31, s[6:7] offset:16
-; GFX906-NEXT: global_load_dwordx4 v[5:8], v31, s[6:7]
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v10, 16, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v11, 8, v4
-; GFX906-NEXT: v_lshrrev_b32_e32 v12, 24, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v13, 16, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v14, 8, v3
-; GFX906-NEXT: v_lshrrev_b32_e32 v15, 24, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v16, 16, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 8, v2
-; GFX906-NEXT: v_lshrrev_b32_e32 v18, 24, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v20, 8, v1
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshrrev_b32_e32 v21, 24, v8
-; GFX906-NEXT: v_lshrrev_b32_e32 v22, 16, v8
-; GFX906-NEXT: v_lshrrev_b32_e32 v23, 8, v8
-; GFX906-NEXT: v_lshrrev_b32_e32 v24, 24, v7
-; GFX906-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX906-NEXT: v_lshrrev_b32_e32 v26, 8, v7
-; GFX906-NEXT: v_lshrrev_b32_e32 v27, 24, v6
-; GFX906-NEXT: v_lshrrev_b32_e32 v28, 16, v6
-; GFX906-NEXT: v_lshrrev_b32_e32 v29, 8, v6
-; GFX906-NEXT: v_lshrrev_b32_e32 v30, 24, v5
-; GFX906-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX906-NEXT: v_lshrrev_b32_e32 v33, 8, v5
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v10, s[6:7] offset:16
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v10, s[6:7]
; GFX906-NEXT: .LBB5_2: ; %bb.2
-; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX906-NEXT: v_lshlrev_b16_e32 v30, 8, v30
-; GFX906-NEXT: v_lshlrev_b16_e32 v31, 8, v33
-; GFX906-NEXT: v_lshlrev_b16_e32 v29, 8, v29
-; GFX906-NEXT: v_lshlrev_b16_e32 v27, 8, v27
-; GFX906-NEXT: v_lshlrev_b16_e32 v26, 8, v26
-; GFX906-NEXT: v_lshlrev_b16_e32 v24, 8, v24
-; GFX906-NEXT: v_lshlrev_b16_e32 v23, 8, v23
-; GFX906-NEXT: v_lshlrev_b16_e32 v21, 8, v21
-; GFX906-NEXT: v_or_b32_sdwa v30, v32, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v5, v5, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v6, v6, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v27, v28, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v7, v7, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v24, v25, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v8, v8, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v21, v22, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v5, v5, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v6, v6, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v7, v7, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v8, v8, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v9, v[5:8], s[0:1]
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v20
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v18
-; GFX906-NEXT: v_or_b32_sdwa v5, v19, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v17
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v15
-; GFX906-NEXT: v_or_b32_sdwa v5, v16, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v14
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v12
-; GFX906-NEXT: v_or_b32_sdwa v5, v13, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v11
-; GFX906-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v0, v10, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v4, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v9, v[1:4], s[0:1] offset:16
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX906-NEXT: s_waitcnt vmcnt(1)
+; GFX906-NEXT: global_store_dwordx4 v9, v[1:4], s[2:3] offset:16
+; GFX906-NEXT: s_waitcnt vmcnt(1)
+; GFX906-NEXT: global_store_dwordx4 v9, v[5:8], s[2:3]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -415,1572 +240,631 @@ bb.2:
define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
; GFX906-LABEL: v256i8_liveout:
; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v61, 3, v0
; GFX906-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX906-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
; GFX906-NEXT: s_mov_b32 s10, -1
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v61, s[4:5] offset:240
; GFX906-NEXT: s_mov_b32 s11, 0xe00000
; GFX906-NEXT: s_add_u32 s8, s8, s3
-; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v63, 3, v0
; GFX906-NEXT: s_addc_u32 s9, s9, 0
-; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx4 v[17:20], v63, s[4:5] offset:240
-; GFX906-NEXT: global_load_dwordx4 v[5:8], v63, s[4:5] offset:224
-; GFX906-NEXT: global_load_dwordx4 v[9:12], v63, s[4:5] offset:208
-; GFX906-NEXT: global_load_dwordx4 v[13:16], v63, s[4:5] offset:192
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; GFX906-NEXT: v_mov_b32_e32 v4, 0
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:16 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:20 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:24 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:28 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:32 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:36 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:40 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:44 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:48 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:52 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:56 ; 4-byte Folded Spill
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 ; 4-byte Folded Spill
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:4 ; 4-byte Folded Spill
-; GFX906-NEXT: buffer_store_dword v19, off, s[8:11], 0 offset:8 ; 4-byte Folded Spill
-; GFX906-NEXT: buffer_store_dword v20, off, s[8:11], 0 offset:12 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:60 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v8
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:64 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v8
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:68 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v8
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:72 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v7
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:76 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v7
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:80 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v7
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:84 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v6
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:88 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v6
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:92 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v6
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:96 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v5
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:100 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:104 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v5
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:108 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v12
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:112 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v12
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:116 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v12
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:120 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v11
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:124 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v11
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:128 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v11
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:132 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v10
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:136 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v10
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:140 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v10
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:144 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v9
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:148 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v9
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:152 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v9
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:156 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v16
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:160 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v16
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:164 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v16
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:168 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v15
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:176 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v15
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:180 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v15
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:172 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v14
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:188 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v14
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:192 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v14
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:184 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v13
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:200 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v13
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:204 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v13
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:196 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[17:20], v63, s[4:5] offset:176
-; GFX906-NEXT: global_load_dwordx4 v[21:24], v63, s[4:5] offset:160
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:208 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:212 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:224 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:216 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:220 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:236 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:228 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:232 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:248 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:240 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:244 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:252 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v24
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:256 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v24
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:260 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v24
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:272 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v23
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:264 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v23
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:268 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v23
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:284 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v22
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:276 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v22
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:280 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v22
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:296 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v21
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:288 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v21
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:292 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v21
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:300 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[25:28], v63, s[4:5] offset:144
-; GFX906-NEXT: global_load_dwordx4 v[29:32], v63, s[4:5] offset:128
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v28
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:304 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v28
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:308 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v28
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:320 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v27
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:312 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:316 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v27
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:332 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v26
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:324 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v26
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:328 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v26
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:344 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v25
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:336 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:340 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v25
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:348 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v32
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:352 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v32
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:356 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v32
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:368 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v31
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:360 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v31
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:364 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v31
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:380 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v30
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:372 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v30
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:376 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v30
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:392 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v29
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:384 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:388 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v29
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:396 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[33:36], v63, s[4:5] offset:112
-; GFX906-NEXT: global_load_dwordx4 v[37:40], v63, s[4:5] offset:96
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v36
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:400 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v36
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:404 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v36
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:416 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v35
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:408 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v35
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:412 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v35
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:428 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v34
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:420 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v34
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:424 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v34
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:440 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v33
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:432 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v33
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:436 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v33
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:444 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v40
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:448 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v40
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:452 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v40
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:464 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v39
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:456 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v39
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:460 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v39
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:476 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v38
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:468 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v38
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:472 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v38
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:488 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v37
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:480 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v37
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:484 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v37
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:492 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[41:44], v63, s[4:5] offset:80
-; GFX906-NEXT: global_load_dwordx4 v[45:48], v63, s[4:5] offset:64
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v44
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:496 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v44
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:500 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v44
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:512 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v43
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:504 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v43
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:508 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v43
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:524 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v42
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:516 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v42
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:520 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v42
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:536 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v41
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:528 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v41
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:532 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v41
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:540 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v48
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:544 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v48
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:548 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v48
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:560 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v47
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:552 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v47
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:556 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v47
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:572 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v46
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:564 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v46
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:568 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v46
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:584 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v45
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:576 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v45
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:580 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v45
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:588 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[49:52], v63, s[4:5] offset:48
-; GFX906-NEXT: global_load_dwordx4 v[53:56], v63, s[4:5] offset:32
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v52
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:592 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v52
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:596 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v52
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:608 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v51
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:600 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v51
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:604 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v51
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:620 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v50
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:612 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v50
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:616 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v50
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:632 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v49
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:624 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v49
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:628 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v49
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:636 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v56
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:640 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v56
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:644 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v56
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:656 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v55
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:648 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v55
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:652 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v55
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:668 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v54
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:660 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v54
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:664 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v54
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:680 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v53
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:672 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v53
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:676 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v53
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:684 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[57:60], v63, s[4:5] offset:16
+; GFX906-NEXT: buffer_store_dword v5, off, s[8:11], 0 ; 4-byte Folded Spill
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: buffer_store_dword v6, off, s[8:11], 0 offset:4 ; 4-byte Folded Spill
+; GFX906-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:8 ; 4-byte Folded Spill
+; GFX906-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:12 ; 4-byte Folded Spill
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v61, s[4:5] offset:224
; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: global_load_dwordx4 v[0:3], v63, s[4:5]
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v60
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:688 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v60
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:692 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v60
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:704 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v59
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:696 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v59
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:700 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v59
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:716 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v58
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:708 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v58
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:712 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v58
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:728 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v57
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:720 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v57
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:724 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v57
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:732 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v3
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:736 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v3
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:740 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v3
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:752 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v2
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:744 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v2
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:748 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v2
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:764 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v62, 24, v0
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:756 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v1
-; GFX906-NEXT: buffer_store_dword v62, off, s[8:11], 0 offset:768 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v62, 16, v0
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:760 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v1
-; GFX906-NEXT: buffer_store_dword v62, off, s[8:11], 0 offset:772 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v62, 8, v0
-; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: global_load_dwordx4 v[9:12], v61, s[4:5] offset:208
+; GFX906-NEXT: global_load_dwordx4 v[13:16], v61, s[4:5] offset:192
+; GFX906-NEXT: global_load_dwordx4 v[17:20], v61, s[4:5] offset:176
+; GFX906-NEXT: global_load_dwordx4 v[21:24], v61, s[4:5] offset:160
+; GFX906-NEXT: global_load_dwordx4 v[25:28], v61, s[4:5] offset:144
+; GFX906-NEXT: global_load_dwordx4 v[29:32], v61, s[4:5] offset:128
+; GFX906-NEXT: global_load_dwordx4 v[33:36], v61, s[4:5] offset:112
+; GFX906-NEXT: global_load_dwordx4 v[37:40], v61, s[4:5] offset:96
+; GFX906-NEXT: global_load_dwordx4 v[41:44], v61, s[4:5] offset:80
+; GFX906-NEXT: global_load_dwordx4 v[45:48], v61, s[4:5] offset:64
+; GFX906-NEXT: global_load_dwordx4 v[49:52], v61, s[4:5] offset:48
+; GFX906-NEXT: global_load_dwordx4 v[53:56], v61, s[4:5] offset:32
+; GFX906-NEXT: global_load_dwordx4 v[57:60], v61, s[4:5] offset:16
+; GFX906-NEXT: global_load_dwordx4 v[0:3], v61, s[4:5]
+; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX906-NEXT: s_cbranch_execz .LBB6_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dwordx4 v[0:3], v63, s[6:7] offset:240
-; GFX906-NEXT: global_load_dwordx4 v[5:8], v63, s[6:7] offset:224
-; GFX906-NEXT: global_load_dwordx4 v[9:12], v63, s[6:7] offset:208
-; GFX906-NEXT: global_load_dwordx4 v[13:16], v63, s[6:7] offset:192
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 24, v3
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:16 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 16, v3
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:20 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 8, v3
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:24 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 24, v2
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:28 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 16, v2
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:32 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 8, v2
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:36 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 24, v1
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:40 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 16, v1
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:44 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 8, v1
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:48 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 24, v0
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:52 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v17, 16, v0
-; GFX906-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:56 ; 4-byte Folded Spill
+; GFX906-NEXT: global_load_dwordx4 v[0:3], v61, s[6:7] offset:240
+; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 ; 4-byte Folded Spill
; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:4 ; 4-byte Folded Spill
; GFX906-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:8 ; 4-byte Folded Spill
; GFX906-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:12 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v0
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:60 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v8
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:64 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v8
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:68 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v8
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:72 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v7
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:76 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v7
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:80 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v7
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:84 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v6
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:88 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v6
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:92 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v6
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:96 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v5
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:100 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:104 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v5
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:108 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v12
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:112 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v12
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:116 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v12
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:120 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v11
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:124 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v11
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:128 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v11
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:132 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v10
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:136 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v10
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:140 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v10
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:144 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v9
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:148 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v9
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:152 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v9
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:156 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v16
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:160 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v16
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:164 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v16
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:168 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v15
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:176 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v15
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:180 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v15
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:172 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v14
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:188 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v14
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:192 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v14
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:184 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v13
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:200 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v13
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:204 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v13
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:196 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[17:20], v63, s[6:7] offset:176
-; GFX906-NEXT: global_load_dwordx4 v[21:24], v63, s[6:7] offset:160
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:208 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:212 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v20
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:224 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:216 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:220 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v19
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:236 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:228 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:232 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v18
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:248 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:240 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:244 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v17
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:252 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v24
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:256 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v24
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:260 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v24
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:272 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v23
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:264 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v23
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:268 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v23
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:284 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v22
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:276 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v22
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:280 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v22
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:296 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v21
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:288 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v21
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:292 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v21
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:300 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[25:28], v63, s[6:7] offset:144
-; GFX906-NEXT: global_load_dwordx4 v[29:32], v63, s[6:7] offset:128
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v28
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:304 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v28
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:308 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v28
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:320 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v27
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:312 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:316 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v27
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:332 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v26
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:324 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v26
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:328 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v26
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:344 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v25
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:336 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:340 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v25
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:348 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v32
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:352 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v32
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:356 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v32
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:368 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v31
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:360 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v31
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:364 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v31
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:380 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v30
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:372 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v30
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:376 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v30
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:392 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v29
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:384 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:388 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v29
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:396 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[33:36], v63, s[6:7] offset:112
-; GFX906-NEXT: global_load_dwordx4 v[37:40], v63, s[6:7] offset:96
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v36
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:400 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v36
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:404 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v36
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:416 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v35
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:408 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v35
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:412 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v35
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:428 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v34
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:420 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v34
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:424 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v34
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:440 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v33
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:432 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v33
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:436 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v33
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:444 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v40
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:448 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v40
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:452 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v40
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:464 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v39
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:456 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v39
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:460 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v39
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:476 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v38
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:468 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v38
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:472 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v38
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:488 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v37
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:480 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v37
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:484 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v37
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:492 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[41:44], v63, s[6:7] offset:80
-; GFX906-NEXT: global_load_dwordx4 v[45:48], v63, s[6:7] offset:64
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v44
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:496 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v44
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:500 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v44
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:512 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v43
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:504 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v43
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:508 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v43
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:524 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v42
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:516 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v42
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:520 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v42
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:536 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v41
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:528 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v41
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:532 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v41
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:540 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v48
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:544 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v48
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:548 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v48
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:560 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v47
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:552 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v47
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:556 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v47
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:572 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v46
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:564 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v46
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:568 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v46
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:584 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v45
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:576 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v45
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:580 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v45
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:588 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[49:52], v63, s[6:7] offset:48
-; GFX906-NEXT: global_load_dwordx4 v[53:56], v63, s[6:7] offset:32
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v52
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:592 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v52
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:596 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v52
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:608 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v51
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:600 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v51
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:604 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v51
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:620 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v50
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:612 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v50
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:616 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v50
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:632 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v49
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:624 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v49
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:628 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v49
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:636 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v56
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:640 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v56
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:644 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v56
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:656 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v55
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:648 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v55
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:652 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v55
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:668 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v54
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:660 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v54
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:664 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v54
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:680 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 24, v53
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:672 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 16, v53
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:676 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v53
-; GFX906-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:684 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[57:60], v63, s[6:7] offset:16
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: global_load_dwordx4 v[0:3], v63, s[6:7]
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v60
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:688 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v60
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:692 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v60
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:704 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v59
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:696 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v59
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:700 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v59
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:716 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v58
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:708 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v58
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:712 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v58
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:728 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v57
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:720 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v57
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:724 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v57
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:732 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(12)
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v3
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:736 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v3
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:740 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v3
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:752 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v2
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:744 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v2
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:748 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v2
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:764 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 24, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v62, 24, v0
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:756 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 16, v1
-; GFX906-NEXT: buffer_store_dword v62, off, s[8:11], 0 offset:768 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v62, 16, v0
-; GFX906-NEXT: buffer_store_dword v61, off, s[8:11], 0 offset:760 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v61, 8, v1
-; GFX906-NEXT: buffer_store_dword v62, off, s[8:11], 0 offset:772 ; 4-byte Folded Spill
-; GFX906-NEXT: v_lshrrev_b32_e32 v62, 8, v0
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v61, s[6:7] offset:224
+; GFX906-NEXT: global_load_dwordx4 v[9:12], v61, s[6:7] offset:208
+; GFX906-NEXT: global_load_dwordx4 v[13:16], v61, s[6:7] offset:192
+; GFX906-NEXT: global_load_dwordx4 v[17:20], v61, s[6:7] offset:176
+; GFX906-NEXT: global_load_dwordx4 v[21:24], v61, s[6:7] offset:160
+; GFX906-NEXT: global_load_dwordx4 v[25:28], v61, s[6:7] offset:144
+; GFX906-NEXT: global_load_dwordx4 v[29:32], v61, s[6:7] offset:128
+; GFX906-NEXT: global_load_dwordx4 v[33:36], v61, s[6:7] offset:112
+; GFX906-NEXT: global_load_dwordx4 v[37:40], v61, s[6:7] offset:96
+; GFX906-NEXT: global_load_dwordx4 v[41:44], v61, s[6:7] offset:80
+; GFX906-NEXT: global_load_dwordx4 v[45:48], v61, s[6:7] offset:64
+; GFX906-NEXT: global_load_dwordx4 v[49:52], v61, s[6:7] offset:48
+; GFX906-NEXT: global_load_dwordx4 v[53:56], v61, s[6:7] offset:32
+; GFX906-NEXT: global_load_dwordx4 v[57:60], v61, s[6:7] offset:16
+; GFX906-NEXT: global_load_dwordx4 v[0:3], v61, s[6:7]
; GFX906-NEXT: .LBB6_2: ; %bb.2
-; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_lshlrev_b16_e32 v61, 8, v61
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v61, off, s[8:11], 0 offset:764 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v62, 8, v62
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v62, off, s[8:11], 0 offset:772 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v63, off, s[8:11], 0 offset:760 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v61, 8, v61
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v61, off, s[8:11], 0 offset:752 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v61, 8, v61
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v61, off, s[8:11], 0 offset:768 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v61, 8, v61
-; GFX906-NEXT: v_or_b32_sdwa v61, v62, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v62, off, s[8:11], 0 offset:756 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v61, off, s[8:11], 0 offset:744 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v62, 8, v62
-; GFX906-NEXT: v_or_b32_sdwa v62, v63, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v62, off, s[8:11], 0 offset:748 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v61, 8, v61
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v61, v62, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v61, off, s[8:11], 0 offset:736 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v62, off, s[8:11], 0 offset:740 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v61, 8, v61
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v61, v62, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3]
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:732 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:728 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:716 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:704 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v57, off, s[8:11], 0 offset:720 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v58, off, s[8:11], 0 offset:724 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v59, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v59, off, s[8:11], 0 offset:712 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v57, 8, v57
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v57, v58, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v58, off, s[8:11], 0 offset:708 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v57, off, s[8:11], 0 offset:696 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v58, 8, v58
-; GFX906-NEXT: v_or_b32_sdwa v58, v59, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v58, off, s[8:11], 0 offset:700 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v57, 8, v57
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v57, v58, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v57, off, s[8:11], 0 offset:688 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v58, off, s[8:11], 0 offset:692 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v57, 8, v57
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v57, v58, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:684 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:680 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:668 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:656 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v53, off, s[8:11], 0 offset:672 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v54, off, s[8:11], 0 offset:676 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v55, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v55, off, s[8:11], 0 offset:664 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v53, 8, v53
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v53, v54, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v54, off, s[8:11], 0 offset:660 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v53, off, s[8:11], 0 offset:648 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v54, 8, v54
-; GFX906-NEXT: v_or_b32_sdwa v54, v55, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v54, off, s[8:11], 0 offset:652 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v53, 8, v53
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v53, v54, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v53, off, s[8:11], 0 offset:640 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v54, off, s[8:11], 0 offset:644 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v53, 8, v53
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v53, v54, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:32
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:636 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:632 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:620 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:608 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v49, off, s[8:11], 0 offset:624 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v50, off, s[8:11], 0 offset:628 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v51, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v51, off, s[8:11], 0 offset:616 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v49, 8, v49
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v49, v50, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v50, off, s[8:11], 0 offset:612 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v49, off, s[8:11], 0 offset:600 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v50, 8, v50
-; GFX906-NEXT: v_or_b32_sdwa v50, v51, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v50, off, s[8:11], 0 offset:604 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v49, 8, v49
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v49, v50, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v49, off, s[8:11], 0 offset:592 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v50, off, s[8:11], 0 offset:596 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v49, 8, v49
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v49, v50, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:48
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:588 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:584 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:572 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:560 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v45, off, s[8:11], 0 offset:576 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v46, off, s[8:11], 0 offset:580 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v47, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v47, off, s[8:11], 0 offset:568 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v45, 8, v45
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v45, v46, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v46, off, s[8:11], 0 offset:564 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v45, off, s[8:11], 0 offset:552 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v46, 8, v46
-; GFX906-NEXT: v_or_b32_sdwa v46, v47, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v46, off, s[8:11], 0 offset:556 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v45, 8, v45
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v45, v46, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v45, off, s[8:11], 0 offset:544 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v46, off, s[8:11], 0 offset:548 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v48, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v45, 8, v45
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v45, v46, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:64
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:540 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:536 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:524 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:512 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v41, off, s[8:11], 0 offset:528 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v42, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v42, off, s[8:11], 0 offset:532 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v43, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v43, off, s[8:11], 0 offset:520 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v41, 8, v41
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v41, v42, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v42, off, s[8:11], 0 offset:516 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v41, off, s[8:11], 0 offset:504 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v42, 8, v42
-; GFX906-NEXT: v_or_b32_sdwa v42, v43, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v42, off, s[8:11], 0 offset:508 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v41, 8, v41
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v41, v42, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v41, off, s[8:11], 0 offset:496 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v42, off, s[8:11], 0 offset:500 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v44, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v41, 8, v41
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v41, v42, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:80
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:492 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:488 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:476 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:464 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v37, off, s[8:11], 0 offset:480 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v38, off, s[8:11], 0 offset:484 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v39, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v39, off, s[8:11], 0 offset:472 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v37, 8, v37
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v37, v38, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v38, off, s[8:11], 0 offset:468 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v37, off, s[8:11], 0 offset:456 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v38, 8, v38
-; GFX906-NEXT: v_or_b32_sdwa v38, v39, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v38, off, s[8:11], 0 offset:460 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v37, 8, v37
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v37, v38, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v37, off, s[8:11], 0 offset:448 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v38, off, s[8:11], 0 offset:452 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v37, 8, v37
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v37, v38, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:96
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:444 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:440 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:428 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:416 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v33, off, s[8:11], 0 offset:432 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v34, off, s[8:11], 0 offset:436 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v35, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v35, off, s[8:11], 0 offset:424 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v33, 8, v33
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v33, v34, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v34, off, s[8:11], 0 offset:420 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v33, off, s[8:11], 0 offset:408 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v34, 8, v34
-; GFX906-NEXT: v_or_b32_sdwa v34, v35, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v34, off, s[8:11], 0 offset:412 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v33, 8, v33
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v33, v34, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v33, off, s[8:11], 0 offset:400 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v34, off, s[8:11], 0 offset:404 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v33, 8, v33
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v33, v34, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:112
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:396 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:392 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:380 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:368 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v29, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v29, off, s[8:11], 0 offset:384 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v30, off, s[8:11], 0 offset:388 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v31, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v31, off, s[8:11], 0 offset:376 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v29, 8, v29
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v29, v30, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v30, off, s[8:11], 0 offset:372 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v29, off, s[8:11], 0 offset:360 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v30, 8, v30
-; GFX906-NEXT: v_or_b32_sdwa v30, v31, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v30, off, s[8:11], 0 offset:364 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v29, 8, v29
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v29, v30, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v29, off, s[8:11], 0 offset:352 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v30, off, s[8:11], 0 offset:356 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v32, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v29, 8, v29
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v29, v30, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:128
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:348 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:344 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:332 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:320 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v25, off, s[8:11], 0 offset:336 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v26, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v26, off, s[8:11], 0 offset:340 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v27, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v27, off, s[8:11], 0 offset:328 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v25, 8, v25
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v25, v26, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v26, off, s[8:11], 0 offset:324 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v25, off, s[8:11], 0 offset:312 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v26, 8, v26
-; GFX906-NEXT: v_or_b32_sdwa v26, v27, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v26, off, s[8:11], 0 offset:316 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v25, 8, v25
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v25, v26, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v25, off, s[8:11], 0 offset:304 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v26, off, s[8:11], 0 offset:308 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v28, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v25, 8, v25
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v25, v26, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:144
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:300 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:296 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:284 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:272 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v21, off, s[8:11], 0 offset:288 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v22, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v22, off, s[8:11], 0 offset:292 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v23, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v23, off, s[8:11], 0 offset:280 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v21, 8, v21
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v21, v22, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v22, off, s[8:11], 0 offset:276 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v21, off, s[8:11], 0 offset:264 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v22, 8, v22
-; GFX906-NEXT: v_or_b32_sdwa v22, v23, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v22, off, s[8:11], 0 offset:268 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v21, 8, v21
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v21, v22, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v21, off, s[8:11], 0 offset:256 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v22, off, s[8:11], 0 offset:260 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v24, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v21, 8, v21
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v21, v22, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:160
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:252 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:248 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:236 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:224 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: buffer_load_dword v17, off, s[8:11], 0 offset:240 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v18, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v18, off, s[8:11], 0 offset:244 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v19, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v19, off, s[8:11], 0 offset:232 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v17, 8, v17
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v17, v18, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v18, off, s[8:11], 0 offset:228 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v17, off, s[8:11], 0 offset:216 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v18, 8, v18
-; GFX906-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v18, off, s[8:11], 0 offset:220 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v17, 8, v17
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v17, v18, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v17, off, s[8:11], 0 offset:208 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v18, off, s[8:11], 0 offset:212 ; 4-byte Folded Reload
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v17, 8, v17
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_or_b32_sdwa v17, v18, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:176
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:200 ; 4-byte Folded Reload
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; GFX906-NEXT: global_store_dwordx4 v4, v[33:36], s[0:1] offset:112
+; GFX906-NEXT: s_waitcnt vmcnt(7)
+; GFX906-NEXT: global_store_dwordx4 v4, v[37:40], s[0:1] offset:96
+; GFX906-NEXT: s_waitcnt vmcnt(7)
+; GFX906-NEXT: global_store_dwordx4 v4, v[41:44], s[0:1] offset:80
+; GFX906-NEXT: s_waitcnt vmcnt(7)
+; GFX906-NEXT: global_store_dwordx4 v4, v[45:48], s[0:1] offset:64
+; GFX906-NEXT: s_waitcnt vmcnt(7)
+; GFX906-NEXT: global_store_dwordx4 v4, v[49:52], s[0:1] offset:48
+; GFX906-NEXT: s_waitcnt vmcnt(7)
+; GFX906-NEXT: global_store_dwordx4 v4, v[53:56], s[0:1] offset:32
+; GFX906-NEXT: s_waitcnt vmcnt(7)
+; GFX906-NEXT: global_store_dwordx4 v4, v[57:60], s[0:1] offset:16
+; GFX906-NEXT: s_waitcnt vmcnt(7)
+; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
+; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 ; 4-byte Folded Reload
; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:204 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:192 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:184 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:188 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v14, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v14, off, s[8:11], 0 offset:164 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:196 ; 4-byte Folded Reload
-; GFX906-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:180 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v13, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:176 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v13, off, s[8:11], 0 offset:160 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:172 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v13, 8, v13
-; GFX906-NEXT: v_or_b32_sdwa v13, v14, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v15, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:168 ; 4-byte Folded Reload
+; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:4 ; 4-byte Folded Reload
+; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:8 ; 4-byte Folded Reload
+; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:12 ; 4-byte Folded Reload
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:240
+; GFX906-NEXT: global_store_dwordx4 v4, v[5:8], s[0:1] offset:224
+; GFX906-NEXT: global_store_dwordx4 v4, v[9:12], s[0:1] offset:208
+; GFX906-NEXT: global_store_dwordx4 v4, v[13:16], s[0:1] offset:192
+; GFX906-NEXT: global_store_dwordx4 v4, v[17:20], s[0:1] offset:176
+; GFX906-NEXT: global_store_dwordx4 v4, v[21:24], s[0:1] offset:160
+; GFX906-NEXT: global_store_dwordx4 v4, v[25:28], s[0:1] offset:144
+; GFX906-NEXT: global_store_dwordx4 v4, v[29:32], s[0:1] offset:128
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <256 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <256 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <256 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <256 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
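+; Check a <4 x i8> phi whose incoming block %entry appears twice, because the
+; switch sends cases 1 and 2 to the same successor.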
+define amdgpu_kernel void @repeat_successor(i32 %in, ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: repeat_successor:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dword s8, s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: s_cmp_lt_i32 s8, 3
+; GFX906-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX906-NEXT: ; %bb.1: ; %LeafBlock
+; GFX906-NEXT: s_cmp_gt_i32 s8, 0
+; GFX906-NEXT: s_cbranch_scc0 .LBB7_6
+; GFX906-NEXT: ; %bb.2:
+; GFX906-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX906-NEXT: global_load_dword v0, v0, s[4:5]
+; GFX906-NEXT: s_branch .LBB7_5
+; GFX906-NEXT: .LBB7_3: ; %LeafBlock5
+; GFX906-NEXT: s_cmp_eq_u32 s8, 3
+; GFX906-NEXT: s_cbranch_scc0 .LBB7_6
+; GFX906-NEXT: ; %bb.4: ; %sw.bb5
+; GFX906-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX906-NEXT: global_load_dword v0, v0, s[6:7]
+; GFX906-NEXT: .LBB7_5: ; %return.sink.split
+; GFX906-NEXT: v_mov_b32_e32 v1, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v3, v16, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:192
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:156 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:152 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:148 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:140 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v9, off, s[8:11], 0 offset:128 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:144 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:136 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX906-NEXT: global_store_dword v1, v0, s[2:3]
+; GFX906-NEXT: .LBB7_6: ; %return
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <4 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <4 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <4 x i8>, ptr addrspace(1) %gep2
+ switch i32 %in, label %return [
+ i32 1, label %return.sink.split
+ i32 2, label %return.sink.split
+ i32 3, label %sw.bb5
+ ]
+
+sw.bb5:
+ br label %return.sink.split
+
+return.sink.split:
+ %tmp5 = phi <4 x i8> [ %vec2, %sw.bb5 ], [ %vec1, %entry ], [ %vec1, %entry ]
+ store <4 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+
+return:
+ ret void
+}
+
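+; Check chained <8 x i8> phis: the phi in bb.3 consumes the result of the phi
+; in bb.2 as one of its incoming values.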
+define amdgpu_kernel void @v8i8_phi_chain(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst0, ptr addrspace(1) nocapture %dst1) {
+; GFX906-LABEL: v8i8_phi_chain:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 3, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[0:1]
+; GFX906-NEXT: v_cmp_lt_u32_e64 s[0:1], 14, v0
+; GFX906-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB8_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[2:3]
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
+; GFX906-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
+; GFX906-NEXT: s_and_b64 s[2:3], vcc, exec
+; GFX906-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX906-NEXT: .LBB8_2: ; %Flow
+; GFX906-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX906-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GFX906-NEXT: s_cbranch_execz .LBB8_4
+; GFX906-NEXT: ; %bb.3: ; %bb.2
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: global_store_dwordx2 v0, v[1:2], s[4:5]
+; GFX906-NEXT: .LBB8_4: ; %bb.3
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v1, v10, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:132 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:124 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v10, off, s[8:11], 0 offset:116 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v2, v11, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v9, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:120 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v9, off, s[8:11], 0 offset:112 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX906-NEXT: global_store_dwordx2 v0, v[1:2], s[6:7]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
+
+bb.2:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst0, align 4
+ br label %bb.3
+
+bb.3:
+ %tmp7 = phi <8 x i8> [ %vec2, %bb.1], [%tmp5, %bb.2]
+ store <8 x i8> %tmp7, ptr addrspace(1) %dst1, align 4
+ ret void
+}
+
+
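+; Same phi chain as above, but with zeroinitializer as one incoming value.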
+define amdgpu_kernel void @v8i8_phi_zeroinit(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst0, ptr addrspace(1) nocapture %dst1) {
+; GFX906-LABEL: v8i8_phi_zeroinit:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v5, 3, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: ; implicit-def: $vgpr1_vgpr2
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[3:4], v5, s[0:1]
+; GFX906-NEXT: v_cmp_lt_u32_e64 s[0:1], 14, v0
+; GFX906-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB9_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v5, s[2:3]
+; GFX906-NEXT: s_mov_b32 s2, 0
+; GFX906-NEXT: s_mov_b32 s3, s2
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
+; GFX906-NEXT: s_waitcnt vmcnt(1)
+; GFX906-NEXT: v_mov_b32_e32 v4, s3
+; GFX906-NEXT: v_mov_b32_e32 v3, s2
+; GFX906-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
+; GFX906-NEXT: s_and_b64 s[2:3], vcc, exec
+; GFX906-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX906-NEXT: .LBB9_2: ; %Flow
+; GFX906-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX906-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GFX906-NEXT: s_cbranch_execz .LBB9_4
+; GFX906-NEXT: ; %bb.3: ; %bb.2
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_mov_b32_e32 v1, v3
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: v_mov_b32_e32 v2, v4
+; GFX906-NEXT: global_store_dwordx2 v0, v[3:4], s[4:5]
+; GFX906-NEXT: .LBB9_4: ; %bb.3
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v9, 8, v9
-; GFX906-NEXT: v_or_b32_sdwa v3, v12, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v9, v10, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:208
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:108 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:104 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:100 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:92 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 offset:80 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: global_store_dwordx2 v0, v[1:2], s[6:7]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
+
+bb.2:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ zeroinitializer, %bb.1 ]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst0, align 4
+ br label %bb.3
+
+bb.3:
+ %tmp7 = phi <8 x i8> [ %vec2, %bb.1], [%tmp5, %bb.2]
+ store <8 x i8> %tmp7, ptr addrspace(1) %dst1, align 4
+ ret void
+}
+
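+; Same phi chain as above, but with a constant <8 x i8> vector as one
+; incoming value.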
+define amdgpu_kernel void @v8i8_phi_const(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst0, ptr addrspace(1) nocapture %dst1) {
+; GFX906-LABEL: v8i8_phi_const:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v4, 3, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: ; implicit-def: $vgpr3
+; GFX906-NEXT: ; implicit-def: $vgpr13
+; GFX906-NEXT: ; implicit-def: $vgpr11
+; GFX906-NEXT: ; implicit-def: $vgpr14
+; GFX906-NEXT: ; implicit-def: $vgpr15
+; GFX906-NEXT: ; implicit-def: $vgpr12
+; GFX906-NEXT: ; implicit-def: $vgpr16
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v4, s[0:1]
+; GFX906-NEXT: v_cmp_lt_u32_e64 s[0:1], 14, v0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v5, 24, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v7, 8, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v8, 24, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v10, 8, v1
+; GFX906-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB10_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx2 v[3:4], v4, s[2:3]
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
+; GFX906-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
+; GFX906-NEXT: s_and_b64 s[2:3], vcc, exec
+; GFX906-NEXT: v_mov_b32_e32 v1, 1
+; GFX906-NEXT: v_mov_b32_e32 v10, 2
+; GFX906-NEXT: v_mov_b32_e32 v9, 3
+; GFX906-NEXT: v_mov_b32_e32 v8, 4
+; GFX906-NEXT: v_mov_b32_e32 v2, 5
+; GFX906-NEXT: v_mov_b32_e32 v7, 6
+; GFX906-NEXT: v_mov_b32_e32 v6, 7
+; GFX906-NEXT: v_mov_b32_e32 v5, 8
+; GFX906-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v16, 24, v4
+; GFX906-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX906-NEXT: v_lshrrev_b32_e32 v15, 8, v4
+; GFX906-NEXT: v_lshrrev_b32_e32 v14, 24, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v13, 8, v3
+; GFX906-NEXT: .LBB10_2: ; %Flow
+; GFX906-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX906-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GFX906-NEXT: s_cbranch_execz .LBB10_4
+; GFX906-NEXT: ; %bb.3: ; %bb.2
+; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v10
+; GFX906-NEXT: v_lshlrev_b16_e32 v4, 8, v8
+; GFX906-NEXT: v_or_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v4, v9, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX906-NEXT: v_lshlrev_b16_e32 v11, 8, v5
+; GFX906-NEXT: v_or_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v11, v6, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: v_or_b32_sdwa v4, v4, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: global_store_dwordx2 v0, v[3:4], s[4:5]
+; GFX906-NEXT: v_mov_b32_e32 v3, v1
+; GFX906-NEXT: v_mov_b32_e32 v13, v10
+; GFX906-NEXT: v_mov_b32_e32 v11, v9
+; GFX906-NEXT: v_mov_b32_e32 v14, v8
+; GFX906-NEXT: v_mov_b32_e32 v4, v2
+; GFX906-NEXT: v_mov_b32_e32 v15, v7
+; GFX906-NEXT: v_mov_b32_e32 v12, v6
+; GFX906-NEXT: v_mov_b32_e32 v16, v5
+; GFX906-NEXT: .LBB10_4: ; %bb.3
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v13
+; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v14
+; GFX906-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX906-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:96 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:88 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v15
+; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v16
+; GFX906-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v3, v12, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_mov_b32_e32 v2, 0
+; GFX906-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
+
+bb.2:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [<i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, %bb.1 ]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst0, align 4
+ br label %bb.3
+
+bb.3:
+ %tmp7 = phi <8 x i8> [ %vec2, %bb.1], [%tmp5, %bb.2]
+ store <8 x i8> %tmp7, ptr addrspace(1) %dst1, align 4
+ ret void
+}
+
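+; Check an <8 x i8> phi with three incoming blocks from nested control flow.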
+define amdgpu_kernel void @v8i8_multi_block(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst0, ptr addrspace(1) nocapture %dst1) {
+; GFX906-LABEL: v8i8_multi_block:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v6, 3, v0
+; GFX906-NEXT: v_mov_b32_e32 v5, 0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[3:4], v6, s[0:1]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:84 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:76 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v6, off, s[8:11], 0 offset:68 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:72 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 offset:64 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX906-NEXT: v_mov_b32_e32 v1, v3
+; GFX906-NEXT: v_mov_b32_e32 v2, v4
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB11_4
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v6, s[2:3]
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
+; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB11_3
+; GFX906-NEXT: ; %bb.2: ; %bb.2
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: global_store_dwordx2 v0, v[3:4], s[4:5]
+; GFX906-NEXT: .LBB11_3: ; %Flow
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: .LBB11_4: ; %bb.3
+; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v5
-; GFX906-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v5, v6, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:224
-; GFX906-NEXT: buffer_load_dword v0, off, s[8:11], 0 offset:60 ; 4-byte Folded Reload
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v6, off, s[8:11], 0 offset:4 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v7, off, s[8:11], 0 offset:8 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v8, off, s[8:11], 0 offset:12 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:52 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:56 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:44 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(7)
-; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: s_waitcnt vmcnt(3)
-; GFX906-NEXT: v_or_b32_sdwa v0, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v1, off, s[8:11], 0 offset:48 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:40 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 offset:32 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: v_or_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:36 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:28 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v6, off, s[8:11], 0 offset:20 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(2)
-; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
-; GFX906-NEXT: v_or_b32_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:24 ; 4-byte Folded Reload
-; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 offset:16 ; 4-byte Folded Reload
-; GFX906-NEXT: s_waitcnt vmcnt(1)
-; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX906-NEXT: global_store_dwordx2 v5, v[1:2], s[6:7]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.3
+bb.1:
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
+
+bb.2:
+ store <8 x i8> %vec1, ptr addrspace(1) %dst0, align 4
+ br label %bb.3
+
+bb.3:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ], [ %vec2, %bb.2]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst1, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v32i8_loop_carried(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: v32i8_loop_carried:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v1, 5, v0
+; GFX906-NEXT: v_cmp_lt_u32_e32 vcc, 14, v0
+; GFX906-NEXT: s_mov_b32 s4, 0x2000604
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dword v1, v1, s[2:3]
+; GFX906-NEXT: s_mov_b64 s[2:3], 0
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_mov_b32_e32 v0, v1
+; GFX906-NEXT: .LBB12_1: ; %bb.1
+; GFX906-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX906-NEXT: s_and_b64 s[6:7], exec, vcc
+; GFX906-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3]
+; GFX906-NEXT: v_perm_b32 v0, v1, v0, s4
+; GFX906-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX906-NEXT: s_cbranch_execnz .LBB12_1
+; GFX906-NEXT: ; %bb.2: ; %bb.2.loopexit
+; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX906-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX906-NEXT: v_mov_b32_e32 v1, 0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX906-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <32 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ br label %bb.1
+
+bb.1:
+ %temp = phi <4 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ %vec2 = shufflevector <4 x i8> %vec1, <4 x i8> %temp, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+ br label %bb.2
+
+bb.2:
+ store <4 x i8> %vec2, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+; The pass should not introduce "Instruction does not dominate all uses!" verifier errors.
+
+define amdgpu_kernel void @v8i8_multiuse_multiblock(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst1, ptr addrspace(1) nocapture %dst2, ptr addrspace(1) nocapture %dst3) {
+; GFX906-LABEL: v8i8_multiuse_multiblock:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x24
+; GFX906-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX906-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x44
+; GFX906-NEXT: v_cmp_lt_u32_e64 s[2:3], 14, v0
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v1, s[4:5]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_lshlrev_b16_e32 v5, 8, v5
-; GFX906-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v5, v6, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_or_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:240
+; GFX906-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX906-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX906-NEXT: s_cbranch_execz .LBB13_2
+; GFX906-NEXT: ; %bb.1: ; %bb.1
+; GFX906-NEXT: s_movk_i32 s6, 0xff00
+; GFX906-NEXT: v_mov_b32_e32 v5, 8
+; GFX906-NEXT: v_and_b32_sdwa v6, v1, s6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX906-NEXT: s_mov_b32 s6, 0x6070504
+; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
+; GFX906-NEXT: v_and_b32_e32 v4, 0xffffff00, v1
+; GFX906-NEXT: v_lshlrev_b16_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX906-NEXT: v_perm_b32 v7, v1, v1, s6
+; GFX906-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
+; GFX906-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX906-NEXT: v_mov_b32_e32 v3, 0
+; GFX906-NEXT: v_or_b32_sdwa v4, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v5, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v6, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX906-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; GFX906-NEXT: v_or_b32_sdwa v6, v5, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: global_store_dword v3, v1, s[8:9]
+; GFX906-NEXT: global_store_dword v3, v7, s[8:9] offset:8
+; GFX906-NEXT: global_store_dword v3, v6, s[8:9] offset:16
+; GFX906-NEXT: global_store_dword v3, v4, s[8:9] offset:24
+; GFX906-NEXT: .LBB13_2: ; %Flow
+; GFX906-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX906-NEXT: s_and_saveexec_b64 s[4:5], s[2:3]
+; GFX906-NEXT: s_cbranch_execz .LBB13_4
+; GFX906-NEXT: ; %bb.3: ; %bb.2
+; GFX906-NEXT: v_lshlrev_b16_e32 v3, 8, v2
+; GFX906-NEXT: v_and_b32_e32 v4, 0xffffff00, v2
+; GFX906-NEXT: v_and_b32_e32 v5, 0xffffff00, v1
+; GFX906-NEXT: s_mov_b32 s2, 0xc0c0001
+; GFX906-NEXT: v_or_b32_sdwa v3, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v5, v2, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_perm_b32 v2, 0, v2, s2
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: v_perm_b32 v6, 0, v1, s2
+; GFX906-NEXT: s_mov_b32 s3, 0xffff0000
+; GFX906-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX906-NEXT: v_and_or_b32 v7, v1, s3, v6
+; GFX906-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_e32 v2, v6, v2
+; GFX906-NEXT: global_store_dword v0, v3, s[10:11]
+; GFX906-NEXT: global_store_dword v0, v4, s[10:11] offset:8
+; GFX906-NEXT: global_store_dword v0, v7, s[10:11] offset:16
+; GFX906-NEXT: global_store_dword v0, v2, s[10:11] offset:24
+; GFX906-NEXT: .LBB13_4: ; %bb.3
+; GFX906-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX906-NEXT: s_movk_i32 s3, 0xff00
+; GFX906-NEXT: v_mov_b32_e32 v4, 8
+; GFX906-NEXT: s_movk_i32 s2, 0xff
+; GFX906-NEXT: v_and_b32_sdwa v2, v1, s3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX906-NEXT: v_lshlrev_b16_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX906-NEXT: v_or_b32_sdwa v3, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v5, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX906-NEXT: v_lshlrev_b16_e32 v6, 8, v1
+; GFX906-NEXT: v_and_b32_sdwa v7, v1, s2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: v_or_b32_sdwa v3, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v7, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v2, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v4, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v2, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX906-NEXT: global_store_dword v0, v3, s[0:1]
+; GFX906-NEXT: global_store_dword v0, v1, s[0:1] offset:8
+; GFX906-NEXT: global_store_dword v0, v4, s[0:1] offset:16
+; GFX906-NEXT: global_store_dword v0, v2, s[0:1] offset:24
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
- %vec1 = load <256 x i8>, ptr addrspace(1) %gep1
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
%gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
- %vec2 = load <256 x i8>, ptr addrspace(1) %gep2
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
%cmp = icmp ult i32 %idx, 15
br i1 %cmp, label %bb.1, label %bb.2
bb.1:
- br label %bb.2
+ %s1 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %s2 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 0, i32 1, i32 3, i32 2>
+ %s3 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ %s4 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ %gep4 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst1, i32 0
+ %gep5 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst1, i32 1
+ %gep6 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst1, i32 2
+ %gep7 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst1, i32 3
+ store <4 x i8> %s1, ptr addrspace(1) %gep4, align 4
+ store <4 x i8> %s2, ptr addrspace(1) %gep5, align 4
+ store <4 x i8> %s3, ptr addrspace(1) %gep6, align 4
+ store <4 x i8> %s4, ptr addrspace(1) %gep7, align 4
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
bb.2:
- %tmp5 = phi <256 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
- store <256 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ %s5 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %s6 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 0, i32 3, i32 2, i32 1>
+ %s7 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
+ %s8 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %gep8 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst2, i32 0
+ %gep9 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst2, i32 1
+ %gep10 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst2, i32 2
+ %gep11 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst2, i32 3
+ store <4 x i8> %s5, ptr addrspace(1) %gep8, align 4
+ store <4 x i8> %s6, ptr addrspace(1) %gep9, align 4
+ store <4 x i8> %s7, ptr addrspace(1) %gep10, align 4
+ store <4 x i8> %s8, ptr addrspace(1) %gep11, align 4
+ br label %bb.3
+
+bb.3:
+ %s9 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ %s10 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %s11 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ %s12 = shufflevector <8 x i8> %vec1, <8 x i8> %vec2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %gep12 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst3, i32 0
+ %gep13 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst3, i32 1
+ %gep14 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst3, i32 2
+ %gep15 = getelementptr ptr addrspace(1), ptr addrspace(1) %dst3, i32 3
+ store <4 x i8> %s9, ptr addrspace(1) %gep12, align 4
+ store <4 x i8> %s10, ptr addrspace(1) %gep13, align 4
+ store <4 x i8> %s11, ptr addrspace(1) %gep14, align 4
+ store <4 x i8> %s12, ptr addrspace(1) %gep15, align 4
ret void
}
-declare i32 @llvm.amdgcn.workitem.id.x()
+declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/vni8-live-reg-opt.ll b/llvm/test/CodeGen/AMDGPU/vni8-live-reg-opt.ll
new file mode 100644
index 0000000000000..5d2e299aa854a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/vni8-live-reg-opt.ll
@@ -0,0 +1,352 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=amdgcn -mcpu=gfx906 -amdgpu-late-codegenprepare -S -o - %s | FileCheck --check-prefix=GFX906 %s
+
+define amdgpu_kernel void @v3i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: define amdgpu_kernel void @v3i8_liveout(
+; GFX906-SAME: ptr addrspace(1) [[SRC1:%.*]], ptr addrspace(1) [[SRC2:%.*]], ptr addrspace(1) nocapture [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
+; GFX906-NEXT: entry:
+; GFX906-NEXT: [[IDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX906-NEXT: [[GEP1:%.*]] = getelementptr <3 x i8>, ptr addrspace(1) [[SRC1]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC1:%.*]] = load <3 x i8>, ptr addrspace(1) [[GEP1]], align 4
+; GFX906-NEXT: [[TMP0:%.*]] = shufflevector <3 x i8> [[VEC1]], <3 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX906-NEXT: [[VEC1_BC:%.*]] = bitcast <4 x i8> [[TMP0]] to i32
+; GFX906-NEXT: [[GEP2:%.*]] = getelementptr <3 x i8>, ptr addrspace(1) [[SRC2]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC2:%.*]] = load <3 x i8>, ptr addrspace(1) [[GEP2]], align 4
+; GFX906-NEXT: [[TMP1:%.*]] = shufflevector <3 x i8> [[VEC2]], <3 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX906-NEXT: [[VEC2_BC:%.*]] = bitcast <4 x i8> [[TMP1]] to i32
+; GFX906-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 15
+; GFX906-NEXT: br i1 [[CMP]], label [[BB_1:%.*]], label [[BB_2:%.*]]
+; GFX906: bb.1:
+; GFX906-NEXT: br label [[BB_2]]
+; GFX906: bb.2:
+; GFX906-NEXT: [[TMP5_TC:%.*]] = phi i32 [ [[VEC1_BC]], [[ENTRY:%.*]] ], [ [[VEC2_BC]], [[BB_1]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP5_TC]] to i24
+; GFX906-NEXT: [[TMP3:%.*]] = bitcast i24 [[TMP2]] to <3 x i8>
+; GFX906-NEXT: store <3 x i8> [[TMP3]], ptr addrspace(1) [[DST]], align 4
+; GFX906-NEXT: ret void
+;
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <3 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <3 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <3 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <3 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <3 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <3 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v4i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: define amdgpu_kernel void @v4i8_liveout(
+; GFX906-SAME: ptr addrspace(1) [[SRC1:%.*]], ptr addrspace(1) [[SRC2:%.*]], ptr addrspace(1) nocapture [[DST:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: entry:
+; GFX906-NEXT: [[IDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX906-NEXT: [[GEP1:%.*]] = getelementptr <4 x i8>, ptr addrspace(1) [[SRC1]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC1:%.*]] = load <4 x i8>, ptr addrspace(1) [[GEP1]], align 4
+; GFX906-NEXT: [[VEC1_BC:%.*]] = bitcast <4 x i8> [[VEC1]] to i32
+; GFX906-NEXT: [[GEP2:%.*]] = getelementptr <4 x i8>, ptr addrspace(1) [[SRC2]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC2:%.*]] = load <4 x i8>, ptr addrspace(1) [[GEP2]], align 4
+; GFX906-NEXT: [[VEC2_BC:%.*]] = bitcast <4 x i8> [[VEC2]] to i32
+; GFX906-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 15
+; GFX906-NEXT: br i1 [[CMP]], label [[BB_1:%.*]], label [[BB_2:%.*]]
+; GFX906: bb.1:
+; GFX906-NEXT: br label [[BB_2]]
+; GFX906: bb.2:
+; GFX906-NEXT: [[TMP5_TC:%.*]] = phi i32 [ [[VEC1_BC]], [[ENTRY:%.*]] ], [ [[VEC2_BC]], [[BB_1]] ]
+; GFX906-NEXT: [[TMP5_TC_BC:%.*]] = bitcast i32 [[TMP5_TC]] to <4 x i8>
+; GFX906-NEXT: store <4 x i8> [[TMP5_TC_BC]], ptr addrspace(1) [[DST]], align 4
+; GFX906-NEXT: ret void
+;
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <4 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <4 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <4 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <4 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <4 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v5i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: define amdgpu_kernel void @v5i8_liveout(
+; GFX906-SAME: ptr addrspace(1) [[SRC1:%.*]], ptr addrspace(1) [[SRC2:%.*]], ptr addrspace(1) nocapture [[DST:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: entry:
+; GFX906-NEXT: [[IDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX906-NEXT: [[GEP1:%.*]] = getelementptr <5 x i8>, ptr addrspace(1) [[SRC1]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC1:%.*]] = load <5 x i8>, ptr addrspace(1) [[GEP1]], align 8
+; GFX906-NEXT: [[TMP0:%.*]] = shufflevector <5 x i8> [[VEC1]], <5 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 5, i32 5>
+; GFX906-NEXT: [[VEC1_BC:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+; GFX906-NEXT: [[GEP2:%.*]] = getelementptr <5 x i8>, ptr addrspace(1) [[SRC2]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC2:%.*]] = load <5 x i8>, ptr addrspace(1) [[GEP2]], align 8
+; GFX906-NEXT: [[TMP1:%.*]] = shufflevector <5 x i8> [[VEC2]], <5 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 5, i32 5>
+; GFX906-NEXT: [[VEC2_BC:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
+; GFX906-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 15
+; GFX906-NEXT: br i1 [[CMP]], label [[BB_1:%.*]], label [[BB_2:%.*]]
+; GFX906: bb.1:
+; GFX906-NEXT: br label [[BB_2]]
+; GFX906: bb.2:
+; GFX906-NEXT: [[TMP5_TC:%.*]] = phi <2 x i32> [ [[VEC1_BC]], [[ENTRY:%.*]] ], [ [[VEC2_BC]], [[BB_1]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP5_TC]] to <8 x i8>
+; GFX906-NEXT: [[TMP3:%.*]] = shufflevector <8 x i8> [[TMP2]], <8 x i8> poison, <5 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4>
+; GFX906-NEXT: store <5 x i8> [[TMP3]], ptr addrspace(1) [[DST]], align 4
+; GFX906-NEXT: ret void
+;
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <5 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <5 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <5 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <5 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <5 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <5 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v8i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: define amdgpu_kernel void @v8i8_liveout(
+; GFX906-SAME: ptr addrspace(1) [[SRC1:%.*]], ptr addrspace(1) [[SRC2:%.*]], ptr addrspace(1) nocapture [[DST:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: entry:
+; GFX906-NEXT: [[IDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX906-NEXT: [[GEP1:%.*]] = getelementptr <8 x i8>, ptr addrspace(1) [[SRC1]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC1:%.*]] = load <8 x i8>, ptr addrspace(1) [[GEP1]], align 8
+; GFX906-NEXT: [[VEC1_BC:%.*]] = bitcast <8 x i8> [[VEC1]] to <2 x i32>
+; GFX906-NEXT: [[GEP2:%.*]] = getelementptr <8 x i8>, ptr addrspace(1) [[SRC2]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC2:%.*]] = load <8 x i8>, ptr addrspace(1) [[GEP2]], align 8
+; GFX906-NEXT: [[VEC2_BC:%.*]] = bitcast <8 x i8> [[VEC2]] to <2 x i32>
+; GFX906-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 15
+; GFX906-NEXT: br i1 [[CMP]], label [[BB_1:%.*]], label [[BB_2:%.*]]
+; GFX906: bb.1:
+; GFX906-NEXT: br label [[BB_2]]
+; GFX906: bb.2:
+; GFX906-NEXT: [[TMP5_TC:%.*]] = phi <2 x i32> [ [[VEC1_BC]], [[ENTRY:%.*]] ], [ [[VEC2_BC]], [[BB_1]] ]
+; GFX906-NEXT: [[TMP5_TC_BC:%.*]] = bitcast <2 x i32> [[TMP5_TC]] to <8 x i8>
+; GFX906-NEXT: store <8 x i8> [[TMP5_TC_BC]], ptr addrspace(1) [[DST]], align 4
+; GFX906-NEXT: ret void
+;
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+
+bb.2:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @repeat_successor(i32 %in, ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: define amdgpu_kernel void @repeat_successor(
+; GFX906-SAME: i32 [[IN:%.*]], ptr addrspace(1) [[SRC1:%.*]], ptr addrspace(1) [[SRC2:%.*]], ptr addrspace(1) nocapture [[DST:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: entry:
+; GFX906-NEXT: [[IDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX906-NEXT: [[GEP1:%.*]] = getelementptr <4 x i8>, ptr addrspace(1) [[SRC1]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC1:%.*]] = load <4 x i8>, ptr addrspace(1) [[GEP1]], align 4
+; GFX906-NEXT: [[VEC1_BC:%.*]] = bitcast <4 x i8> [[VEC1]] to i32
+; GFX906-NEXT: [[GEP2:%.*]] = getelementptr <4 x i8>, ptr addrspace(1) [[SRC2]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC2:%.*]] = load <4 x i8>, ptr addrspace(1) [[GEP2]], align 4
+; GFX906-NEXT: [[VEC2_BC:%.*]] = bitcast <4 x i8> [[VEC2]] to i32
+; GFX906-NEXT: switch i32 [[IN]], label [[RETURN:%.*]] [
+; GFX906-NEXT: i32 1, label [[RETURN_SINK_SPLIT:%.*]]
+; GFX906-NEXT: i32 2, label [[RETURN_SINK_SPLIT]]
+; GFX906-NEXT: i32 3, label [[SW_BB5:%.*]]
+; GFX906-NEXT: ]
+; GFX906: sw.bb5:
+; GFX906-NEXT: br label [[RETURN_SINK_SPLIT]]
+; GFX906: return.sink.split:
+; GFX906-NEXT: [[TMP5_TC:%.*]] = phi i32 [ [[VEC2_BC]], [[SW_BB5]] ], [ [[VEC1_BC]], [[ENTRY:%.*]] ], [ [[VEC1_BC]], [[ENTRY]] ]
+; GFX906-NEXT: [[TMP5_TC_BC:%.*]] = bitcast i32 [[TMP5_TC]] to <4 x i8>
+; GFX906-NEXT: store <4 x i8> [[TMP5_TC_BC]], ptr addrspace(1) [[DST]], align 4
+; GFX906-NEXT: ret void
+; GFX906: return:
+; GFX906-NEXT: ret void
+;
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <4 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <4 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <4 x i8>, ptr addrspace(1) %gep2
+ switch i32 %in, label %return [
+ i32 1, label %return.sink.split
+ i32 2, label %return.sink.split
+ i32 3, label %sw.bb5
+ ]
+
+sw.bb5:
+ br label %return.sink.split
+
+return.sink.split:
+ %tmp5 = phi <4 x i8> [ %vec2, %sw.bb5 ], [ %vec1, %entry ], [ %vec1, %entry ]
+ store <4 x i8> %tmp5, ptr addrspace(1) %dst, align 4
+ ret void
+
+return:
+ ret void
+}
+
+define amdgpu_kernel void @v8i8_phi_chain(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst0, ptr addrspace(1) nocapture %dst1) {
+; GFX906-LABEL: define amdgpu_kernel void @v8i8_phi_chain(
+; GFX906-SAME: ptr addrspace(1) [[SRC1:%.*]], ptr addrspace(1) [[SRC2:%.*]], ptr addrspace(1) nocapture [[DST0:%.*]], ptr addrspace(1) nocapture [[DST1:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: entry:
+; GFX906-NEXT: [[IDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX906-NEXT: [[GEP1:%.*]] = getelementptr <8 x i8>, ptr addrspace(1) [[SRC1]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC1:%.*]] = load <8 x i8>, ptr addrspace(1) [[GEP1]], align 8
+; GFX906-NEXT: [[VEC1_BC:%.*]] = bitcast <8 x i8> [[VEC1]] to <2 x i32>
+; GFX906-NEXT: [[GEP2:%.*]] = getelementptr <8 x i8>, ptr addrspace(1) [[SRC2]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC2:%.*]] = load <8 x i8>, ptr addrspace(1) [[GEP2]], align 8
+; GFX906-NEXT: [[VEC2_BC:%.*]] = bitcast <8 x i8> [[VEC2]] to <2 x i32>
+; GFX906-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 15
+; GFX906-NEXT: br i1 [[CMP]], label [[BB_1:%.*]], label [[BB_2:%.*]]
+; GFX906: bb.1:
+; GFX906-NEXT: [[CMP2:%.*]] = icmp ult i32 [[IDX]], 7
+; GFX906-NEXT: br i1 [[CMP2]], label [[BB_2]], label [[BB_3:%.*]]
+; GFX906: bb.2:
+; GFX906-NEXT: [[TMP5_TC:%.*]] = phi <2 x i32> [ [[VEC1_BC]], [[ENTRY:%.*]] ], [ [[VEC2_BC]], [[BB_1]] ]
+; GFX906-NEXT: [[TMP5_TC_BC:%.*]] = bitcast <2 x i32> [[TMP5_TC]] to <8 x i8>
+; GFX906-NEXT: store <8 x i8> [[TMP5_TC_BC]], ptr addrspace(1) [[DST0]], align 4
+; GFX906-NEXT: br label [[BB_3]]
+; GFX906: bb.3:
+; GFX906-NEXT: [[TMP7_TC:%.*]] = phi <2 x i32> [ [[VEC2_BC]], [[BB_1]] ], [ [[TMP5_TC]], [[BB_2]] ]
+; GFX906-NEXT: [[TMP7_TC_BC:%.*]] = bitcast <2 x i32> [[TMP7_TC]] to <8 x i8>
+; GFX906-NEXT: store <8 x i8> [[TMP7_TC_BC]], ptr addrspace(1) [[DST1]], align 4
+; GFX906-NEXT: ret void
+;
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+bb.1:
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
+
+bb.2:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst0, align 4
+ br label %bb.3
+
+bb.3:
+ %tmp7 = phi <8 x i8> [ %vec2, %bb.1], [%tmp5, %bb.2]
+ store <8 x i8> %tmp7, ptr addrspace(1) %dst1, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v8i8_multi_block(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst0, ptr addrspace(1) nocapture %dst1) {
+; GFX906-LABEL: define amdgpu_kernel void @v8i8_multi_block(
+; GFX906-SAME: ptr addrspace(1) [[SRC1:%.*]], ptr addrspace(1) [[SRC2:%.*]], ptr addrspace(1) nocapture [[DST0:%.*]], ptr addrspace(1) nocapture [[DST1:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: entry:
+; GFX906-NEXT: [[IDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX906-NEXT: [[GEP1:%.*]] = getelementptr <8 x i8>, ptr addrspace(1) [[SRC1]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC1:%.*]] = load <8 x i8>, ptr addrspace(1) [[GEP1]], align 8
+; GFX906-NEXT: [[VEC1_BC:%.*]] = bitcast <8 x i8> [[VEC1]] to <2 x i32>
+; GFX906-NEXT: [[GEP2:%.*]] = getelementptr <8 x i8>, ptr addrspace(1) [[SRC2]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC2:%.*]] = load <8 x i8>, ptr addrspace(1) [[GEP2]], align 8
+; GFX906-NEXT: [[VEC2_BC:%.*]] = bitcast <8 x i8> [[VEC2]] to <2 x i32>
+; GFX906-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 15
+; GFX906-NEXT: br i1 [[CMP]], label [[BB_1:%.*]], label [[BB_3:%.*]]
+; GFX906: bb.1:
+; GFX906-NEXT: [[CMP2:%.*]] = icmp ult i32 [[IDX]], 7
+; GFX906-NEXT: br i1 [[CMP2]], label [[BB_2:%.*]], label [[BB_3]]
+; GFX906: bb.2:
+; GFX906-NEXT: [[VEC1_BC_BC:%.*]] = bitcast <2 x i32> [[VEC1_BC]] to <8 x i8>
+; GFX906-NEXT: store <8 x i8> [[VEC1_BC_BC]], ptr addrspace(1) [[DST0]], align 4
+; GFX906-NEXT: br label [[BB_3]]
+; GFX906: bb.3:
+; GFX906-NEXT: [[TMP5_TC:%.*]] = phi <2 x i32> [ [[VEC1_BC]], [[ENTRY:%.*]] ], [ [[VEC2_BC]], [[BB_1]] ], [ [[VEC2_BC]], [[BB_2]] ]
+; GFX906-NEXT: [[TMP5_TC_BC:%.*]] = bitcast <2 x i32> [[TMP5_TC]] to <8 x i8>
+; GFX906-NEXT: store <8 x i8> [[TMP5_TC_BC]], ptr addrspace(1) [[DST1]], align 4
+; GFX906-NEXT: ret void
+;
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <8 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <8 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <8 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <8 x i8>, ptr addrspace(1) %gep2
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.3
+bb.1:
+ %cmp2 = icmp ult i32 %idx, 7
+ br i1 %cmp2, label %bb.2, label %bb.3
+
+bb.2:
+ store <8 x i8> %vec1, ptr addrspace(1) %dst0, align 4
+ br label %bb.3
+
+bb.3:
+ %tmp5 = phi <8 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ], [ %vec2, %bb.2]
+ store <8 x i8> %tmp5, ptr addrspace(1) %dst1, align 4
+ ret void
+}
+
+define amdgpu_kernel void @v32i8_loop_carried(ptr addrspace(1) %src1, ptr addrspace(1) %src2, ptr addrspace(1) nocapture %dst) {
+; GFX906-LABEL: define amdgpu_kernel void @v32i8_loop_carried(
+; GFX906-SAME: ptr addrspace(1) [[SRC1:%.*]], ptr addrspace(1) [[SRC2:%.*]], ptr addrspace(1) nocapture [[DST:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: entry:
+; GFX906-NEXT: [[IDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX906-NEXT: [[GEP1:%.*]] = getelementptr <32 x i8>, ptr addrspace(1) [[SRC1]], i32 [[IDX]]
+; GFX906-NEXT: [[VEC1:%.*]] = load <4 x i8>, ptr addrspace(1) [[GEP1]], align 4
+; GFX906-NEXT: [[VEC1_BC:%.*]] = bitcast <4 x i8> [[VEC1]] to i32
+; GFX906-NEXT: br label [[BB_1:%.*]]
+; GFX906: bb.1:
+; GFX906-NEXT: [[TEMP_TC:%.*]] = phi i32 [ [[VEC1_BC]], [[ENTRY:%.*]] ], [ [[VEC2_BC:%.*]], [[BB_1]] ]
+; GFX906-NEXT: [[TEMP_TC_BC:%.*]] = bitcast i32 [[TEMP_TC]] to <4 x i8>
+; GFX906-NEXT: [[VEC1_BC_BC:%.*]] = bitcast i32 [[VEC1_BC]] to <4 x i8>
+; GFX906-NEXT: [[VEC2:%.*]] = shufflevector <4 x i8> [[VEC1_BC_BC]], <4 x i8> [[TEMP_TC_BC]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; GFX906-NEXT: [[VEC2_BC]] = bitcast <4 x i8> [[VEC2]] to i32
+; GFX906-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 15
+; GFX906-NEXT: br i1 [[CMP]], label [[BB_1]], label [[BB_2:%.*]]
+; GFX906: 0:
+; GFX906-NEXT: br label [[BB_2]]
+; GFX906: bb.2:
+; GFX906-NEXT: [[VEC2_BC_BC:%.*]] = bitcast i32 [[VEC2_BC]] to <4 x i8>
+; GFX906-NEXT: store <4 x i8> [[VEC2_BC_BC]], ptr addrspace(1) [[DST]], align 4
+; GFX906-NEXT: ret void
+;
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <32 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ br label %bb.1
+
+bb.1:
+ %temp = phi <4 x i8> [ %vec1, %entry ], [ %vec2, %bb.1 ]
+ %vec2 = shufflevector <4 x i8> %vec1, <4 x i8> %temp, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %cmp = icmp ult i32 %idx, 15
+ br i1 %cmp, label %bb.1, label %bb.2
+ br label %bb.2
+
+bb.2:
+ store <4 x i8> %vec2, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
From 9667e6044a2fce85f2fb35cd1731cbc8491f1871 Mon Sep 17 00:00:00 2001
From: Jon Roelofs <jonathan_roelofs at apple.com>
Date: Wed, 3 Jul 2024 10:01:08 -0700
Subject: [PATCH 153/246] [llvm][AArch64] Drop unused & redundant field in the
 TargetParserTest. NFC (#97367)

There were a couple of cases where this field was just plain wrong
because we weren't actually testing against it. Instead, drop the
`CPUAttr` field from the AArch64 tests.
---
.../TargetParser/TargetParserTest.cpp | 1518 ++++++++---------
1 file changed, 690 insertions(+), 828 deletions(-)
diff --git a/llvm/unittests/TargetParser/TargetParserTest.cpp b/llvm/unittests/TargetParser/TargetParserTest.cpp
index fe20099382859..97d09d677fbdd 100644
--- a/llvm/unittests/TargetParser/TargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/TargetParserTest.cpp
@@ -1071,10 +1071,31 @@ TEST(TargetParserTest, ARMPrintSupportedExtensions) {
EXPECT_EQ(std::string::npos, captured.find("xscale"));
}
-class AArch64CPUTestFixture : public ::testing::TestWithParam<
- ARMCPUTestParams<AArch64::ExtensionBitset>> {
+struct AArch64CPUTestParams
+ : public ARMCPUTestParams<AArch64::ExtensionBitset> {
+ AArch64CPUTestParams(StringRef CPUName, StringRef ExpectedArch,
+ StringRef ExpectedFPU,
+ AArch64::ExtensionBitset ExpectedFlags)
+ : ARMCPUTestParams<AArch64::ExtensionBitset>(CPUName, ExpectedArch,
+ ExpectedFPU, ExpectedFlags,
+ /*ignored*/ "") {}
+ /// Print a gtest-compatible facsimile of the CPUName, to make the test's name
+ /// human-readable.
+ ///
+ /// https://github.com/google/googletest/blob/main/docs/advanced.md#specifying-names-for-value-parameterized-test-parameters
+ static std::string PrintToStringParamName(
+ const testing::TestParamInfo<AArch64CPUTestParams> &Info) {
+ std::string Name = Info.param.CPUName.str();
+ for (char &C : Name)
+ if (!std::isalnum(C))
+ C = '_';
+ return Name;
+ }
};
+class AArch64CPUTestFixture
+ : public ::testing::TestWithParam<AArch64CPUTestParams> {};
+
TEST_P(AArch64CPUTestFixture, testAArch64CPU) {
auto params = GetParam();
@@ -1090,865 +1111,707 @@ TEST_P(AArch64CPUTestFixture, testAArch64CPU) {
INSTANTIATE_TEST_SUITE_P(
AArch64CPUTests, AArch64CPUTestFixture,
::testing::Values(
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a34", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a35", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a53", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ AArch64CPUTestParams("cortex-a34", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("cortex-a35", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("cortex-a53", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a55", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RAS,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_RCPC, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a510", "armv9-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RAS,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_PAUTH,
- AArch64::AEK_MTE, AArch64::AEK_SSBS,
- AArch64::AEK_FP16, AArch64::AEK_FP16FML,
- AArch64::AEK_SB, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_AM}),
- "9-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a520", "armv9.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_MTE,
- AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PERFMON, AArch64::AEK_AM,
- AArch64::AEK_ETE}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a520ae", "armv9.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_MTE,
- AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PERFMON, AArch64::AEK_AM,
- AArch64::AEK_ETE}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a57", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_SIMD, AArch64::AEK_RAS,
+ AArch64::AEK_LSE, AArch64::AEK_RDM, AArch64::AEK_FP16,
+ AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("cortex-a510", "armv9-a", "neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
+ AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_PAUTH,
+ AArch64::AEK_MTE, AArch64::AEK_SSBS,
+ AArch64::AEK_FP16, AArch64::AEK_FP16FML,
+ AArch64::AEK_SB, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_AM}),
+ AArch64CPUTestParams("cortex-a520", "armv9.2-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_SB,
+ AArch64::AEK_SSBS, AArch64::AEK_MTE,
+ AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PERFMON, AArch64::AEK_AM,
+ AArch64::AEK_ETE}),
+ AArch64CPUTestParams("cortex-a520ae", "armv9.2-a",
+ "crypto-neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_SB,
+ AArch64::AEK_SSBS, AArch64::AEK_MTE,
+ AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PERFMON, AArch64::AEK_AM,
+ AArch64::AEK_ETE}),
+ AArch64CPUTestParams("cortex-a57", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a65", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RCPC,
- AArch64::AEK_RDM, AArch64::AEK_SIMD, AArch64::AEK_SSBS,
- AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RCPC,
+ AArch64::AEK_RDM, AArch64::AEK_SIMD, AArch64::AEK_SSBS,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a65ae", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RCPC,
- AArch64::AEK_RDM, AArch64::AEK_SIMD, AArch64::AEK_SSBS,
- AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a72", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a73", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RCPC,
+ AArch64::AEK_RDM, AArch64::AEK_SIMD, AArch64::AEK_SSBS,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("cortex-a72", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("cortex-a73", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a75", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RAS,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_RCPC, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_SIMD, AArch64::AEK_RAS,
+ AArch64::AEK_LSE, AArch64::AEK_RDM, AArch64::AEK_FP16,
+ AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a76", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
- AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
- AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
+ AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a76ae", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
- AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
- AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
+ AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a77", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
- AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
- AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
+ AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a78", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
- AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
- AArch64::AEK_PROFILE, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
+ AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
+ AArch64::AEK_PROFILE, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a78ae", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
- AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
- AArch64::AEK_PROFILE, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
+ AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
+ AArch64::AEK_PROFILE, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a78c", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_RAS, AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD, AArch64::AEK_RCPC,
- AArch64::AEK_SSBS, AArch64::AEK_PROFILE, AArch64::AEK_FLAGM,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_RAS, AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD, AArch64::AEK_RCPC,
+ AArch64::AEK_SSBS, AArch64::AEK_PROFILE, AArch64::AEK_FLAGM,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-a710", "armv9-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RAS,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_MTE, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SVE,
- AArch64::AEK_SVE2, AArch64::AEK_SVE2BITPERM,
- AArch64::AEK_PAUTH, AArch64::AEK_FLAGM,
- AArch64::AEK_SB, AArch64::AEK_I8MM,
- AArch64::AEK_BF16, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE}),
- "9-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a715", "armv9-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_BF16, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE,
- AArch64::AEK_RDM, AArch64::AEK_RCPC,
- AArch64::AEK_DOTPROD, AArch64::AEK_MTE,
- AArch64::AEK_PAUTH, AArch64::AEK_SVE,
- AArch64::AEK_SVE2, AArch64::AEK_SVE2BITPERM,
- AArch64::AEK_SSBS, AArch64::AEK_SB,
- AArch64::AEK_I8MM, AArch64::AEK_PERFMON,
- AArch64::AEK_PREDRES, AArch64::AEK_PROFILE,
- AArch64::AEK_FP16FML, AArch64::AEK_FP16,
- AArch64::AEK_FLAGM, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_TRBE}),
- "9-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a720", "armv9.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_MTE,
- AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
- AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_SPE_EEF,
- AArch64::AEK_TRBE}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a720ae", "armv9.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_MTE,
- AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
- AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_SPE_EEF,
- AArch64::AEK_TRBE}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-a725", "armv9.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_MTE,
- AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
- AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_ETE,
- AArch64::AEK_SPE_EEF, AArch64::AEK_TRBE}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_MTE,
+ AArch64::AEK_FP16, AArch64::AEK_FP16FML, AArch64::AEK_SVE,
+ AArch64::AEK_SVE2, AArch64::AEK_SVE2BITPERM, AArch64::AEK_PAUTH,
+ AArch64::AEK_FLAGM, AArch64::AEK_SB, AArch64::AEK_I8MM,
+ AArch64::AEK_BF16, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PERFMON, AArch64::AEK_ETE}),
+ AArch64CPUTestParams("cortex-a715", "armv9-a", "neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_FP,
+ AArch64::AEK_BF16, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_RDM, AArch64::AEK_RCPC,
+ AArch64::AEK_DOTPROD, AArch64::AEK_MTE,
+ AArch64::AEK_PAUTH, AArch64::AEK_SVE,
+ AArch64::AEK_SVE2, AArch64::AEK_SVE2BITPERM,
+ AArch64::AEK_SSBS, AArch64::AEK_SB,
+ AArch64::AEK_I8MM, AArch64::AEK_PERFMON,
+ AArch64::AEK_PREDRES, AArch64::AEK_PROFILE,
+ AArch64::AEK_FP16FML, AArch64::AEK_FP16,
+ AArch64::AEK_FLAGM, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_TRBE}),
+ AArch64CPUTestParams("cortex-a720", "armv9.2-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_SB,
+ AArch64::AEK_SSBS, AArch64::AEK_MTE,
+ AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_SPE_EEF,
+ AArch64::AEK_TRBE}),
+ AArch64CPUTestParams("cortex-a720ae", "armv9.2-a",
+ "crypto-neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_SB,
+ AArch64::AEK_SSBS, AArch64::AEK_MTE,
+ AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_SPE_EEF,
+ AArch64::AEK_TRBE}),
+ AArch64CPUTestParams("cortex-a725", "armv9.2-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_SB,
+ AArch64::AEK_SSBS, AArch64::AEK_MTE,
+ AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_ETE,
+ AArch64::AEK_SPE_EEF, AArch64::AEK_TRBE}),
+ AArch64CPUTestParams(
"neoverse-v1", "armv8.4-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_RAS, AArch64::AEK_SVE,
- AArch64::AEK_SSBS, AArch64::AEK_RCPC,
- AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RAS,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_SHA3, AArch64::AEK_SM4,
- AArch64::AEK_FP16, AArch64::AEK_BF16,
- AArch64::AEK_PROFILE, AArch64::AEK_RAND,
- AArch64::AEK_FP16FML, AArch64::AEK_I8MM,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON,
- AArch64::AEK_CCDP}),
- "8.4-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "neoverse-v2", "armv9-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_RAS, AArch64::AEK_SVE,
- AArch64::AEK_SSBS, AArch64::AEK_RCPC,
- AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_MTE,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_FP16, AArch64::AEK_BF16,
- AArch64::AEK_SVE2, AArch64::AEK_PROFILE,
- AArch64::AEK_FP16FML, AArch64::AEK_I8MM,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_RAND,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE}),
- "9-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "neoverse-v3", "armv9.2-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_PROFILE,
- AArch64::AEK_MTE, AArch64::AEK_SSBS,
- AArch64::AEK_SB, AArch64::AEK_PREDRES,
- AArch64::AEK_LS64, AArch64::AEK_BRBE,
- AArch64::AEK_PAUTH, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_RAND,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FP16FML,
- AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_SPE_EEF}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "neoverse-v3ae", "armv9.2-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_PROFILE,
- AArch64::AEK_MTE, AArch64::AEK_SSBS,
- AArch64::AEK_SB, AArch64::AEK_PREDRES,
- AArch64::AEK_LS64, AArch64::AEK_BRBE,
- AArch64::AEK_PAUTH, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_RAND,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FP16FML,
- AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_SPE_EEF}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_RAS, AArch64::AEK_SVE, AArch64::AEK_SSBS,
+ AArch64::AEK_RCPC, AArch64::AEK_CRC, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_RDM, AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
+ AArch64::AEK_AES, AArch64::AEK_SHA2, AArch64::AEK_SHA3,
+ AArch64::AEK_SM4, AArch64::AEK_FP16, AArch64::AEK_BF16,
+ AArch64::AEK_PROFILE, AArch64::AEK_RAND, AArch64::AEK_FP16FML,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_CCDP}),
+ AArch64CPUTestParams("neoverse-v2", "armv9-a", "neon-fp-armv8",
+ {AArch64::AEK_RAS, AArch64::AEK_SVE,
+ AArch64::AEK_SSBS, AArch64::AEK_RCPC,
+ AArch64::AEK_CRC, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_MTE,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
+ AArch64::AEK_FP16, AArch64::AEK_BF16,
+ AArch64::AEK_SVE2, AArch64::AEK_PROFILE,
+ AArch64::AEK_FP16FML, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_RAND,
+ AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE}),
+ AArch64CPUTestParams("neoverse-v3", "armv9.2-a", "neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_PROFILE,
+ AArch64::AEK_MTE, AArch64::AEK_SSBS,
+ AArch64::AEK_SB, AArch64::AEK_PREDRES,
+ AArch64::AEK_LS64, AArch64::AEK_BRBE,
+ AArch64::AEK_PAUTH, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_RAND,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FP16FML,
+ AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_SPE_EEF}),
+ AArch64CPUTestParams("neoverse-v3ae", "armv9.2-a", "neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_PROFILE,
+ AArch64::AEK_MTE, AArch64::AEK_SSBS,
+ AArch64::AEK_SB, AArch64::AEK_PREDRES,
+ AArch64::AEK_LS64, AArch64::AEK_BRBE,
+ AArch64::AEK_PAUTH, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_RAND,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FP16FML,
+ AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_SPE_EEF}),
+ AArch64CPUTestParams(
"cortex-r82", "armv8-r", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_RDM, AArch64::AEK_SSBS,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_FP16, AArch64::AEK_FP16FML, AArch64::AEK_RAS,
- AArch64::AEK_RCPC, AArch64::AEK_LSE, AArch64::AEK_SB,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
- AArch64::AEK_FLAGM, AArch64::AEK_PERFMON,
- AArch64::AEK_PREDRES, AArch64::AEK_CCDP}),
- "8-R"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_RDM, AArch64::AEK_SSBS,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_FP16, AArch64::AEK_FP16FML, AArch64::AEK_RAS,
+ AArch64::AEK_RCPC, AArch64::AEK_LSE, AArch64::AEK_SB,
+ AArch64::AEK_JSCVT, AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
+ AArch64::AEK_FLAGM, AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_CCDP}),
+ AArch64CPUTestParams(
"cortex-r82ae", "armv8-r", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_RDM, AArch64::AEK_SSBS,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_FP16, AArch64::AEK_FP16FML, AArch64::AEK_RAS,
- AArch64::AEK_RCPC, AArch64::AEK_LSE, AArch64::AEK_SB,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
- AArch64::AEK_FLAGM, AArch64::AEK_PERFMON,
- AArch64::AEK_PREDRES, AArch64::AEK_CCDP}),
- "8-R"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_RDM, AArch64::AEK_SSBS,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_FP16, AArch64::AEK_FP16FML, AArch64::AEK_RAS,
+ AArch64::AEK_RCPC, AArch64::AEK_LSE, AArch64::AEK_SB,
+ AArch64::AEK_JSCVT, AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
+ AArch64::AEK_FLAGM, AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_CCDP}),
+ AArch64CPUTestParams(
"cortex-x1", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
- AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
- AArch64::AEK_PROFILE, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD, AArch64::AEK_RCPC,
+ AArch64::AEK_SSBS, AArch64::AEK_PROFILE, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"cortex-x1c", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_FP16,
- AArch64::AEK_DOTPROD, AArch64::AEK_RCPC, AArch64::AEK_SSBS,
- AArch64::AEK_PAUTH, AArch64::AEK_PROFILE, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_LSE2, AArch64::AEK_RCPC_IMMO}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-x2", "armv9-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RAS,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_MTE, AArch64::AEK_PAUTH,
- AArch64::AEK_I8MM, AArch64::AEK_BF16,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_SSBS,
- AArch64::AEK_SB, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_FLAGM,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PERFMON, AArch64::AEK_AM,
- AArch64::AEK_ETE}),
- "9-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-x3", "armv9-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_BF16, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE,
- AArch64::AEK_RDM, AArch64::AEK_RCPC,
- AArch64::AEK_DOTPROD, AArch64::AEK_MTE,
- AArch64::AEK_PAUTH, AArch64::AEK_SVE,
- AArch64::AEK_SVE2, AArch64::AEK_SVE2BITPERM,
- AArch64::AEK_SB, AArch64::AEK_PROFILE,
- AArch64::AEK_PERFMON, AArch64::AEK_I8MM,
- AArch64::AEK_FP16, AArch64::AEK_FP16FML,
- AArch64::AEK_PREDRES, AArch64::AEK_FLAGM,
- AArch64::AEK_SSBS, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_TRBE}),
- "9-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-x4", "armv9.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_MTE,
- AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
- AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_SPE_EEF,
- AArch64::AEK_TRBE}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cortex-x925", "armv9.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_MTE,
- AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
- AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
- AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_ETE,
- AArch64::AEK_SPE_EEF, AArch64::AEK_TRBE}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "cyclone", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "apple-a7", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "apple-a8", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "apple-a9", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "apple-a10", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_PERFMON, AArch64::AEK_PAN,
- AArch64::AEK_LOR, AArch64::AEK_VH}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "apple-a11", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_LSE, AArch64::AEK_RAS,
- AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_FP16, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD, AArch64::AEK_RCPC,
+ AArch64::AEK_SSBS, AArch64::AEK_PAUTH, AArch64::AEK_PROFILE,
+ AArch64::AEK_FLAGM, AArch64::AEK_PERFMON, AArch64::AEK_LSE2,
+ AArch64::AEK_RCPC_IMMO}),
+ AArch64CPUTestParams("cortex-x2", "armv9-a", "neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
+ AArch64::AEK_MTE, AArch64::AEK_PAUTH,
+ AArch64::AEK_I8MM, AArch64::AEK_BF16,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_SSBS,
+ AArch64::AEK_SB, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_FLAGM,
+ AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PERFMON, AArch64::AEK_AM,
+ AArch64::AEK_ETE}),
+ AArch64CPUTestParams("cortex-x3", "armv9-a", "neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_FP,
+ AArch64::AEK_BF16, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_RDM, AArch64::AEK_RCPC,
+ AArch64::AEK_DOTPROD, AArch64::AEK_MTE,
+ AArch64::AEK_PAUTH, AArch64::AEK_SVE,
+ AArch64::AEK_SVE2, AArch64::AEK_SVE2BITPERM,
+ AArch64::AEK_SB, AArch64::AEK_PROFILE,
+ AArch64::AEK_PERFMON, AArch64::AEK_I8MM,
+ AArch64::AEK_FP16, AArch64::AEK_FP16FML,
+ AArch64::AEK_PREDRES, AArch64::AEK_FLAGM,
+ AArch64::AEK_SSBS, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_TRBE}),
+ AArch64CPUTestParams("cortex-x4", "armv9.2-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_SB,
+ AArch64::AEK_SSBS, AArch64::AEK_MTE,
+ AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_SPE_EEF,
+ AArch64::AEK_TRBE}),
+ AArch64CPUTestParams("cortex-x925", "armv9.2-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_SB,
+ AArch64::AEK_SSBS, AArch64::AEK_MTE,
+ AArch64::AEK_FP16FML, AArch64::AEK_PAUTH,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_FLAGM,
+ AArch64::AEK_PERFMON, AArch64::AEK_PREDRES,
+ AArch64::AEK_PROFILE, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_ETE,
+ AArch64::AEK_SPE_EEF, AArch64::AEK_TRBE}),
+ AArch64CPUTestParams("cyclone", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("apple-a7", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("apple-a8", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("apple-a9", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("apple-a10", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_RDM, AArch64::AEK_SIMD,
+ AArch64::AEK_PERFMON, AArch64::AEK_PAN,
+ AArch64::AEK_LOR, AArch64::AEK_VH}),
+ AArch64CPUTestParams("apple-a11", "armv8.2-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_LSE, AArch64::AEK_RAS,
+ AArch64::AEK_RDM, AArch64::AEK_SIMD,
+ AArch64::AEK_FP16, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"apple-a12", "armv8.3-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD, AArch64::AEK_LSE,
- AArch64::AEK_RAS, AArch64::AEK_RDM, AArch64::AEK_RCPC,
- AArch64::AEK_FP16, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.3-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_FP16, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"apple-s4", "armv8.3-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD, AArch64::AEK_LSE,
- AArch64::AEK_RAS, AArch64::AEK_RDM, AArch64::AEK_RCPC,
- AArch64::AEK_FP16, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.3-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_FP16, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"apple-s5", "armv8.3-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD, AArch64::AEK_LSE,
- AArch64::AEK_RAS, AArch64::AEK_RDM, AArch64::AEK_RCPC,
- AArch64::AEK_FP16, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.3-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_FP16, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"apple-a13", "armv8.4-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.4-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "apple-a14", "armv8.4-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_SHA3,
- AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS,
- AArch64::AEK_RDM, AArch64::AEK_RCPC,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SHA3,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON,
- AArch64::AEK_PREDRES, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_CCDP,
- AArch64::AEK_FRINT3264, AArch64::AEK_SPECRESTRICT,
- AArch64::AEK_ALTERNATIVENZCV}),
- "8.4-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "apple-m1", "armv8.4-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_SHA3,
- AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS,
- AArch64::AEK_RDM, AArch64::AEK_RCPC,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SHA3,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON,
- AArch64::AEK_PREDRES, AArch64::AEK_SB,
- AArch64::AEK_SSBS, AArch64::AEK_CCDP,
- AArch64::AEK_FRINT3264, AArch64::AEK_SPECRESTRICT,
- AArch64::AEK_ALTERNATIVENZCV}),
- "8.6-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("apple-a14", "armv8.4-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC,
+ AArch64::AEK_AES,
+ AArch64::AEK_SHA2,
+ AArch64::AEK_SHA3,
+ AArch64::AEK_FP,
+ AArch64::AEK_SIMD,
+ AArch64::AEK_LSE,
+ AArch64::AEK_RAS,
+ AArch64::AEK_RDM,
+ AArch64::AEK_RCPC,
+ AArch64::AEK_DOTPROD,
+ AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML,
+ AArch64::AEK_SHA3,
+ AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH,
+ AArch64::AEK_PERFMON,
+ AArch64::AEK_PREDRES,
+ AArch64::AEK_SB,
+ AArch64::AEK_SSBS,
+ AArch64::AEK_CCDP,
+ AArch64::AEK_FRINT3264,
+ AArch64::AEK_SPECRESTRICT,
+ AArch64::AEK_ALTERNATIVENZCV}),
+ AArch64CPUTestParams("apple-m1", "armv8.4-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC,
+ AArch64::AEK_AES,
+ AArch64::AEK_SHA2,
+ AArch64::AEK_SHA3,
+ AArch64::AEK_FP,
+ AArch64::AEK_SIMD,
+ AArch64::AEK_LSE,
+ AArch64::AEK_RAS,
+ AArch64::AEK_RDM,
+ AArch64::AEK_RCPC,
+ AArch64::AEK_DOTPROD,
+ AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML,
+ AArch64::AEK_SHA3,
+ AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH,
+ AArch64::AEK_PERFMON,
+ AArch64::AEK_PREDRES,
+ AArch64::AEK_SB,
+ AArch64::AEK_SSBS,
+ AArch64::AEK_CCDP,
+ AArch64::AEK_FRINT3264,
+ AArch64::AEK_SPECRESTRICT,
+ AArch64::AEK_ALTERNATIVENZCV}),
+ AArch64CPUTestParams(
"apple-a15", "armv8.6-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
- AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.6-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"apple-m2", "armv8.6-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
- AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.6-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"apple-a16", "armv8.6-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
- AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
- "8.4-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
+ AArch64CPUTestParams(
"apple-m3", "armv8.6-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
- AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
- "8.6-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
+ AArch64CPUTestParams(
"apple-a17", "armv8.6-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
- AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
- "8.6-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "apple-m4", "armv8.7-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_SHA3,
- AArch64::AEK_FP, AArch64::AEK_SIMD,
- AArch64::AEK_LSE, AArch64::AEK_RAS,
- AArch64::AEK_RDM, AArch64::AEK_RCPC,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_BF16,
- AArch64::AEK_I8MM, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
- AArch64::AEK_SME, AArch64::AEK_SME2,
- AArch64::AEK_SMEF64F64, AArch64::AEK_SMEI16I64,
- AArch64::AEK_PERFMON}),
- "8.3-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "exynos-m3", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_SHA3, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
+ AArch64CPUTestParams("apple-m4", "armv8.7-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_SHA3,
+ AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_LSE, AArch64::AEK_RAS,
+ AArch64::AEK_RDM, AArch64::AEK_RCPC,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_BF16,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
+ AArch64::AEK_SME, AArch64::AEK_SME2,
+ AArch64::AEK_SMEF64F64, AArch64::AEK_SMEI16I64,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("exynos-m3", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"exynos-m4", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"exynos-m5", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "falkor", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RDM,
- AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "kryo", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("falkor", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_RDM,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("kryo", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"neoverse-e1", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RCPC,
- AArch64::AEK_RDM, AArch64::AEK_SIMD, AArch64::AEK_SSBS,
- AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_LSE, AArch64::AEK_RAS, AArch64::AEK_RCPC,
+ AArch64::AEK_RDM, AArch64::AEK_SIMD, AArch64::AEK_SSBS,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"neoverse-n1", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_LSE, AArch64::AEK_PROFILE, AArch64::AEK_RAS,
- AArch64::AEK_RCPC, AArch64::AEK_RDM, AArch64::AEK_SIMD,
- AArch64::AEK_SSBS, AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "neoverse-n2", "armv9-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_FP16,
- AArch64::AEK_RAS, AArch64::AEK_LSE,
- AArch64::AEK_SVE, AArch64::AEK_DOTPROD,
- AArch64::AEK_RCPC, AArch64::AEK_RDM,
- AArch64::AEK_MTE, AArch64::AEK_SSBS,
- AArch64::AEK_SB, AArch64::AEK_SVE2,
- AArch64::AEK_SVE2BITPERM, AArch64::AEK_BF16,
- AArch64::AEK_I8MM, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
- AArch64::AEK_FP16FML, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_TRBE}),
- "9-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "neoverse-n3", "armv9.2-a", "neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_I8MM,
- AArch64::AEK_SVE, AArch64::AEK_SVE2,
- AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_SIMD, AArch64::AEK_RCPC,
- AArch64::AEK_RAS, AArch64::AEK_CRC,
- AArch64::AEK_FP, AArch64::AEK_PROFILE,
- AArch64::AEK_MTE, AArch64::AEK_SSBS,
- AArch64::AEK_SB, AArch64::AEK_PREDRES,
- AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
- AArch64::AEK_FLAGM, AArch64::AEK_PERFMON,
- AArch64::AEK_RAND, AArch64::AEK_SVE2BITPERM,
- AArch64::AEK_FP16FML, AArch64::AEK_PROFILE,
- AArch64::AEK_JSCVT, AArch64::AEK_PERFMON,
- AArch64::AEK_ETE, AArch64::AEK_SPE_EEF}),
- "9.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_DOTPROD, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_LSE, AArch64::AEK_PROFILE, AArch64::AEK_RAS,
+ AArch64::AEK_RCPC, AArch64::AEK_RDM, AArch64::AEK_SIMD,
+ AArch64::AEK_SSBS, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("neoverse-n2", "armv9-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_FP16,
+ AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_SVE, AArch64::AEK_DOTPROD,
+ AArch64::AEK_RCPC, AArch64::AEK_RDM,
+ AArch64::AEK_MTE, AArch64::AEK_SSBS,
+ AArch64::AEK_SB, AArch64::AEK_SVE2,
+ AArch64::AEK_SVE2BITPERM, AArch64::AEK_BF16,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
+ AArch64::AEK_FP16FML, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_TRBE}),
+ AArch64CPUTestParams("neoverse-n3", "armv9.2-a", "neon-fp-armv8",
+ {AArch64::AEK_BF16, AArch64::AEK_I8MM,
+ AArch64::AEK_SVE, AArch64::AEK_SVE2,
+ AArch64::AEK_FP16, AArch64::AEK_DOTPROD,
+ AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_SIMD, AArch64::AEK_RCPC,
+ AArch64::AEK_RAS, AArch64::AEK_CRC,
+ AArch64::AEK_FP, AArch64::AEK_PROFILE,
+ AArch64::AEK_MTE, AArch64::AEK_SSBS,
+ AArch64::AEK_SB, AArch64::AEK_PREDRES,
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
+ AArch64::AEK_FLAGM, AArch64::AEK_PERFMON,
+ AArch64::AEK_RAND, AArch64::AEK_SVE2BITPERM,
+ AArch64::AEK_FP16FML, AArch64::AEK_PROFILE,
+ AArch64::AEK_JSCVT, AArch64::AEK_PERFMON,
+ AArch64::AEK_ETE, AArch64::AEK_SPE_EEF}),
+ AArch64CPUTestParams(
"ampere1", "armv8.6-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
- AArch64::AEK_RDM, AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_SHA3, AArch64::AEK_BF16, AArch64::AEK_SHA2,
- AArch64::AEK_AES, AArch64::AEK_I8MM, AArch64::AEK_SSBS,
- AArch64::AEK_SB, AArch64::AEK_RAND, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.6-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_RDM, AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
+ AArch64::AEK_SHA3, AArch64::AEK_BF16, AArch64::AEK_SHA2,
+ AArch64::AEK_AES, AArch64::AEK_I8MM, AArch64::AEK_SSBS,
+ AArch64::AEK_SB, AArch64::AEK_RAND, AArch64::AEK_JSCVT,
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"ampere1a", "armv8.6-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
- AArch64::AEK_RDM, AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_SM4, AArch64::AEK_SHA3, AArch64::AEK_BF16,
- AArch64::AEK_SHA2, AArch64::AEK_AES, AArch64::AEK_I8MM,
- AArch64::AEK_SSBS, AArch64::AEK_SB, AArch64::AEK_RAND,
- AArch64::AEK_MTE, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
- "8.6-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_RDM, AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
+ AArch64::AEK_SM4, AArch64::AEK_SHA3, AArch64::AEK_BF16,
+ AArch64::AEK_SHA2, AArch64::AEK_AES, AArch64::AEK_I8MM,
+ AArch64::AEK_SSBS, AArch64::AEK_SB, AArch64::AEK_RAND,
+ AArch64::AEK_MTE, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"ampere1b", "armv8.7-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_FP16,
- AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
- AArch64::AEK_RDM, AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_SM4, AArch64::AEK_SHA3, AArch64::AEK_BF16,
- AArch64::AEK_SHA2, AArch64::AEK_AES, AArch64::AEK_I8MM,
- AArch64::AEK_SSBS, AArch64::AEK_SB, AArch64::AEK_RAND,
- AArch64::AEK_MTE, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_CSSC, AArch64::AEK_PERFMON,
- AArch64::AEK_WFXT}),
- "8.7-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_FP16,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_RDM, AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
+ AArch64::AEK_SM4, AArch64::AEK_SHA3, AArch64::AEK_BF16,
+ AArch64::AEK_SHA2, AArch64::AEK_AES, AArch64::AEK_I8MM,
+ AArch64::AEK_SSBS, AArch64::AEK_SB, AArch64::AEK_RAND,
+ AArch64::AEK_MTE, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_CSSC, AArch64::AEK_PERFMON,
+ AArch64::AEK_WFXT}),
+ AArch64CPUTestParams(
"neoverse-512tvb", "armv8.4-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_RAS, AArch64::AEK_SVE,
- AArch64::AEK_SSBS, AArch64::AEK_RCPC,
- AArch64::AEK_CRC, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RAS,
- AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
- AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_SHA3, AArch64::AEK_SM4,
- AArch64::AEK_FP16, AArch64::AEK_BF16,
- AArch64::AEK_PROFILE, AArch64::AEK_RAND,
- AArch64::AEK_FP16FML, AArch64::AEK_I8MM,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON,
- AArch64::AEK_CCDP}),
- "8.4-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "thunderx2t99", "armv8.1-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_LSE,
- AArch64::AEK_RDM, AArch64::AEK_FP,
- AArch64::AEK_SIMD}),
- "8.1-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "thunderx3t110", "armv8.3-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_LSE, AArch64::AEK_RDM, AArch64::AEK_FP,
- AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_RCPC,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
- AArch64::AEK_PERFMON}),
- "8.3-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "thunderx", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_SIMD,
- AArch64::AEK_FP, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "thunderxt81", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_SIMD,
- AArch64::AEK_FP, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "thunderxt83", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_SIMD,
- AArch64::AEK_FP, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "thunderxt88", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset({AArch64::AEK_CRC, AArch64::AEK_AES,
- AArch64::AEK_SHA2, AArch64::AEK_SIMD,
- AArch64::AEK_FP, AArch64::AEK_PERFMON}),
- "8-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_RAS, AArch64::AEK_SVE, AArch64::AEK_SSBS,
+ AArch64::AEK_RCPC, AArch64::AEK_CRC, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_RDM, AArch64::AEK_RCPC, AArch64::AEK_DOTPROD,
+ AArch64::AEK_AES, AArch64::AEK_SHA2, AArch64::AEK_SHA3,
+ AArch64::AEK_SM4, AArch64::AEK_FP16, AArch64::AEK_BF16,
+ AArch64::AEK_PROFILE, AArch64::AEK_RAND, AArch64::AEK_FP16FML,
+ AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_CCDP}),
+ AArch64CPUTestParams("thunderx2t99", "armv8.1-a",
+ "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_LSE,
+ AArch64::AEK_RDM, AArch64::AEK_FP,
+ AArch64::AEK_SIMD}),
+ AArch64CPUTestParams(
+ "thunderx3t110",
+ "armv8.3-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
+ AArch64::AEK_LSE, AArch64::AEK_RDM, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_RAS, AArch64::AEK_RCPC,
+ AArch64::AEK_JSCVT, AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("thunderx", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_SIMD,
+ AArch64::AEK_FP, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("thunderxt81", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_SIMD,
+ AArch64::AEK_FP, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("thunderxt83", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_SIMD,
+ AArch64::AEK_FP, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("thunderxt88", "armv8-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_SIMD,
+ AArch64::AEK_FP, AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams(
"tsv110", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD, AArch64::AEK_RAS,
- AArch64::AEK_LSE, AArch64::AEK_RDM, AArch64::AEK_PROFILE,
- AArch64::AEK_JSCVT, AArch64::AEK_FCMA, AArch64::AEK_FP16,
- AArch64::AEK_FP16FML, AArch64::AEK_DOTPROD,
- AArch64::AEK_PERFMON}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "a64fx", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD, AArch64::AEK_FP16,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_SVE,
- AArch64::AEK_RDM, AArch64::AEK_PERFMON, AArch64::AEK_FCMA}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
- "carmel", "armv8.2-a", "crypto-neon-fp-armv8",
- AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
- AArch64::AEK_FP, AArch64::AEK_SIMD, AArch64::AEK_FP16,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_RDM}),
- "8.2-A"),
- ARMCPUTestParams<AArch64::ExtensionBitset>(
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_PROFILE, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
+ AArch64::AEK_FP16, AArch64::AEK_FP16FML, AArch64::AEK_DOTPROD,
+ AArch64::AEK_PERFMON}),
+ AArch64CPUTestParams("a64fx", "armv8.2-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_FP16,
+ AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_SVE, AArch64::AEK_RDM,
+ AArch64::AEK_PERFMON, AArch64::AEK_FCMA}),
+ AArch64CPUTestParams("carmel", "armv8.2-a", "crypto-neon-fp-armv8",
+ {AArch64::AEK_CRC, AArch64::AEK_AES,
+ AArch64::AEK_SHA2, AArch64::AEK_FP,
+ AArch64::AEK_SIMD, AArch64::AEK_FP16,
+ AArch64::AEK_RAS, AArch64::AEK_LSE,
+ AArch64::AEK_RDM}),
+ AArch64CPUTestParams(
"oryon-1", "armv8.6-a", "crypto-neon-fp-armv8",
- (AArch64::ExtensionBitset(
- {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_PAUTH,
- AArch64::AEK_FCMA, AArch64::AEK_JSCVT, AArch64::AEK_SIMD,
- AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_RDM,
- AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_SM4,
- AArch64::AEK_SHA3, AArch64::AEK_BF16, AArch64::AEK_SHA2,
- AArch64::AEK_AES, AArch64::AEK_I8MM, AArch64::AEK_RAND,
- AArch64::AEK_PROFILE, AArch64::AEK_PERFMON})),
- "8.6-A")),
-
- ARMCPUTestParams<AArch64::ExtensionBitset>::PrintToStringParamName);
+ {AArch64::AEK_CRC, AArch64::AEK_FP, AArch64::AEK_PAUTH,
+ AArch64::AEK_FCMA, AArch64::AEK_JSCVT, AArch64::AEK_SIMD,
+ AArch64::AEK_RAS, AArch64::AEK_LSE, AArch64::AEK_RDM,
+ AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_SM4,
+ AArch64::AEK_SHA3, AArch64::AEK_BF16, AArch64::AEK_SHA2,
+ AArch64::AEK_AES, AArch64::AEK_I8MM, AArch64::AEK_RAND,
+ AArch64::AEK_PROFILE, AArch64::AEK_PERFMON})),
+
+ AArch64CPUTestParams::PrintToStringParamName);
// Note: number of CPUs includes aliases.
static constexpr unsigned NumAArch64CPUArchs = 81;
@@ -2148,8 +2011,7 @@ TEST(TargetParserTest, AArch64ExtensionFeatures) {
// NONE has no feature names.
// We return True here because NONE is a valid choice.
- EXPECT_TRUE(AArch64::getExtensionFeatures(
- AArch64::ExtensionBitset({AArch64::AEK_NONE}), Features));
+ EXPECT_TRUE(AArch64::getExtensionFeatures({AArch64::AEK_NONE}, Features));
EXPECT_TRUE(!Features.size());
AArch64::getExtensionFeatures(ExtVal, Features);
From 94471e6d238acab291b5b652fc18f17c4815cc7d Mon Sep 17 00:00:00 2001
From: Arthur Eubanks <aeubanks at google.com>
Date: Wed, 3 Jul 2024 11:14:49 -0600
Subject: [PATCH 154/246] [MLInliner] Handle CGSCC changes from #94815 (#96274)
With #94815, the nodes belonging to dead functions are no longer
invalidated immediately, but are kept around and batch-deleted at the
end of the call graph walk.
The ML inliner needs to be updated to handle this. This fixes some
asserts getting hit, e.g. https://crbug.com/348376263.
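As a minimal sketch of the new invariant (a plain Python toy model, not
LLVM code; all names are illustrative): once dead nodes are only
batch-deleted at the end of the call graph walk, the advisor has to drop
a deleted callee from its own working set eagerly, and can then assert
that no dead node is ever visible on pass entry.

class Node:
    def __init__(self, name):
        self.name = name
        self.is_dead = False

class ToyAdvisor:
    def __init__(self, nodes):
        # Mirrors NodesInLastSCC in MLInlineAdvisor.
        self.nodes_in_last_scc = set(nodes)

    def on_successful_inlining(self, callee, callee_was_deleted):
        if callee_was_deleted:
            callee.is_dead = True
            # The graph keeps the node until the end of the walk, so
            # remove it from our own bookkeeping here and now.
            self.nodes_in_last_scc.discard(callee)

    def on_pass_entry(self):
        # The old code skipped dead nodes here; now none should exist.
        assert all(not n.is_dead for n in self.nodes_in_last_scc)

f, g = Node("f"), Node("g")
advisor = ToyAdvisor([f, g])
advisor.on_successful_inlining(g, callee_was_deleted=True)
advisor.on_pass_entry()  # Passes: g was dropped at deletion time.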
---
llvm/lib/Analysis/MLInlineAdvisor.cpp | 20 +++++++++----------
llvm/test/Transforms/Inline/ML/dead-callee.ll | 17 ++++++++++++++++
2 files changed, 27 insertions(+), 10 deletions(-)
create mode 100644 llvm/test/Transforms/Inline/ML/dead-callee.ll
diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp
index 8131acb3f0df3..b59aa4810005b 100644
--- a/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -211,14 +211,12 @@ void MLInlineAdvisor::onPassEntry(LazyCallGraph::SCC *CurSCC) {
// care about the nature of the Edge (call or ref). `FunctionLevels`-wise, we
// record them at the same level as the original node (this is a choice, may
// need revisiting).
+ // - nodes are only deleted at the end of a call graph walk where they are
+ // batch deleted, so we shouldn't see any dead nodes here.
while (!NodesInLastSCC.empty()) {
const auto *N = *NodesInLastSCC.begin();
+ assert(!N->isDead());
NodesInLastSCC.erase(N);
- // The Function wrapped by N could have been deleted since we last saw it.
- if (N->isDead()) {
- assert(!N->getFunction().isDeclaration());
- continue;
- }
EdgeCount += getLocalCalls(N->getFunction());
const auto NLevel = FunctionLevels.at(N);
for (const auto &E : *(*N)) {
@@ -256,11 +254,9 @@ void MLInlineAdvisor::onPassExit(LazyCallGraph::SCC *CurSCC) {
EdgesOfLastSeenNodes = 0;
// Check on nodes that were in SCC onPassEntry
- for (auto I = NodesInLastSCC.begin(); I != NodesInLastSCC.end();) {
- if ((*I)->isDead())
- NodesInLastSCC.erase(*I++);
- else
- EdgesOfLastSeenNodes += getLocalCalls((*I++)->getFunction());
+ for (const LazyCallGraph::Node *N : NodesInLastSCC) {
+ assert(!N->isDead());
+ EdgesOfLastSeenNodes += getLocalCalls(N->getFunction());
}
// Check on nodes that may have got added to SCC
@@ -311,8 +307,12 @@ void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
int64_t NewCallerAndCalleeEdges =
getCachedFPI(*Caller).DirectCallsToDefinedFunctions;
+ // A dead function's node is not actually removed from the call graph until
+ // the end of the call graph walk, but the node no longer belongs to any valid
+ // SCC.
if (CalleeWasDeleted) {
--NodeCount;
+ NodesInLastSCC.erase(CG.lookup(*Callee));
DeadFunctions.insert(Callee);
} else {
NewCallerAndCalleeEdges +=
diff --git a/llvm/test/Transforms/Inline/ML/dead-callee.ll b/llvm/test/Transforms/Inline/ML/dead-callee.ll
new file mode 100644
index 0000000000000..a88655777e6f9
--- /dev/null
+++ b/llvm/test/Transforms/Inline/ML/dead-callee.ll
@@ -0,0 +1,17 @@
+; REQUIRES: llvm_inliner_model_autogenerated
+; RUN: opt -passes=inliner-ml-advisor-release -S < %s | FileCheck %s
+
+; Check that our accounting works when a function in a non-trivial SCC is dead.
+
+; CHECK: define void @f
+; CHECK-NOT: @g
+
+define void @f() {
+ call void @g()
+ ret void
+}
+
+define internal void @g() {
+ call void @f()
+ ret void
+}
From 77d131eddb6ca9060c844fae9cb78779fa70c8f0 Mon Sep 17 00:00:00 2001
From: jimingham <jingham at apple.com>
Date: Wed, 3 Jul 2024 10:39:34 -0700
Subject: [PATCH 155/246] Add the ability for Script based commands to specify
their "repeat command" (#94823)
Among other things, returning an empty string as the repeat command
disables auto-repeat, which can be useful for state-changing commands.
There's one remaining refinement to this setup: for parsed script
commands, it should be possible to change an option value, or add an
option value that wasn't originally specified, and then ask lldb to turn
that back into a command string. That would make doing fancy things with
repeat commands easier.
That capability isn't present on the lldb_private side either, however,
so that's left for a future iteration.
I haven't added this to the docs on adding commands yet. I wanted to
make sure this was an acceptable approach before I spend the time to do
that.
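For illustration, a raw scripted command using the new hook might look
like the sketch below. It is written against the interface documented in
this patch; the module name (page_cmd) and the paging behavior are
hypothetical, not part of the patch.

import lldb

class PageCommand:
    """Pages through a range by chaining repeat commands."""

    def __init__(self, debugger, internal_dict):
        pass

    def __call__(self, debugger, command, exe_ctx, result):
        start = int(command) if command.strip() else 0
        result.AppendMessage(f"showing items {start}..{start + 9}")

    def get_repeat_command(self, command):
        # A plain return at the next prompt shows the next page.
        # Returning "" here would disable auto-repeat entirely, and
        # returning None would keep the default verbatim repeat.
        start = int(command) if command.strip() else 0
        return f"page {start + 10}"

    def get_short_help(self):
        return "Demonstrates get_repeat_command by paging."

def __lldb_init_module(debugger, internal_dict):
    # Assumes this file was imported as module 'page_cmd' with
    # 'command script import'.
    debugger.HandleCommand("command script add -c page_cmd.PageCommand page")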
---
lldb/bindings/python/python-wrapper.swig | 22 +++++++
lldb/docs/use/python-reference.rst | 12 ++++
lldb/examples/python/cmdtemplate.py | 6 +-
lldb/include/lldb/Interpreter/CommandObject.h | 4 ++
.../lldb/Interpreter/ScriptInterpreter.h | 6 ++
.../source/Commands/CommandObjectCommands.cpp | 31 +++++++++-
lldb/source/Commands/CommandObjectThread.cpp | 2 +-
.../Python/SWIGPythonBridge.h | 4 ++
.../Python/ScriptInterpreterPython.cpp | 27 ++++++++
.../Python/ScriptInterpreterPythonImpl.h | 9 ++-
.../script/add/TestAddParsedCommand.py | 61 +++++++++++++++++--
.../command/script/add/test_commands.py | 25 +++++++-
.../Python/PythonTestSuite.cpp | 6 ++
13 files changed, 202 insertions(+), 13 deletions(-)
diff --git a/lldb/bindings/python/python-wrapper.swig b/lldb/bindings/python/python-wrapper.swig
index 7915f7c4b2076..8f050643fa68b 100644
--- a/lldb/bindings/python/python-wrapper.swig
+++ b/lldb/bindings/python/python-wrapper.swig
@@ -728,6 +728,28 @@ bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallCommandObject(
return true;
}
+std::optional<std::string>
+lldb_private::python::SWIGBridge::LLDBSwigPythonGetRepeatCommandForScriptedCommand(PyObject *implementor,
+ std::string &command) {
+ PyErr_Cleaner py_err_cleaner(true);
+
+ PythonObject self(PyRefType::Borrowed, implementor);
+ auto pfunc = self.ResolveName<PythonCallable>("get_repeat_command");
+ // If not implemented, repeat the exact command.
+ if (!pfunc.IsAllocated())
+ return std::nullopt;
+
+ PythonString command_str(command);
+ PythonObject result = pfunc(command_str);
+
+ // A return of None is the equivalent of nullopt - means repeat
+ // the command as is:
+ if (result.IsNone())
+ return std::nullopt;
+
+ return result.Str().GetString().str();
+}
+
#include "lldb/Interpreter/CommandReturnObject.h"
bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallParsedCommandObject(
diff --git a/lldb/docs/use/python-reference.rst b/lldb/docs/use/python-reference.rst
index 795e38fab3794..041e541a96f08 100644
--- a/lldb/docs/use/python-reference.rst
+++ b/lldb/docs/use/python-reference.rst
@@ -562,6 +562,18 @@ which should implement the following interface:
this call should return the short help text for this command[1]
def get_long_help(self):
this call should return the long help text for this command[1]
+ def get_repeat_command(self, command):
+ The auto-repeat command is what will get executed when the user types just
+ a return at the next prompt after this command is run. Even if your command
+ was run because it was specified as a repeat command, that invocation will still
+ get asked for ITS repeat command, so you can chain a series of repeats, for instance
+ to implement a pager.
+
+ The command argument is the command that is about to be executed.
+
+ If this call returns None, then the ordinary repeat mechanism will be used.
+ If this call returns an empty string, then auto-repeat is disabled.
+ If this call returns any other string, that will be the repeat command. [1]
[1] This method is optional.
diff --git a/lldb/examples/python/cmdtemplate.py b/lldb/examples/python/cmdtemplate.py
index 49a08365268f8..9a96888508b6f 100644
--- a/lldb/examples/python/cmdtemplate.py
+++ b/lldb/examples/python/cmdtemplate.py
@@ -19,7 +19,7 @@ class FrameStatCommand(ParsedCommand):
@classmethod
def register_lldb_command(cls, debugger, module_name):
- ParsedCommandBase.do_register_cmd(cls, debugger, module_name)
+ ParsedCommand.do_register_cmd(cls, debugger, module_name)
print(
'The "{0}" command has been installed, type "help {0}" or "{0} '
'--help" for detailed help.'.format(cls.program)
@@ -72,6 +72,10 @@ def setup_command_definition(self):
default = True,
)
+ def get_repeat_command(self, args):
+ """As an example, make the command not auto-repeat:"""
+ return ""
+
def get_short_help(self):
return "Example command for use in debugging"
diff --git a/lldb/include/lldb/Interpreter/CommandObject.h b/lldb/include/lldb/Interpreter/CommandObject.h
index a641a468b49d2..d48dbcdd5a5da 100644
--- a/lldb/include/lldb/Interpreter/CommandObject.h
+++ b/lldb/include/lldb/Interpreter/CommandObject.h
@@ -297,6 +297,10 @@ class CommandObject : public std::enable_shared_from_this<CommandObject> {
/// \param[in] current_command_args
/// The command arguments.
///
+ /// \param[in] index
+ /// This is for internal use - it is how the completion request is tracked
+ /// in CommandObjectMultiword, and should otherwise be ignored.
+ ///
/// \return
/// std::nullopt if there is no special repeat command - it will use the
/// current command line.
diff --git a/lldb/include/lldb/Interpreter/ScriptInterpreter.h b/lldb/include/lldb/Interpreter/ScriptInterpreter.h
index 14a52709c1e61..05f0d7f0955f3 100644
--- a/lldb/include/lldb/Interpreter/ScriptInterpreter.h
+++ b/lldb/include/lldb/Interpreter/ScriptInterpreter.h
@@ -439,6 +439,12 @@ class ScriptInterpreter : public PluginInterface {
return false;
}
+ virtual std::optional<std::string>
+ GetRepeatCommandForScriptedCommand(StructuredData::GenericSP impl_obj_sp,
+ Args &args) {
+ return std::nullopt;
+ }
+
virtual bool RunScriptFormatKeyword(const char *impl_function,
Process *process, std::string &output,
Status &error) {
diff --git a/lldb/source/Commands/CommandObjectCommands.cpp b/lldb/source/Commands/CommandObjectCommands.cpp
index f4903e373b086..c63445b7c8c86 100644
--- a/lldb/source/Commands/CommandObjectCommands.cpp
+++ b/lldb/source/Commands/CommandObjectCommands.cpp
@@ -1142,6 +1142,15 @@ class CommandObjectScriptingObjectRaw : public CommandObjectRaw {
ScriptedCommandSynchronicity GetSynchronicity() { return m_synchro; }
+ std::optional<std::string> GetRepeatCommand(Args &args,
+ uint32_t index) override {
+ ScriptInterpreter *scripter = GetDebugger().GetScriptInterpreter();
+ if (!scripter)
+ return std::nullopt;
+
+ return scripter->GetRepeatCommandForScriptedCommand(m_cmd_obj_sp, args);
+ }
+
llvm::StringRef GetHelp() override {
if (m_fetched_help_short)
return CommandObjectRaw::GetHelp();
@@ -1588,7 +1597,9 @@ class CommandObjectScriptingObjectParsed : public CommandObjectParsed {
options.ForEach(add_element);
return error;
}
-
+
+ size_t GetNumOptions() { return m_num_options; }
+
private:
struct EnumValueStorage {
EnumValueStorage() {
@@ -1827,6 +1838,15 @@ class CommandObjectScriptingObjectParsed : public CommandObjectParsed {
ScriptedCommandSynchronicity GetSynchronicity() { return m_synchro; }
+ std::optional<std::string> GetRepeatCommand(Args &args,
+ uint32_t index) override {
+ ScriptInterpreter *scripter = GetDebugger().GetScriptInterpreter();
+ if (!scripter)
+ return std::nullopt;
+
+ return scripter->GetRepeatCommandForScriptedCommand(m_cmd_obj_sp, args);
+ }
+
llvm::StringRef GetHelp() override {
if (m_fetched_help_short)
return CommandObjectParsed::GetHelp();
@@ -1857,9 +1877,14 @@ class CommandObjectScriptingObjectParsed : public CommandObjectParsed {
SetHelpLong(docstring);
return CommandObjectParsed::GetHelpLong();
}
-
- Options *GetOptions() override { return &m_options; }
+ Options *GetOptions() override {
+ // CommandObjectParsed requires that a command with no options return
+ // nullptr.
+ if (m_options.GetNumOptions() == 0)
+ return nullptr;
+ return &m_options;
+ }
protected:
void DoExecute(Args &args,
diff --git a/lldb/source/Commands/CommandObjectThread.cpp b/lldb/source/Commands/CommandObjectThread.cpp
index 5e64dd2f8f084..4398cf3c3b89e 100644
--- a/lldb/source/Commands/CommandObjectThread.cpp
+++ b/lldb/source/Commands/CommandObjectThread.cpp
@@ -132,7 +132,7 @@ class CommandObjectThreadBacktrace : public CommandObjectIterateOverThreads {
Options *GetOptions() override { return &m_options; }
std::optional<std::string> GetRepeatCommand(Args ¤t_args,
- uint32_t idx) override {
+ uint32_t index) override {
llvm::StringRef count_opt("--count");
llvm::StringRef start_opt("--start");
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h b/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
index 95eb5a782097b..3026b6113ae8f 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
@@ -206,6 +206,10 @@ class SWIGBridge {
lldb_private::CommandReturnObject &cmd_retobj,
lldb::ExecutionContextRefSP exe_ctx_ref_sp);
+ static std::optional<std::string>
+ LLDBSwigPythonGetRepeatCommandForScriptedCommand(PyObject *implementor,
+ std::string &command);
+
static bool LLDBSwigPythonCallModuleInit(const char *python_module_name,
const char *session_dictionary_name,
lldb::DebuggerSP debugger);
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
index 70c9f94754418..70fa6d83e306f 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
@@ -2708,6 +2708,33 @@ bool ScriptInterpreterPythonImpl::RunScriptBasedParsedCommand(
return ret_val;
}
+std::optional<std::string>
+ScriptInterpreterPythonImpl::GetRepeatCommandForScriptedCommand(
+ StructuredData::GenericSP impl_obj_sp, Args &args) {
+ if (!impl_obj_sp || !impl_obj_sp->IsValid())
+ return std::nullopt;
+
+ lldb::DebuggerSP debugger_sp = m_debugger.shared_from_this();
+
+ if (!debugger_sp.get())
+ return std::nullopt;
+
+ std::optional<std::string> ret_val;
+
+ {
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN,
+ Locker::FreeLock);
+
+ StructuredData::ArraySP args_arr_sp(new StructuredData::Array());
+
+ // For scripting commands, we send the command string:
+ std::string command;
+ args.GetQuotedCommandString(command);
+ ret_val = SWIGBridge::LLDBSwigPythonGetRepeatCommandForScriptedCommand(
+ static_cast<PyObject *>(impl_obj_sp->GetValue()), command);
+ }
+ return ret_val;
+}
/// In Python, a special attribute __doc__ contains the docstring for an object
/// (function, method, class, ...) if any is defined Otherwise, the attribute's
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
index fa23540534738..c2024efb395d7 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
@@ -160,13 +160,16 @@ class ScriptInterpreterPythonImpl : public ScriptInterpreterPython {
lldb_private::CommandReturnObject &cmd_retobj, Status &error,
const lldb_private::ExecutionContext &exe_ctx) override;
- virtual bool RunScriptBasedParsedCommand(
- StructuredData::GenericSP impl_obj_sp, Args& args,
+ bool RunScriptBasedParsedCommand(
+ StructuredData::GenericSP impl_obj_sp, Args &args,
ScriptedCommandSynchronicity synchronicity,
lldb_private::CommandReturnObject &cmd_retobj, Status &error,
const lldb_private::ExecutionContext &exe_ctx) override;
-
+ std::optional<std::string>
+ GetRepeatCommandForScriptedCommand(StructuredData::GenericSP impl_obj_sp,
+ Args &args) override;
+
Status GenerateFunction(const char *signature, const StringList &input,
bool is_callback) override;
diff --git a/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py b/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py
index d30b0b67124ed..c7680e9bb7f41 100644
--- a/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py
+++ b/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py
@@ -16,6 +16,11 @@ class ParsedCommandTestCase(TestBase):
def test(self):
self.pycmd_tests()
+ def setUp(self):
+ TestBase.setUp(self)
+ self.stdin_path = self.getBuildArtifact("stdin.txt")
+ self.stdout_path = self.getBuildArtifact("stdout.txt")
+
def check_help_options(self, cmd_name, opt_list, substrs=[]):
"""
Pass the command name in cmd_name and a vector of the short option, type & long option.
@@ -29,9 +34,40 @@ def check_help_options(self, cmd_name, opt_list, substrs=[]):
else:
(short_opt, type, long_opt) = elem
substrs.append(f"-{short_opt} <{type}> ( --{long_opt} <{type}> )")
- print(f"Opt Vec\n{substrs}")
self.expect("help " + cmd_name, substrs=substrs)
+ def run_one_repeat(self, commands, expected_num_errors):
+ with open(self.stdin_path, "w") as input_handle:
+ input_handle.write(commands)
+
+ in_fileH = open(self.stdin_path, "r")
+ self.dbg.SetInputFileHandle(in_fileH, False)
+
+ out_fileH = open(self.stdout_path, "w")
+ self.dbg.SetOutputFileHandle(out_fileH, False)
+ self.dbg.SetErrorFileHandle(out_fileH, False)
+
+ options = lldb.SBCommandInterpreterRunOptions()
+ options.SetEchoCommands(False)
+ options.SetPrintResults(True)
+ options.SetPrintErrors(True)
+ options.SetAllowRepeats(True)
+
+ n_errors, quit_requested, has_crashed = self.dbg.RunCommandInterpreter(
+ True, False, options, 0, False, False
+ )
+
+ in_fileH.close()
+ out_fileH.close()
+
+ results = None
+ with open(self.stdout_path, "r") as out_fileH:
+ results = out_fileH.read()
+
+ self.assertEqual(n_errors, expected_num_errors)
+
+ return results
+
def pycmd_tests(self):
source_dir = self.getSourceDir()
test_file_path = os.path.join(source_dir, "test_commands.py")
@@ -168,9 +204,6 @@ def cleanup():
num_completions = interp.HandleCompletionWithDescriptions(
cmd_str, len(cmd_str) - 1, 0, 1000, matches, descriptions
)
- print(
- f"First: {matches.GetStringAtIndex(0)}\nSecond: {matches.GetStringAtIndex(1)}\nThird: {matches.GetStringAtIndex(2)}"
- )
self.assertEqual(num_completions, 1, "Only one completion for source file")
self.assertEqual(matches.GetSize(), 2, "The first element is the complete line")
self.assertEqual(
@@ -197,3 +230,23 @@ def cleanup():
"two-args 'First Argument' 'Second Argument'",
substrs=["0: First Argument", "1: Second Argument"],
)
+
+ # Now make sure get_repeat_command works properly:
+
+ # no-args turns off auto-repeat
+ results = self.run_one_repeat("no-args\n\n", 1)
+ self.assertIn("No auto repeat", results, "Got auto-repeat error")
+
+ # one-arg-no-opt does the normal repeat
+ results = self.run_one_repeat("one-arg-no-opt ONE_ARG\n\n", 0)
+ self.assertEqual(results.count("ONE_ARG"), 2, "We did a normal repeat")
+
+ # two-args adds an argument:
+ results = self.run_one_repeat("two-args FIRST_ARG SECOND_ARG\n\n", 0)
+ self.assertEqual(
+ results.count("FIRST_ARG"), 2, "Passed first arg to both commands"
+ )
+ self.assertEqual(
+ results.count("SECOND_ARG"), 2, "Passed second arg to both commands"
+ )
+ self.assertEqual(results.count("THIRD_ARG"), 1, "Passed third arg in repeat")
diff --git a/lldb/test/API/commands/command/script/add/test_commands.py b/lldb/test/API/commands/command/script/add/test_commands.py
index 68f5a44556366..fcde6cd3ef6dc 100644
--- a/lldb/test/API/commands/command/script/add/test_commands.py
+++ b/lldb/test/API/commands/command/script/add/test_commands.py
@@ -32,6 +32,12 @@ def __call__(self, debugger, args_array, exe_ctx, result):
)
+# Use these to make sure that get_repeat_command sends the right
+# command.
+no_args_repeat = None
+one_arg_repeat = None
+two_arg_repeat = None
+
class NoArgsCommand(ReportingCmd):
program = "no-args"
@@ -96,6 +102,12 @@ def setup_command_definition(self):
default="foo",
)
+ def get_repeat_command(self, command):
+ # No auto-repeat
+ global no_args_repeat
+ no_args_repeat = command
+ return ""
+
def get_short_help(self):
return "Example command for use in debugging"
@@ -118,6 +130,12 @@ def setup_command_definition(self):
[self.ov_parser.make_argument_element(lldb.eArgTypeSourceFile, "plain")]
)
+ def get_repeat_command(self, command):
+ # Repeat the current command
+ global one_arg_repeat
+ one_arg_repeat = command
+ return None
+
def get_short_help(self):
return "Example command for use in debugging"
@@ -187,8 +205,13 @@ def setup_command_definition(self):
]
)
+ def get_repeat_command(self, command):
+ global two_arg_repeat
+ two_arg_repeat = command
+ return command + " THIRD_ARG"
+
def get_short_help(self):
- return "Example command for use in debugging"
+ return "This is my short help string"
def get_long_help(self):
return self.help_string
diff --git a/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp b/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
index 017953b372e3e..0edde54d310fd 100644
--- a/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
+++ b/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
@@ -200,6 +200,12 @@ bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallParsedCommandObject(
return false;
}
+std::optional<std::string>
+LLDBSwigPythonGetRepeatCommandForScriptedCommand(PyObject *implementor,
+ std::string &command) {
+ return std::nullopt;
+}
+
bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallModuleInit(
const char *python_module_name, const char *session_dictionary_name,
lldb::DebuggerSP debugger) {
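A note on the contract these pieces implement: GetRepeatCommand returns
std::optional<std::string>, where std::nullopt selects the default repeat
(re-run the same command), an empty string disables auto-repeat, and any
other string is what the next empty input line will run; the Python
get_repeat_command return values None / "" / str map onto this. A minimal
sketch of a native override under those assumptions (the subclass and the
--verbose flag are hypothetical, not part of the patch):

  class MyParsedCommand : public CommandObjectParsed {
    using CommandObjectParsed::CommandObjectParsed; // inherit constructors
    std::optional<std::string> GetRepeatCommand(Args &current_args,
                                                uint32_t index) override {
      std::string cmd;
      current_args.GetQuotedCommandString(cmd);
      if (cmd.empty())
        return std::string();      // "" -> disable auto-repeat
      if (index == 0)
        return cmd + " --verbose"; // hypothetical flag added on repeat
      return std::nullopt;         // default: re-run the same command
    }
  };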
>From 845dee36ba4161df153ba05009cea615e20eda5a Mon Sep 17 00:00:00 2001
From: jimingham <jingham at apple.com>
Date: Wed, 3 Jul 2024 10:45:20 -0700
Subject: [PATCH 156/246] SBThread::StepInstruction shouldn't discard other
plans (#97493)
This was just a typo; none of the external execution control functions
should discard other plans. In particular, it means that if you stop in a
hand-called function and step an instruction, the function call thread
plan gets unshipped, popping all the function call frames.
I also added a test that asserts the correct behavior. I tested all the
stepping operations even though only StepInstruction was wrong.
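For context, the call site with the typo, as an annotated sketch of the
diff below (the parameter names are assumed from
Thread::QueueThreadPlanForStepSingleInstruction's signature):

  Status new_plan_status;
  // Passing abort_other_plans=true here is what unshipped an in-flight
  // function-call plan (e.g. one created by `expr -i 0 -- call_me(true)`).
  ThreadPlanSP new_plan_sp(thread->QueueThreadPlanForStepSingleInstruction(
      step_over, /*abort_other_plans=*/false, /*stop_other_threads=*/true,
      new_plan_status));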
---
lldb/source/API/SBThread.cpp | 2 +-
.../API/python_api/thread/TestThreadAPI.py | 51 +++++++++++++++++++
lldb/test/API/python_api/thread/main.cpp | 10 ++++
3 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/lldb/source/API/SBThread.cpp b/lldb/source/API/SBThread.cpp
index ac3e2cd25daa9..53643362421d4 100644
--- a/lldb/source/API/SBThread.cpp
+++ b/lldb/source/API/SBThread.cpp
@@ -722,7 +722,7 @@ void SBThread::StepInstruction(bool step_over, SBError &error) {
Thread *thread = exe_ctx.GetThreadPtr();
Status new_plan_status;
ThreadPlanSP new_plan_sp(thread->QueueThreadPlanForStepSingleInstruction(
- step_over, true, true, new_plan_status));
+ step_over, false, true, new_plan_status));
if (new_plan_status.Success())
error = ResumeNewPlan(exe_ctx, new_plan_sp.get());
diff --git a/lldb/test/API/python_api/thread/TestThreadAPI.py b/lldb/test/API/python_api/thread/TestThreadAPI.py
index 0fe91c88c325e..d5fc77532d859 100644
--- a/lldb/test/API/python_api/thread/TestThreadAPI.py
+++ b/lldb/test/API/python_api/thread/TestThreadAPI.py
@@ -52,6 +52,11 @@ def test_negative_indexing(self):
self.build()
self.validate_negative_indexing()
+ def test_StepInstruction(self):
+ """Test that StepInstruction preserves the plan stack."""
+ self.build()
+ self.step_instruction_in_called_function()
+
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
@@ -303,3 +308,49 @@ def validate_negative_indexing(self):
neg_range = range(thread.num_frames, 0, -1)
for pos, neg in zip(pos_range, neg_range):
self.assertEqual(thread.frame[pos].idx, thread.frame[-neg].idx)
+
+ def step_instruction_in_called_function(self):
+ main_file_spec = lldb.SBFileSpec("main.cpp")
+ target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
+ self, "Set break point at this line", main_file_spec
+ )
+ options = lldb.SBExpressionOptions()
+ options.SetIgnoreBreakpoints(False)
+
+ call_me_bkpt = target.BreakpointCreateBySourceRegex(
+ "Set a breakpoint in call_me", main_file_spec
+ )
+ self.assertGreater(
+ call_me_bkpt.GetNumLocations(), 0, "Got at least one location in call_me"
+ )
+ # Now run the expression; this will fail because we stopped at a breakpoint:
+ self.runCmd("expr -i 0 -- call_me(true)", check=False)
+ # Now we should be stopped in call_me:
+ self.assertEqual(
+ thread.frames[0].name, "call_me(bool)", "Stopped in call_me(bool)"
+ )
+ # Now do various API steps. These should not cause the expression context to get unshipped:
+ thread.StepInstruction(False)
+ self.assertEqual(
+ thread.frames[0].name,
+ "call_me(bool)",
+ "Still in call_me(bool) after StepInstruction",
+ )
+ thread.StepInstruction(True)
+ self.assertEqual(
+ thread.frames[0].name,
+ "call_me(bool)",
+ "Still in call_me(bool) after NextInstruction",
+ )
+ thread.StepInto()
+ self.assertEqual(
+ thread.frames[0].name,
+ "call_me(bool)",
+ "Still in call_me(bool) after StepInto",
+ )
+ thread.StepOver(False)
+ self.assertEqual(
+ thread.frames[0].name,
+ "call_me(bool)",
+ "Still in call_me(bool) after StepOver",
+ )
diff --git a/lldb/test/API/python_api/thread/main.cpp b/lldb/test/API/python_api/thread/main.cpp
index dde740a1b6bf6..d4b0ad2372c3d 100644
--- a/lldb/test/API/python_api/thread/main.cpp
+++ b/lldb/test/API/python_api/thread/main.cpp
@@ -5,8 +5,18 @@
char my_char = 'u';
int my_int = 0;
+void
+call_me(bool should_spin) {
+ int counter = 0;
+ if (should_spin) {
+ while (1)
+ counter++; // Set a breakpoint in call_me
+ }
+}
+
int main (int argc, char const *argv[])
{
+ call_me(false);
for (int i = 0; i < 3; ++i) {
printf("my_char='%c'\n", my_char);
++my_char;
>From 9e6b46a9846cf5051c2aaef361af0fe1a76c856e Mon Sep 17 00:00:00 2001
From: David Truby <david.truby at arm.com>
Date: Wed, 3 Jul 2024 18:49:42 +0100
Subject: [PATCH 157/246] [flang] Implement -mcmodel flag (#95411)
This patch implements the -mcmodel flag from clang, allowing the Code
Model to be changed for the LLVM module. The same set of mcmodel
flags is accepted as in clang, and the same Code Model attributes are
added to the LLVM module for those flags.
Also add `-mlarge-data-threshold` for x86-64, which is automatically set
by the shared command-line code (see below). This is also added as an
attribute into the LLVM module and on the target machine.
An `addMCModel` function, copied out of clang's argument handling, is
created so that the logic can be shared with flang.
---------
Co-authored-by: Mats Petersson <mats.petersson at arm.com>
---
clang/include/clang/Driver/Options.td | 4 +-
clang/lib/Driver/ToolChains/Clang.cpp | 76 +----------------
clang/lib/Driver/ToolChains/CommonArgs.cpp | 81 +++++++++++++++++++
clang/lib/Driver/ToolChains/CommonArgs.h | 4 +
clang/lib/Driver/ToolChains/Flang.cpp | 5 ++
flang/include/flang/Frontend/CodeGenOptions.h | 9 +++
flang/lib/Frontend/CodeGenOptions.cpp | 11 +++
flang/lib/Frontend/CompilerInstance.cpp | 10 ++-
flang/lib/Frontend/CompilerInvocation.cpp | 24 ++++++
flang/lib/Frontend/FrontendActions.cpp | 12 +++
flang/test/Driver/large-data-threshold.f90 | 18 +++++
flang/test/Driver/mcmodel.f90 | 44 ++++++++++
flang/test/Lower/large-data-threshold.f90 | 21 +++++
flang/test/Lower/mcmodel.f90 | 16 ++++
14 files changed, 257 insertions(+), 78 deletions(-)
create mode 100644 flang/test/Driver/large-data-threshold.f90
create mode 100644 flang/test/Driver/mcmodel.f90
create mode 100644 flang/test/Lower/large-data-threshold.f90
create mode 100644 flang/test/Lower/mcmodel.f90
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 1ede75d3782cd..af5ed95510ceb 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -4612,10 +4612,10 @@ def inline_asm_EQ : Joined<["-"], "inline-asm=">, Group<m_Group>,
NormalizedValuesScope<"CodeGenOptions">, NormalizedValues<["IAD_ATT", "IAD_Intel"]>,
MarshallingInfoEnum<CodeGenOpts<"InlineAsmDialect">, "IAD_ATT">;
def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>,
- Visibility<[ClangOption, CC1Option]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
MarshallingInfoString<TargetOpts<"CodeModel">, [{"default"}]>;
def mlarge_data_threshold_EQ : Joined<["-"], "mlarge-data-threshold=">, Group<m_Group>,
- Flags<[TargetSpecific]>, Visibility<[ClangOption, CC1Option]>,
+ Flags<[TargetSpecific]>, Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
MarshallingInfoInt<TargetOpts<"LargeDataThreshold">, "0">;
def mtls_size_EQ : Joined<["-"], "mtls-size=">, Group<m_Group>,
Visibility<[ClangOption, CC1Option]>,
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 27c451c565f2b..cf1767a1f644f 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -5909,81 +5909,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
TC.addClangTargetOptions(Args, CmdArgs, JA.getOffloadingDeviceKind());
- if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
- StringRef CM = A->getValue();
- bool Ok = false;
- if (Triple.isOSAIX() && CM == "medium")
- CM = "large";
- if (Triple.isAArch64(64)) {
- Ok = CM == "tiny" || CM == "small" || CM == "large";
- if (CM == "large" && !Triple.isOSBinFormatMachO() &&
- RelocationModel != llvm::Reloc::Static)
- D.Diag(diag::err_drv_argument_only_allowed_with)
- << A->getAsString(Args) << "-fno-pic";
- } else if (Triple.isLoongArch()) {
- if (CM == "extreme" &&
- Args.hasFlagNoClaim(options::OPT_fplt, options::OPT_fno_plt, false))
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << A->getAsString(Args) << "-fplt";
- Ok = CM == "normal" || CM == "medium" || CM == "extreme";
- // Convert to LLVM recognizable names.
- if (Ok)
- CM = llvm::StringSwitch<StringRef>(CM)
- .Case("normal", "small")
- .Case("extreme", "large")
- .Default(CM);
- } else if (Triple.isPPC64() || Triple.isOSAIX()) {
- Ok = CM == "small" || CM == "medium" || CM == "large";
- } else if (Triple.isRISCV()) {
- if (CM == "medlow")
- CM = "small";
- else if (CM == "medany")
- CM = "medium";
- Ok = CM == "small" || CM == "medium";
- } else if (Triple.getArch() == llvm::Triple::x86_64) {
- Ok = llvm::is_contained({"small", "kernel", "medium", "large", "tiny"},
- CM);
- } else if (Triple.isNVPTX() || Triple.isAMDGPU() || Triple.isSPIRV()) {
- // NVPTX/AMDGPU/SPIRV does not care about the code model and will accept
- // whatever works for the host.
- Ok = true;
- } else if (Triple.isSPARC64()) {
- if (CM == "medlow")
- CM = "small";
- else if (CM == "medmid")
- CM = "medium";
- else if (CM == "medany")
- CM = "large";
- Ok = CM == "small" || CM == "medium" || CM == "large";
- }
- if (Ok) {
- CmdArgs.push_back(Args.MakeArgString("-mcmodel=" + CM));
- } else {
- D.Diag(diag::err_drv_unsupported_option_argument_for_target)
- << A->getSpelling() << CM << TripleStr;
- }
- }
-
- if (Triple.getArch() == llvm::Triple::x86_64) {
- bool IsMediumCM = false;
- bool IsLargeCM = false;
- if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
- IsMediumCM = StringRef(A->getValue()) == "medium";
- IsLargeCM = StringRef(A->getValue()) == "large";
- }
- if (Arg *A = Args.getLastArg(options::OPT_mlarge_data_threshold_EQ)) {
- if (!IsMediumCM && !IsLargeCM) {
- D.Diag(diag::warn_drv_large_data_threshold_invalid_code_model)
- << A->getOption().getRenderName();
- } else {
- A->render(Args, CmdArgs);
- }
- } else if (IsMediumCM) {
- CmdArgs.push_back("-mlarge-data-threshold=65536");
- } else if (IsLargeCM) {
- CmdArgs.push_back("-mlarge-data-threshold=0");
- }
- }
+ addMCModel(D, Args, Triple, RelocationModel, CmdArgs);
if (Arg *A = Args.getLastArg(options::OPT_mtls_size_EQ)) {
StringRef Value = A->getValue();
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index be4d7d2cffb16..2cb152f77e501 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -2825,3 +2825,84 @@ void tools::addOffloadCompressArgs(const llvm::opt::ArgList &TCArgs,
CmdArgs.push_back(
TCArgs.MakeArgString(Twine("-compression-level=") + Arg->getValue()));
}
+
+void tools::addMCModel(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple,
+ const llvm::Reloc::Model &RelocationModel,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ StringRef CM = A->getValue();
+ bool Ok = false;
+ if (Triple.isOSAIX() && CM == "medium")
+ CM = "large";
+ if (Triple.isAArch64(64)) {
+ Ok = CM == "tiny" || CM == "small" || CM == "large";
+ if (CM == "large" && !Triple.isOSBinFormatMachO() &&
+ RelocationModel != llvm::Reloc::Static)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-fno-pic";
+ } else if (Triple.isLoongArch()) {
+ if (CM == "extreme" &&
+ Args.hasFlagNoClaim(options::OPT_fplt, options::OPT_fno_plt, false))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-fplt";
+ Ok = CM == "normal" || CM == "medium" || CM == "extreme";
+ // Convert to LLVM recognizable names.
+ if (Ok)
+ CM = llvm::StringSwitch<StringRef>(CM)
+ .Case("normal", "small")
+ .Case("extreme", "large")
+ .Default(CM);
+ } else if (Triple.isPPC64() || Triple.isOSAIX()) {
+ Ok = CM == "small" || CM == "medium" || CM == "large";
+ } else if (Triple.isRISCV()) {
+ if (CM == "medlow")
+ CM = "small";
+ else if (CM == "medany")
+ CM = "medium";
+ Ok = CM == "small" || CM == "medium";
+ } else if (Triple.getArch() == llvm::Triple::x86_64) {
+ Ok = llvm::is_contained({"small", "kernel", "medium", "large", "tiny"},
+ CM);
+ } else if (Triple.isNVPTX() || Triple.isAMDGPU() || Triple.isSPIRV()) {
+ // NVPTX/AMDGPU/SPIRV does not care about the code model and will accept
+ // whatever works for the host.
+ Ok = true;
+ } else if (Triple.isSPARC64()) {
+ if (CM == "medlow")
+ CM = "small";
+ else if (CM == "medmid")
+ CM = "medium";
+ else if (CM == "medany")
+ CM = "large";
+ Ok = CM == "small" || CM == "medium" || CM == "large";
+ }
+ if (Ok) {
+ CmdArgs.push_back(Args.MakeArgString("-mcmodel=" + CM));
+ } else {
+ D.Diag(diag::err_drv_unsupported_option_argument_for_target)
+ << A->getSpelling() << CM << Triple.getTriple();
+ }
+ }
+
+ if (Triple.getArch() == llvm::Triple::x86_64) {
+ bool IsMediumCM = false;
+ bool IsLargeCM = false;
+ if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ IsMediumCM = StringRef(A->getValue()) == "medium";
+ IsLargeCM = StringRef(A->getValue()) == "large";
+ }
+ if (Arg *A = Args.getLastArg(options::OPT_mlarge_data_threshold_EQ)) {
+ if (!IsMediumCM && !IsLargeCM) {
+ D.Diag(diag::warn_drv_large_data_threshold_invalid_code_model)
+ << A->getOption().getRenderName();
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ } else if (IsMediumCM) {
+ CmdArgs.push_back("-mlarge-data-threshold=65536");
+ } else if (IsLargeCM) {
+ CmdArgs.push_back("-mlarge-data-threshold=0");
+ }
+ }
+}
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h
index 5581905db3114..52818ecde924b 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -223,6 +223,10 @@ void addOutlineAtomicsArgs(const Driver &D, const ToolChain &TC,
const llvm::Triple &Triple);
void addOffloadCompressArgs(const llvm::opt::ArgList &TCArgs,
llvm::opt::ArgStringList &CmdArgs);
+void addMCModel(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple,
+ const llvm::Reloc::Model &RelocationModel,
+ llvm::opt::ArgStringList &CmdArgs);
} // end namespace tools
} // end namespace driver
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index 42b45dba2bd31..962a6c2c6b298 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -735,6 +735,11 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
// Add target args, features, etc.
addTargetOptions(Args, CmdArgs);
+ llvm::Reloc::Model RelocationModel =
+ std::get<0>(ParsePICArgs(getToolChain(), Args));
+ // Add MCModel information
+ addMCModel(D, Args, Triple, RelocationModel, CmdArgs);
+
// Add Codegen options
addCodegenOptions(Args, CmdArgs);
diff --git a/flang/include/flang/Frontend/CodeGenOptions.h b/flang/include/flang/Frontend/CodeGenOptions.h
index 3bc5d93c4c43e..ac7fcbcba4f74 100644
--- a/flang/include/flang/Frontend/CodeGenOptions.h
+++ b/flang/include/flang/Frontend/CodeGenOptions.h
@@ -133,6 +133,13 @@ class CodeGenOptions : public CodeGenOptionsBase {
/// transformation.
OptRemark OptimizationRemarkAnalysis;
+ /// The code model to use (-mcmodel).
+ std::string CodeModel;
+
+ /// The code model-specific large data threshold to use
+ /// (-mlarge-data-threshold).
+ uint64_t LargeDataThreshold;
+
// Define accessors/mutators for code generation options of enumeration type.
#define CODEGENOPT(Name, Bits, Default)
#define ENUM_CODEGENOPT(Name, Type, Bits, Default) \
@@ -143,6 +150,8 @@ class CodeGenOptions : public CodeGenOptionsBase {
CodeGenOptions();
};
+std::optional<llvm::CodeModel::Model> getCodeModel(llvm::StringRef string);
+
} // end namespace Fortran::frontend
#endif // FORTRAN_FRONTEND_CODEGENOPTIONS_H
diff --git a/flang/lib/Frontend/CodeGenOptions.cpp b/flang/lib/Frontend/CodeGenOptions.cpp
index a7947182decce..8a9d3c27c8bc3 100644
--- a/flang/lib/Frontend/CodeGenOptions.cpp
+++ b/flang/lib/Frontend/CodeGenOptions.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "flang/Frontend/CodeGenOptions.h"
+#include <optional>
#include <string.h>
namespace Fortran::frontend {
@@ -21,4 +22,14 @@ CodeGenOptions::CodeGenOptions() {
#include "flang/Frontend/CodeGenOptions.def"
}
+std::optional<llvm::CodeModel::Model> getCodeModel(llvm::StringRef string) {
+ return llvm::StringSwitch<std::optional<llvm::CodeModel::Model>>(string)
+ .Case("tiny", llvm::CodeModel::Model::Tiny)
+ .Case("small", llvm::CodeModel::Model::Small)
+ .Case("kernel", llvm::CodeModel::Model::Kernel)
+ .Case("medium", llvm::CodeModel::Model::Medium)
+ .Case("large", llvm::CodeModel::Model::Large)
+ .Default(std::nullopt);
+}
+
} // end namespace Fortran::frontend
diff --git a/flang/lib/Frontend/CompilerInstance.cpp b/flang/lib/Frontend/CompilerInstance.cpp
index c78137346640a..27c36b7f84d89 100644
--- a/flang/lib/Frontend/CompilerInstance.cpp
+++ b/flang/lib/Frontend/CompilerInstance.cpp
@@ -321,11 +321,19 @@ bool CompilerInstance::setUpTargetMachine() {
assert(OptLevelOrNone && "Invalid optimization level!");
llvm::CodeGenOptLevel OptLevel = *OptLevelOrNone;
std::string featuresStr = getTargetFeatures();
+ std::optional<llvm::CodeModel::Model> cm = getCodeModel(CGOpts.CodeModel);
targetMachine.reset(theTarget->createTargetMachine(
theTriple, /*CPU=*/targetOpts.cpu,
/*Features=*/featuresStr, llvm::TargetOptions(),
/*Reloc::Model=*/CGOpts.getRelocationModel(),
- /*CodeModel::Model=*/std::nullopt, OptLevel));
+ /*CodeModel::Model=*/cm, OptLevel));
assert(targetMachine && "Failed to create TargetMachine");
+ if (cm.has_value()) {
+ const llvm::Triple triple(theTriple);
+ if ((cm == llvm::CodeModel::Medium || cm == llvm::CodeModel::Large) &&
+ triple.getArch() == llvm::Triple::x86_64) {
+ targetMachine->setLargeDataThreshold(CGOpts.LargeDataThreshold);
+ }
+ }
return true;
}
diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp
index f96d72f1ad691..e2d60ad46f14f 100644
--- a/flang/lib/Frontend/CompilerInvocation.cpp
+++ b/flang/lib/Frontend/CompilerInvocation.cpp
@@ -32,6 +32,7 @@
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
+#include "llvm/Support/CodeGen.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/Path.h"
@@ -386,6 +387,29 @@ static void parseCodeGenArgs(Fortran::frontend::CodeGenOptions &opts,
opts.IsPIE = 1;
}
+ // -mcmodel option.
+ if (const llvm::opt::Arg *a =
+ args.getLastArg(clang::driver::options::OPT_mcmodel_EQ)) {
+ llvm::StringRef modelName = a->getValue();
+ std::optional<llvm::CodeModel::Model> codeModel = getCodeModel(modelName);
+
+ if (codeModel.has_value())
+ opts.CodeModel = modelName;
+ else
+ diags.Report(clang::diag::err_drv_invalid_value)
+ << a->getAsString(args) << modelName;
+ }
+
+ if (const llvm::opt::Arg *arg = args.getLastArg(
+ clang::driver::options::OPT_mlarge_data_threshold_EQ)) {
+ uint64_t LDT;
+ if (llvm::StringRef(arg->getValue()).getAsInteger(/*Radix=*/10, LDT)) {
+ diags.Report(clang::diag::err_drv_invalid_value)
+ << arg->getSpelling() << arg->getValue();
+ }
+ opts.LargeDataThreshold = LDT;
+ }
+
// This option is compatible with -f[no-]underscoring in gfortran.
if (args.hasFlag(clang::driver::options::OPT_fno_underscoring,
clang::driver::options::OPT_funderscoring, false)) {
diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp
index 24db4df31279a..a85ecd1ac71b3 100644
--- a/flang/lib/Frontend/FrontendActions.cpp
+++ b/flang/lib/Frontend/FrontendActions.cpp
@@ -866,6 +866,17 @@ void CodeGenAction::generateLLVMIR() {
llvmModule->setPIELevel(
static_cast<llvm::PIELevel::Level>(opts.PICLevel));
}
+
+ // Set mcmodel level LLVM module flags
+ std::optional<llvm::CodeModel::Model> cm = getCodeModel(opts.CodeModel);
+ if (cm.has_value()) {
+ const llvm::Triple triple(ci.getInvocation().getTargetOpts().triple);
+ llvmModule->setCodeModel(*cm);
+ if ((cm == llvm::CodeModel::Medium || cm == llvm::CodeModel::Large) &&
+ triple.getArch() == llvm::Triple::x86_64) {
+ llvmModule->setLargeDataThreshold(opts.LargeDataThreshold);
+ }
+ }
}
static std::unique_ptr<llvm::raw_pwrite_stream>
@@ -1280,6 +1291,7 @@ void CodeGenAction::executeAction() {
// and the command-line target option if specified, or the default if not
// given on the command-line).
llvm::TargetMachine &targetMachine = ci.getTargetMachine();
+
const std::string &theTriple = targetMachine.getTargetTriple().str();
if (llvmModule->getTargetTriple() != theTriple) {
diff --git a/flang/test/Driver/large-data-threshold.f90 b/flang/test/Driver/large-data-threshold.f90
new file mode 100644
index 0000000000000..320566c4b2e43
--- /dev/null
+++ b/flang/test/Driver/large-data-threshold.f90
@@ -0,0 +1,18 @@
+! RUN: %flang -### -c --target=x86_64 -mcmodel=large -mlarge-data-threshold=32768 %s 2>&1 | FileCheck %s
+! RUN: %flang -### -c --target=x86_64 -mcmodel=large -mlarge-data-threshold=59000 %s 2>&1 | FileCheck %s --check-prefix=CHECK-59000
+! RUN: %flang -### -c --target=x86_64 -mcmodel=large -mlarge-data-threshold=1048576 %s 2>&1 | FileCheck %s --check-prefix=CHECK-1M
+! RUN: not %flang -c --target=x86_64 -mcmodel=large -mlarge-data-threshold=nonsense %s 2>&1 | FileCheck %s --check-prefix=INVALID
+! RUN: %flang -### -c --target=x86_64 -mlarge-data-threshold=32768 %s 2>&1 | FileCheck %s --check-prefix=NO-MCMODEL
+! RUN: %flang -### -c --target=x86_64 -mcmodel=small -mlarge-data-threshold=32768 %s 2>&1 | FileCheck %s --check-prefix=NO-MCMODEL
+! RUN: not %flang -### -c --target=aarch64 -mcmodel=small -mlarge-data-threshold=32768 %s 2>&1 | FileCheck %s --check-prefix=NOT-SUPPORTED
+
+
+! CHECK: "{{.*}}flang-new" "-fc1"
+! CHECK-SAME: "-mlarge-data-threshold=32768"
+! CHECK-59000: "{{.*}}flang-new" "-fc1"
+! CHECK-59000-SAME: "-mlarge-data-threshold=59000"
+! CHECK-1M: "{{.*}}flang-new" "-fc1"
+! CHECK-1M-SAME: "-mlarge-data-threshold=1048576"
+! NO-MCMODEL: 'mlarge-data-threshold=' only applies to medium and large code models
+! INVALID: error: invalid value 'nonsense' in '-mlarge-data-threshold='
+! NOT-SUPPORTED: error: unsupported option '-mlarge-data-threshold=' for target 'aarch64'
diff --git a/flang/test/Driver/mcmodel.f90 b/flang/test/Driver/mcmodel.f90
new file mode 100644
index 0000000000000..12d90ece2f24f
--- /dev/null
+++ b/flang/test/Driver/mcmodel.f90
@@ -0,0 +1,44 @@
+! RUN: not %flang -### -c --target=i686 -mcmodel=medium %s 2>&1 | FileCheck --check-prefix=ERR-MEDIUM %s
+! RUN: %flang --target=x86_64 -### -c -mcmodel=tiny %s 2>&1 | FileCheck --check-prefix=TINY %s
+! RUN: %flang --target=x86_64 -### -c -mcmodel=small %s 2>&1 | FileCheck --check-prefix=SMALL %s
+! RUN: %flang --target=x86_64 -### -S -mcmodel=kernel %s 2>&1 | FileCheck --check-prefix=KERNEL %s
+! RUN: %flang --target=x86_64 -### -c -mcmodel=medium %s 2>&1 | FileCheck --check-prefix=MEDIUM %s
+! RUN: %flang --target=x86_64 -### -S -mcmodel=large %s 2>&1 | FileCheck --check-prefix=LARGE %s
+! RUN: not %flang -### -c --target=powerpc-linux-gnu -mcmodel=medium %s 2>&1 | FileCheck --check-prefix=ERR-MEDIUM %s
+! RUN: %flang --target=powerpc-unknown-aix -### -S -mcmodel=small %s 2>&1 | FileCheck --check-prefix=SMALL %s
+! RUN: %flang --target=powerpc-unknown-aix -### -S -mcmodel=large %s 2>&1 | FileCheck --check-prefix=LARGE %s
+! RUN: %flang --target=powerpc-unknown-aix -### -S -mcmodel=medium %s 2> %t.log
+! RUN: FileCheck --check-prefix=AIX-MCMEDIUM-OVERRIDE %s < %t.log
+! RUN: not %flang -### -c -mcmodel=lager %s 2>&1 | FileCheck --check-prefix=INVALID %s
+! RUN: %flang --target=aarch64 -### -S -mcmodel=large -fno-pic %s 2>&1 | FileCheck --check-prefix=LARGE %s
+! RUN: %flang --target=aarch64-apple-macosx -### -S -mcmodel=large %s 2>&1 | FileCheck --check-prefix=LARGE %s
+! RUN: not %flang --target=aarch64 -### -S -mcmodel=large -fpic %s 2>&1 | FileCheck --check-prefix=AARCH64-PIC-LARGE %s
+! RUN: not %flang -### -c --target=aarch64 -mcmodel=medium %s 2>&1 | FileCheck --check-prefix=ERR-MEDIUM %s
+! RUN: not %flang -### -c --target=aarch64 -mcmodel=kernel %s 2>&1 | FileCheck --check-prefix=ERR-KERNEL %s
+! RUN: not %flang --target=aarch64_32-linux -### -S -mcmodel=small %s 2>&1 | FileCheck --check-prefix=ERR-AARCH64_32 %s
+! RUN: %flang --target=loongarch64 -### -S -mcmodel=normal %s 2>&1 | FileCheck --check-prefix=SMALL %s
+! RUN: %flang --target=loongarch64 -### -S -mcmodel=medium %s 2>&1 | FileCheck --check-prefix=MEDIUM %s
+! RUN: %flang --target=loongarch64 -### -S -mcmodel=extreme %s 2>&1 | FileCheck --check-prefix=LARGE %s
+! RUN: not %flang --target=loongarch64 -### -S -mcmodel=tiny %s 2>&1 | FileCheck --check-prefix=ERR-TINY %s
+! RUN: not %flang --target=loongarch64 -### -S -mcmodel=small %s 2>&1 | FileCheck --check-prefix=ERR-SMALL %s
+! RUN: not %flang --target=loongarch64 -### -S -mcmodel=kernel %s 2>&1 | FileCheck --check-prefix=ERR-KERNEL %s
+! RUN: not %flang --target=loongarch64 -### -S -mcmodel=large %s 2>&1 | FileCheck --check-prefix=ERR-LARGE %s
+
+! TINY: "-mcmodel=tiny"
+! SMALL: "-mcmodel=small"
+! KERNEL: "-mcmodel=kernel"
+! MEDIUM: "-mcmodel=medium"
+! LARGE: "-mcmodel=large"
+! AIX-MCMEDIUM-OVERRIDE: "-mcmodel=large"
+
+! INVALID: error: unsupported argument 'lager' to option '-mcmodel=' for target '{{.*}}'
+
+! ERR-TINY: error: unsupported argument 'tiny' to option '-mcmodel=' for target '{{.*}}'
+! ERR-SMALL: error: unsupported argument 'small' to option '-mcmodel=' for target '{{.*}}'
+! ERR-MEDIUM: error: unsupported argument 'medium' to option '-mcmodel=' for target '{{.*}}'
+! ERR-KERNEL: error: unsupported argument 'kernel' to option '-mcmodel=' for target '{{.*}}'
+! ERR-LARGE: error: unsupported argument 'large' to option '-mcmodel=' for target '{{.*}}'
+
+! AARCH64-PIC-LARGE: error: invalid argument '-mcmodel=large' only allowed with '-fno-pic'
+! ERR-AARCH64_32: error: unsupported argument 'small' to option '-mcmodel=' for target 'aarch64_32-unknown-linux'
+
diff --git a/flang/test/Lower/large-data-threshold.f90 b/flang/test/Lower/large-data-threshold.f90
new file mode 100644
index 0000000000000..51ddd429d770a
--- /dev/null
+++ b/flang/test/Lower/large-data-threshold.f90
@@ -0,0 +1,21 @@
+! REQUIRES: x86-registered-target
+
+! RUN: %flang_fc1 -triple x86_64-unknown-unknown -emit-llvm %s -o - -mcmodel=medium | FileCheck %s --check-prefix=IR-DEFAULT
+! RUN: %flang_fc1 -triple x86_64-unknown-unknown -emit-llvm %s -o - -mcmodel=medium -mlarge-data-threshold=200 | FileCheck %s --check-prefix=IR-CUSTOM
+! RUN: %flang_fc1 -triple x86_64-unknown-unknown -emit-llvm %s -o - -mcmodel=large -mlarge-data-threshold=200 | FileCheck %s --check-prefix=IR-CUSTOM
+! RUN: %flang_fc1 -triple x86_64-unknown-unknown -emit-llvm %s -o - -mcmodel=small -mlarge-data-threshold=200 | FileCheck %s --check-prefix=IR-SMALL
+! RUN: %flang_fc1 -triple x86_64-unknown-unknown -S %s -o - -mcmodel=medium -mlarge-data-threshold=200 | FileCheck %s --check-prefix=ASM-SMALL
+! RUN: %flang_fc1 -triple x86_64-unknown-unknown -S %s -o - -mcmodel=medium -mlarge-data-threshold=2 | FileCheck %s --check-prefix=ASM-LARGE
+
+! IR-DEFAULT: !{i32 1, !"Large Data Threshold", i64 0}
+! IR-CUSTOM: !{i32 1, !"Large Data Threshold", i64 200}
+! IR-SMALL-NOT: !"Large Data Threshold"
+
+! ASM-SMALL-NOT: movabsq
+! ASM-LARGE: movabsq
+
+function f
+ integer :: f
+ integer, save :: i
+ f = i
+end function f
diff --git a/flang/test/Lower/mcmodel.f90 b/flang/test/Lower/mcmodel.f90
new file mode 100644
index 0000000000000..dd9eb145f5e2a
--- /dev/null
+++ b/flang/test/Lower/mcmodel.f90
@@ -0,0 +1,16 @@
+! RUN: %flang_fc1 -triple aarch64 -emit-llvm -mcmodel=tiny %s -o - | FileCheck %s -check-prefix=CHECK-TINY
+! RUN: %flang_fc1 -emit-llvm -mcmodel=small %s -o - | FileCheck %s -check-prefix=CHECK-SMALL
+! RUN: %flang_fc1 -triple x86_64-unknown-linux-gnu -emit-llvm -mcmodel=kernel %s -o - | FileCheck %s -check-prefix=CHECK-KERNEL
+! RUN: %flang_fc1 -triple x86_64-unknown-linux-gnu -emit-llvm -mcmodel=medium %s -o - | FileCheck %s -check-prefix=CHECK-MEDIUM
+! RUN: %flang_fc1 -emit-llvm -mcmodel=large %s -o - | FileCheck %s -check-prefix=CHECK-LARGE
+
+! CHECK-TINY: !llvm.module.flags = !{{{.*}}}
+! CHECK-TINY: !{{[0-9]+}} = !{i32 1, !"Code Model", i32 0}
+! CHECK-SMALL: !llvm.module.flags = !{{{.*}}}
+! CHECK-SMALL: !{{[0-9]+}} = !{i32 1, !"Code Model", i32 1}
+! CHECK-KERNEL: !llvm.module.flags = !{{{.*}}}
+! CHECK-KERNEL: !{{[0-9]+}} = !{i32 1, !"Code Model", i32 2}
+! CHECK-MEDIUM: !llvm.module.flags = !{{{.*}}}
+! CHECK-MEDIUM: !{{[0-9]+}} = !{i32 1, !"Code Model", i32 3}
+! CHECK-LARGE: !llvm.module.flags = !{{{.*}}}
+! CHECK-LARGE: !{{[0-9]+}} = !{i32 1, !"Code Model", i32 4}
>From a0176533766201eca58b20a11e42ab30c73d1b1b Mon Sep 17 00:00:00 2001
From: Michael Buch <michaelbuch12 at gmail.com>
Date: Wed, 3 Jul 2024 19:54:20 +0200
Subject: [PATCH 158/246] [lldb][DataFormatter][NFC] Factor out MapIterator
logic into separate helper (#97544)
This patch factors all the logic for advancing the `MapIterator` out of
`GetChildAtIndex`. This, in my opinion, helps readability, and will be
useful for upcoming cleanups in this area.
While here, some drive-by changes:
* added a couple of clarification comments
* fixed a variable name typo
* turned the `return lldb::ValueObjectSP()` into `return nullptr`
* added an assertion to make sure we keep the iterator cache in a valid
state
---
.../Plugins/Language/CPlusPlus/LibCxxMap.cpp | 117 +++++++++++-------
1 file changed, 73 insertions(+), 44 deletions(-)
diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
index 44fe294ced722..6c2bc1a34137a 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
@@ -17,6 +17,7 @@
#include "lldb/Utility/Endian.h"
#include "lldb/Utility/Status.h"
#include "lldb/Utility/Stream.h"
+#include "lldb/lldb-forward.h"
using namespace lldb;
using namespace lldb_private;
@@ -184,6 +185,22 @@ class LibcxxStdMapSyntheticFrontEnd : public SyntheticChildrenFrontEnd {
void GetValueOffset(const lldb::ValueObjectSP &node);
+ /// Returns the ValueObject for the __tree_node type that
+ /// holds the key/value pair of the node at index \ref idx.
+ ///
+ /// \param[in] idx The child index that we're looking to get
+ /// the key/value pair for.
+ ///
+ /// \param[in] max_depth The maximum search depth after which
+ /// we stop trying to find the key/value
+ /// pair.
+ ///
+ /// \returns On success, returns the ValueObjectSP corresponding
+ /// to the __tree_node's __value_ member (which holds
+ /// the key/value pair the formatter wants to display).
+ /// On failure, will return nullptr.
+ ValueObjectSP GetKeyValuePair(size_t idx, size_t max_depth);
+
ValueObject *m_tree = nullptr;
ValueObject *m_root_node = nullptr;
CompilerType m_element_type;
@@ -267,7 +284,7 @@ void lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetValueOffset(
uint64_t bit_offset;
if (node_type.GetIndexOfFieldWithName("__value_", nullptr, &bit_offset) !=
UINT32_MAX) {
- // Old layout (pre 089a7cc5dea)
+ // Old layout (pre d05b10ab4fc65)
m_skip_size = bit_offset / 8u;
} else {
auto ast_ctx = node_type.GetTypeSystem().dyn_cast_or_null<TypeSystemClang>();
@@ -299,75 +316,88 @@ void lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetValueOffset(
}
}
-lldb::ValueObjectSP
-lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetChildAtIndex(
- uint32_t idx) {
- static ConstString g_cc_("__cc_"), g_cc("__cc");
- static ConstString g_nc("__nc");
- uint32_t num_children = CalculateNumChildrenIgnoringErrors();
- if (idx >= num_children)
- return lldb::ValueObjectSP();
- if (m_tree == nullptr || m_root_node == nullptr)
- return lldb::ValueObjectSP();
-
- MapIterator iterator(m_root_node, num_children);
+ValueObjectSP
+lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetKeyValuePair(
+ size_t idx, size_t max_depth) {
+ MapIterator iterator(m_root_node, max_depth);
const bool need_to_skip = (idx > 0);
- size_t actual_advancde = idx;
+ size_t actual_advance = idx;
if (need_to_skip) {
+ // If we have already created the iterator for the previous
+ // index, we can start from there and advance by 1.
auto cached_iterator = m_iterators.find(idx - 1);
if (cached_iterator != m_iterators.end()) {
iterator = cached_iterator->second;
- actual_advancde = 1;
+ actual_advance = 1;
}
}
- ValueObjectSP iterated_sp(iterator.advance(actual_advancde));
- if (!iterated_sp) {
+ ValueObjectSP iterated_sp(iterator.advance(actual_advance));
+ if (!iterated_sp)
// this tree is garbage - stop
- m_tree =
- nullptr; // this will stop all future searches until an Update() happens
- return iterated_sp;
- }
+ return nullptr;
- if (!GetDataType()) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
- }
+ if (!GetDataType())
+ return nullptr;
if (!need_to_skip) {
Status error;
iterated_sp = iterated_sp->Dereference(error);
- if (!iterated_sp || error.Fail()) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
- }
+ if (!iterated_sp || error.Fail())
+ return nullptr;
+
GetValueOffset(iterated_sp);
auto child_sp = iterated_sp->GetChildMemberWithName("__value_");
- if (child_sp)
+ if (child_sp) {
+ // Old layout (pre 089a7cc5dea)
iterated_sp = child_sp;
- else
+ } else {
iterated_sp = iterated_sp->GetSyntheticChildAtOffset(
m_skip_size, m_element_type, true);
- if (!iterated_sp) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
}
+
+ if (!iterated_sp)
+ return nullptr;
} else {
// because of the way our debug info is made, we need to read item 0
// first so that we can cache information used to generate other elements
if (m_skip_size == UINT32_MAX)
GetChildAtIndex(0);
- if (m_skip_size == UINT32_MAX) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
- }
+
+ if (m_skip_size == UINT32_MAX)
+ return nullptr;
+
iterated_sp = iterated_sp->GetSyntheticChildAtOffset(m_skip_size,
m_element_type, true);
- if (!iterated_sp) {
- m_tree = nullptr;
- return lldb::ValueObjectSP();
- }
+ if (!iterated_sp)
+ return nullptr;
+ }
+
+ m_iterators[idx] = iterator;
+ assert(iterated_sp != nullptr &&
+ "Cached MapIterator for invalid ValueObject");
+
+ return iterated_sp;
+}
+
+lldb::ValueObjectSP
+lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetChildAtIndex(
+ uint32_t idx) {
+ static ConstString g_cc_("__cc_"), g_cc("__cc");
+ static ConstString g_nc("__nc");
+ uint32_t num_children = CalculateNumChildrenIgnoringErrors();
+ if (idx >= num_children)
+ return nullptr;
+
+ if (m_tree == nullptr || m_root_node == nullptr)
+ return nullptr;
+
+ ValueObjectSP key_val_sp = GetKeyValuePair(idx, /*max_depth=*/num_children);
+ if (!key_val_sp) {
+ // this will stop all future searches until an Update() happens
+ m_tree = nullptr;
+ return nullptr;
}
// at this point we have a valid
@@ -375,7 +405,7 @@ lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetChildAtIndex(
// all items named __value_
StreamString name;
name.Printf("[%" PRIu64 "]", (uint64_t)idx);
- auto potential_child_sp = iterated_sp->Clone(ConstString(name.GetString()));
+ auto potential_child_sp = key_val_sp->Clone(ConstString(name.GetString()));
if (potential_child_sp) {
switch (potential_child_sp->GetNumChildrenIgnoringErrors()) {
case 1: {
@@ -396,7 +426,6 @@ lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetChildAtIndex(
}
}
}
- m_iterators[idx] = iterator;
return potential_child_sp;
}
>From d4f3d24e7f016440083a785ded818b8fb410b7d8 Mon Sep 17 00:00:00 2001
From: Hansang Bae <hansang.bae at intel.com>
Date: Wed, 3 Jul 2024 12:59:34 -0500
Subject: [PATCH 159/246] [OpenMP] Add ompt_start_tool declaration in
omp-tools.h (#97099)
The function ompt_start_tool is a globally-visible C function according
to the specification.
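For illustration, a minimal tool that the runtime could activate through
this entry point (a sketch only; the printf payload is a placeholder, and
in C++ the definition needs C linkage so the runtime can look up the
unmangled symbol):

  #include <omp-tools.h>
  #include <stdio.h>

  static int tool_initialize(ompt_function_lookup_t lookup,
                             int initial_device_num, ompt_data_t *tool_data) {
    printf("OMPT tool attached\n");
    return 1; /* nonzero keeps the tool active */
  }

  static void tool_finalize(ompt_data_t *tool_data) {}

  extern "C" ompt_start_tool_result_t *
  ompt_start_tool(unsigned int omp_version, const char *runtime_version) {
    static ompt_start_tool_result_t result = {tool_initialize, tool_finalize,
                                              ompt_data_none};
    return &result; /* returning NULL leaves the tool inactive */
  }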
---
openmp/runtime/src/include/omp-tools.h.var | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/openmp/runtime/src/include/omp-tools.h.var b/openmp/runtime/src/include/omp-tools.h.var
index 1d1a0f7771e95..8ee179dfe84d7 100644
--- a/openmp/runtime/src/include/omp-tools.h.var
+++ b/openmp/runtime/src/include/omp-tools.h.var
@@ -1408,6 +1408,14 @@ typedef ompt_record_ompt_t *(*ompt_get_record_ompt_t) (
ompt_buffer_cursor_t current
);
+#ifdef _WIN32
+__declspec(dllexport)
+#else
+__attribute__((visibility("default")))
+#endif
+ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
+ const char *runtime_version);
+
#define ompt_id_none 0
#define ompt_data_none {0}
#define ompt_time_none 0
>From c0d1d0405cb9f55ec12dfb0cec9c5639d3b357d8 Mon Sep 17 00:00:00 2001
From: Kirill <pyasetskiyr at gmail.com>
Date: Wed, 3 Jul 2024 21:01:26 +0300
Subject: [PATCH 160/246] [bazel] Update WORKSPACE file in examples (#97613)
---
utils/bazel/examples/submodule/WORKSPACE | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/utils/bazel/examples/submodule/WORKSPACE b/utils/bazel/examples/submodule/WORKSPACE
index e8eff85ffda52..d6f0a5db91c99 100644
--- a/utils/bazel/examples/submodule/WORKSPACE
+++ b/utils/bazel/examples/submodule/WORKSPACE
@@ -8,6 +8,8 @@ workspace(name = "submodule_example")
SKYLIB_VERSION = "1.0.3"
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
http_archive(
name = "bazel_skylib",
sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44",
@@ -28,6 +30,8 @@ load("@llvm-raw//utils/bazel:configure.bzl", "llvm_configure")
llvm_configure(name = "llvm-project")
+load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
+
maybe(
http_archive,
name = "llvm_zlib",
>From c940317d023e6790fc20152f354487571d0e3087 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 3 Jul 2024 11:07:50 -0700
Subject: [PATCH 161/246] [msan] Add test cases for vector shadow track origins
bug (#97611)
These test cases demonstrate a bug in MSan (vector shadow is not always
converted to scalar before zext) that will shortly be fixed in
https://github.com/llvm/llvm-project/pull/96722
The bug is not architecture-specific; we provide both x86 and Arm NEON
test cases.
Since the test cases will crash the compiler (unless it is a release
build), they are marked as UNSUPPORTED.
The buggy codepath is nested inside 'if
(instrumentWithCalls(ConvertedShadow))'. To keep the test cases small, we
set -msan-instrumentation-with-call-threshold=0, though we have observed
this bug in the real world with default settings.
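The shape of the issue, as a sketch (assuming MemorySanitizer.cpp's
existing convertShadowToScalar helper; WarningFn and Origin stand in for
the per-size __msan_maybe_warning_* callee and the origin value — this
illustrates the direction of the fix, not the actual patch):

  // ConvertedShadow may still be a vector (e.g. <4 x i16>); zext-ing it
  // straight to i64 yields the invalid IR shown in the tests below.
  // Collapsing the shadow to an integer scalar first avoids that:
  Value *ScalarShadow = convertShadowToScalar(ConvertedShadow, IRB);
  Value *ShadowArg = IRB.CreateZExt(ScalarShadow, IRB.getInt64Ty());
  IRB.CreateCall(WarningFn, {ShadowArg, Origin});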
---
.../vector-track-origins-neon.ll | 95 +++++++++++++++++++
.../vector-track-origins-struct.ll | 46 +++++++++
2 files changed, 141 insertions(+)
create mode 100644 llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-neon.ll
create mode 100644 llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-struct.ll
diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-neon.ll b/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-neon.ll
new file mode 100644
index 0000000000000..0fe842e28ff92
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-neon.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build-release/bin/opt --version 5
+; RUN: opt < %s -S -passes="msan<eager-checks;track-origins=2>" -msan-instrumentation-with-call-threshold=0 -disable-verify | FileCheck %s
+;
+; UNSUPPORTED: target={{.*}}
+;
+; This test illustrates a bug in MemorySanitizer that will shortly be fixed
+; (https://github.com/llvm/llvm-project/pull/96722).
+;
+; '-msan-instrumentation-with-call-threshold=0' makes it possible to detect the
+; bug with a short test case.
+;
+; '-disable-verify' with a release build is needed to avoid a compiler crash
+; (e.g., to autogenerate the assertions).
+;
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64-grtev4-linux-gnu"
+
+; Function Attrs: mustprogress noreturn nounwind sanitize_memory
+define dso_local void @_Z1cv() local_unnamed_addr #0 {
+; CHECK-LABEL: define dso_local void @_Z1cv(
+; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[DOTPRE:%.*]] = load <4 x i16>, ptr @_Z1cv, align 8, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i16>, ptr inttoptr (i64 xor (i64 ptrtoint (ptr @_Z1cv to i64), i64 193514046488576) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 xor (i64 ptrtoint (ptr @_Z1cv to i64), i64 193514046488576), i64 35184372088832) to ptr), align 8
+; CHECK-NEXT: br label %[[FOR_COND:.*]]
+; CHECK: [[FOR_COND]]:
+; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi <4 x i16> [ [[_MSLD]], %[[ENTRY]] ], [ [[_MSLD3:%.*]], %[[FOR_COND]] ]
+; CHECK-NEXT: [[_MSPHI_O:%.*]] = phi i32 [ [[TMP0]], %[[ENTRY]] ], [ [[TMP15:%.*]], %[[FOR_COND]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi <4 x i16> [ [[DOTPRE]], %[[ENTRY]] ], [ [[TMP5:%.*]], %[[FOR_COND]] ]
+; CHECK-NEXT: [[_MSPHI_S1:%.*]] = phi <4 x i16> [ <i16 -1, i16 -1, i16 -1, i16 -1>, %[[ENTRY]] ], [ [[_MSLD3]], %[[FOR_COND]] ]
+; CHECK-NEXT: [[_MSPHI_O2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP15]], %[[FOR_COND]] ]
+; CHECK-NEXT: [[E_0:%.*]] = phi <4 x i16> [ undef, %[[ENTRY]] ], [ [[TMP5]], %[[FOR_COND]] ]
+; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[_MSPHI_S1]], <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[LANE:%.*]] = shufflevector <4 x i16> [[E_0]], <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+;
+; Editor's note: the following zext instructions are invalid
+; ('zext source and destination must both be a vector or neither')
+;
+; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[_MSPHI_S]] to i64
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[_MSPHI_O]])
+; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[_MSPROP]] to i64
+;
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP3]], i32 zeroext [[_MSPHI_O2]])
+; CHECK-NEXT: [[CALL:%.*]] = tail call noundef i32 @_Z1b11__Int16x4_tS_(<4 x i16> noundef [[TMP1]], <4 x i16> noundef [[LANE]])
+; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[CALL]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[CONV]] to ptr
+; CHECK-NEXT: [[TMP5]] = load <4 x i16>, ptr [[TMP8]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT: [[_MSLD3]] = load <4 x i16>, ptr [[TMP12]], align 8
+; CHECK-NEXT: [[TMP15]] = load i32, ptr [[TMP14]], align 8
+; CHECK-NEXT: store <4 x i16> [[_MSLD3]], ptr inttoptr (i64 xor (i64 ptrtoint (ptr @_Z1cv to i64), i64 193514046488576) to ptr), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x i16> [[_MSLD3]] to i64
+; CHECK-NEXT: call void @__msan_maybe_store_origin_8(i64 zeroext [[TMP16]], ptr @_Z1cv, i32 zeroext [[TMP15]])
+; CHECK-NEXT: store <4 x i16> [[TMP5]], ptr @_Z1cv, align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+entry:
+ %.pre = load <4 x i16>, ptr @_Z1cv, align 8, !tbaa !2
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ %0 = phi <4 x i16> [ %.pre, %entry ], [ %2, %for.cond ]
+ %e.0 = phi <4 x i16> [ undef, %entry ], [ %2, %for.cond ]
+ %lane = shufflevector <4 x i16> %e.0, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %call = tail call noundef i32 @_Z1b11__Int16x4_tS_(<4 x i16> noundef %0, <4 x i16> noundef %lane) #2
+ %conv = sext i32 %call to i64
+ %1 = inttoptr i64 %conv to ptr
+ %2 = load <4 x i16>, ptr %1, align 8, !tbaa !2
+ store <4 x i16> %2, ptr @_Z1cv, align 8, !tbaa !2
+ br label %for.cond, !llvm.loop !5
+}
+
+declare noundef i32 @_Z1b11__Int16x4_tS_(<4 x i16> noundef, <4 x i16> noundef) local_unnamed_addr #1
+
+attributes #0 = { mustprogress noreturn nounwind sanitize_memory "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon" }
+
+!2 = !{!3, !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C++ TBAA"}
+!5 = distinct !{!5, !6}
+!6 = !{!"llvm.loop.mustprogress"}
+;.
+; CHECK: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
+; CHECK: [[META1]] = !{!"omnipotent char", [[META2:![0-9]+]], i64 0}
+; CHECK: [[META2]] = !{!"Simple C++ TBAA"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
+; CHECK: [[META4]] = !{!"llvm.loop.mustprogress"}
+;.
diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-struct.ll b/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-struct.ll
new file mode 100644
index 0000000000000..5eae441f05eae
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-struct.ll
@@ -0,0 +1,46 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build-release/bin/opt --version 5
+; RUN: opt < %s -S -passes="msan<eager-checks;track-origins=2>" -msan-instrumentation-with-call-threshold=0 -disable-verify | FileCheck %s
+;
+; UNSUPPORTED: target={{.*}}
+;
+; This test illustrates a bug in MemorySanitizer that will shortly be fixed
+; (https://github.com/llvm/llvm-project/pull/96722).
+;
+; '-msan-instrumentation-with-call-threshold=0' makes it possible to detect the
+; bug with a short test case.
+;
+; '-disable-verify' with a release build is needed to avoid a compiler crash
+; (e.g., to autogenerate the assertions).
+;
+; This is based on check-struct.ll.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define { i32, i8 } @main() sanitize_memory {
+; CHECK-LABEL: define { i32, i8 } @main(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to ptr
+; CHECK-NEXT: [[O:%.*]] = load { i32, i8 }, ptr [[P]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT: [[_MSLD:%.*]] = load { i32, i8 }, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
+; CHECK-NEXT: store { i32, i8 } zeroinitializer, ptr @__msan_retval_tls, align 8
+;
+; Editor's note: the following zext instruction is invalid
+; ('ZExt only operates on integer')
+;
+; CHECK-NEXT: [[TMP7:%.*]] = zext { i32, i8 } [[_MSLD]] to i64
+;
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP7]], i32 zeroext [[TMP6]])
+; CHECK-NEXT: ret { i32, i8 } [[O]]
+;
+ %p = inttoptr i64 0 to ptr
+ %o = load { i32, i8 }, ptr %p
+ ret { i32, i8 } %o
+}
>From 96c18a2769a48436e01ccc59154b8de9fe305e22 Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Wed, 3 Jul 2024 10:45:51 -0700
Subject: [PATCH 162/246] [SLP][NFC]Make instructions non-foldable, NFC
---
.../SLPVectorizer/X86/entries-different-vf.ll | 36 ++++++++++---------
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll b/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll
index 536526a5cfe06..1dfeaa1959e07 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll
@@ -1,27 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -mcpu=icelake-server -S < %s | FileCheck %s
-define i1 @test() {
+define i1 @test(i64 %v) {
; CHECK-LABEL: define i1 @test
-; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: (i64 [[V:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = shl i64 0, 0
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i64> <i64 poison, i64 poison, i64 poison, i64 poison, i64 0, i64 0, i64 0, i64 0>, i64 0, i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i64> <i64 undef, i64 undef, i64 0, i64 0>, i64 [[TMP0]], i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> [[TMP11]], i64 0, i32 1
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i64> [[TMP4]], <4 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 1, i32 1, i32 3, i32 0>
-; CHECK-NEXT: [[TMP6:%.*]] = or <8 x i64> [[TMP3]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = sub <8 x i64> [[TMP3]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i64> [[TMP6]], <8 x i64> [[TMP7]], <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 12, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ult <8 x i64> [[TMP8]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP9]])
-; CHECK-NEXT: ret i1 [[TMP10]]
+; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[V]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[V]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[V]], 7
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i64> <i64 poison, i64 poison, i64 poison, i64 poison, i64 0, i64 0, i64 0, i64 0>, i64 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> [[TMP3]], <8 x i64> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i64> <i64 undef, i64 undef, i64 0, i64 0>, i64 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[TMP2]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i64> [[TMP6]], <4 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 1, i32 1, i32 3, i32 0>
+; CHECK-NEXT: [[TMP8:%.*]] = or <8 x i64> [[TMP4]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub <8 x i64> [[TMP4]], [[TMP7]]
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i64> [[TMP8]], <8 x i64> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 12, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <8 x i64> [[TMP10]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP11]])
+; CHECK-NEXT: ret i1 [[TMP12]]
;
entry:
- %0 = shl i64 0, 0
- %1 = add i64 0, 0
- %2 = add i64 0, 0
+ %0 = shl i64 %v, 1
+ %1 = add i64 %v, 3
+ %2 = add i64 %v, 7
%3 = or i64 %2, %1
%cmp750 = icmp ult i64 %3, 0
%4 = or i64 %0, %1
From d54802092de3d92c4ecd331801970b0d84fecc25 Mon Sep 17 00:00:00 2001
From: Alexis Engelke <engelke at in.tum.de>
Date: Wed, 3 Jul 2024 20:15:29 +0200
Subject: [PATCH 163/246] [MC][ELF] Eliminate some hash maps from
ELFObjectWriter (#97421)
Remove some maps. Mostly cleanup, only a slight performance win.
- Replace SectionIndexMap with layout order: The section layout order is
only used in MachO, so we can repurpose the field as section table
index.
- Store section offsets in MCSectionELF: No need for a map, and
especially not a std::map. Direct access to the underlying (and easily
modifiable) data structure is always faster.
- Improve storage of groups: There's no point in having a DenseMap; the
numbers of sections and groups are small enough to use vectors.
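As a rough sketch of the offset change (the `Section` type below is a hypothetical stand-in for MCSectionELF, not the real class), storing the offsets on the object itself replaces an O(log n) std::map lookup with a plain member access:

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

struct Section {
  uint64_t StartOffset = 0;
  uint64_t EndOffset = 0;

  void setOffsets(uint64_t Start, uint64_t End) {
    StartOffset = Start;
    EndOffset = End;
  }
  std::pair<uint64_t, uint64_t> getOffsets() const {
    return {StartOffset, EndOffset};
  }
};

int main() {
  std::vector<Section> Sections(3);

  // Before: a side table keyed by section pointer, one ordered-map
  // lookup (and its pointer chasing) per query.
  std::map<const Section *, std::pair<uint64_t, uint64_t>> SectionOffsets;
  SectionOffsets[&Sections[0]] = {0, 64};
  auto Old = SectionOffsets.find(&Sections[0])->second;

  // After: the offsets live on the section; no allocation, no lookup.
  Sections[0].setOffsets(0, 64);
  auto New = Sections[0].getOffsets();

  return Old == New ? 0 : 1;
}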
---
llvm/include/llvm/MC/MCSectionELF.h | 12 +++
llvm/lib/MC/ELFObjectWriter.cpp | 120 +++++++++++-----------------
2 files changed, 60 insertions(+), 72 deletions(-)
diff --git a/llvm/include/llvm/MC/MCSectionELF.h b/llvm/include/llvm/MC/MCSectionELF.h
index 3d45d3da10ca1..d43ffbd885c96 100644
--- a/llvm/include/llvm/MC/MCSectionELF.h
+++ b/llvm/include/llvm/MC/MCSectionELF.h
@@ -46,6 +46,10 @@ class MCSectionELF final : public MCSection {
/// section header index of the section where LinkedToSym is defined.
const MCSymbol *LinkedToSym;
+ /// Start/end offset in file, used by ELFWriter.
+ uint64_t StartOffset;
+ uint64_t EndOffset;
+
private:
friend class MCContext;
@@ -92,6 +96,14 @@ class MCSectionELF final : public MCSection {
}
const MCSymbol *getLinkedToSymbol() const { return LinkedToSym; }
+ void setOffsets(uint64_t Start, uint64_t End) {
+ StartOffset = Start;
+ EndOffset = End;
+ }
+ std::pair<uint64_t, uint64_t> getOffsets() const {
+ return std::make_pair(StartOffset, EndOffset);
+ }
+
static bool classof(const MCSection *S) {
return S->getVariant() == SV_ELF;
}
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
index bcc6dfeeeccd6..5cba6eb15b5c9 100644
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -66,8 +66,6 @@ using namespace llvm;
namespace {
-using SectionIndexMapTy = DenseMap<const MCSectionELF *, uint32_t>;
-
class ELFObjectWriter;
struct ELFWriter;
@@ -136,8 +134,8 @@ struct ELFWriter {
unsigned SymbolTableIndex = ~0u;
// Sections in the order they are to be output in the section table.
- std::vector<const MCSectionELF *> SectionTable;
- unsigned addToSectionTable(const MCSectionELF *Sec);
+ std::vector<MCSectionELF *> SectionTable;
+ unsigned addToSectionTable(MCSectionELF *Sec);
// TargetObjectWriter wrappers.
bool is64Bit() const;
@@ -171,31 +169,21 @@ struct ELFWriter {
void writeSymbol(const MCAssembler &Asm, SymbolTableWriter &Writer,
uint32_t StringIndex, ELFSymbolData &MSD);
- // Start and end offset of each section
- using SectionOffsetsTy =
- std::map<const MCSectionELF *, std::pair<uint64_t, uint64_t>>;
-
// Map from a signature symbol to the group section index
using RevGroupMapTy = DenseMap<const MCSymbol *, unsigned>;
/// Compute the symbol table data
///
/// \param Asm - The assembler.
- /// \param SectionIndexMap - Maps a section to its index.
/// \param RevGroupMap - Maps a signature symbol to the group section.
- void computeSymbolTable(MCAssembler &Asm,
- const SectionIndexMapTy &SectionIndexMap,
- const RevGroupMapTy &RevGroupMap,
- SectionOffsetsTy &SectionOffsets);
+ void computeSymbolTable(MCAssembler &Asm, const RevGroupMapTy &RevGroupMap);
void writeAddrsigSection();
MCSectionELF *createRelocationSection(MCContext &Ctx,
const MCSectionELF &Sec);
- void writeSectionHeader(const MCAssembler &Asm,
- const SectionIndexMapTy &SectionIndexMap,
- const SectionOffsetsTy &SectionOffsets);
+ void writeSectionHeader(const MCAssembler &Asm);
void writeSectionData(const MCAssembler &Asm, MCSection &Sec);
@@ -207,8 +195,7 @@ struct ELFWriter {
void writeRelocations(const MCAssembler &Asm, const MCSectionELF &Sec);
uint64_t writeObject(MCAssembler &Asm);
- void writeSection(const SectionIndexMapTy &SectionIndexMap,
- uint32_t GroupSymbolIndex, uint64_t Offset, uint64_t Size,
+ void writeSection(uint32_t GroupSymbolIndex, uint64_t Offset, uint64_t Size,
const MCSectionELF &Section);
};
@@ -330,7 +317,7 @@ uint64_t ELFWriter::align(Align Alignment) {
return NewOffset;
}
-unsigned ELFWriter::addToSectionTable(const MCSectionELF *Sec) {
+unsigned ELFWriter::addToSectionTable(MCSectionELF *Sec) {
SectionTable.push_back(Sec);
StrTabBuilder.add(Sec->getName());
return SectionTable.size();
@@ -612,9 +599,7 @@ bool ELFWriter::isInSymtab(const MCAssembler &Asm, const MCSymbolELF &Symbol,
}
void ELFWriter::computeSymbolTable(MCAssembler &Asm,
- const SectionIndexMapTy &SectionIndexMap,
- const RevGroupMapTy &RevGroupMap,
- SectionOffsetsTy &SectionOffsets) {
+ const RevGroupMapTy &RevGroupMap) {
MCContext &Ctx = Asm.getContext();
SymbolTableWriter Writer(*this, is64Bit());
@@ -697,7 +682,7 @@ void ELFWriter::computeSymbolTable(MCAssembler &Asm,
if (Mode == NonDwoOnly && isDwoSection(Section))
continue;
- MSD.SectionIndex = SectionIndexMap.lookup(&Section);
+ MSD.SectionIndex = Section.getOrdinal();
assert(MSD.SectionIndex && "Invalid section index!");
if (MSD.SectionIndex >= ELF::SHN_LORESERVE)
HasLargeSectionIndex = true;
@@ -775,7 +760,7 @@ void ELFWriter::computeSymbolTable(MCAssembler &Asm,
}
uint64_t SecEnd = W.OS.tell();
- SectionOffsets[SymtabSection] = std::make_pair(SecStart, SecEnd);
+ SymtabSection->setOffsets(SecStart, SecEnd);
ArrayRef<uint32_t> ShndxIndexes = Writer.getShndxIndexes();
if (ShndxIndexes.empty()) {
@@ -785,12 +770,11 @@ void ELFWriter::computeSymbolTable(MCAssembler &Asm,
assert(SymtabShndxSectionIndex != 0);
SecStart = W.OS.tell();
- const MCSectionELF *SymtabShndxSection =
- SectionTable[SymtabShndxSectionIndex - 1];
+ MCSectionELF *SymtabShndxSection = SectionTable[SymtabShndxSectionIndex - 1];
for (uint32_t Index : ShndxIndexes)
write(Index);
SecEnd = W.OS.tell();
- SectionOffsets[SymtabShndxSection] = std::make_pair(SecStart, SecEnd);
+ SymtabShndxSection->setOffsets(SecStart, SecEnd);
}
void ELFWriter::writeAddrsigSection() {
@@ -1030,8 +1014,7 @@ void ELFWriter::writeRelocations(const MCAssembler &Asm,
}
}
-void ELFWriter::writeSection(const SectionIndexMapTy &SectionIndexMap,
- uint32_t GroupSymbolIndex, uint64_t Offset,
+void ELFWriter::writeSection(uint32_t GroupSymbolIndex, uint64_t Offset,
uint64_t Size, const MCSectionELF &Section) {
uint64_t sh_link = 0;
uint64_t sh_info = 0;
@@ -1050,7 +1033,7 @@ void ELFWriter::writeSection(const SectionIndexMapTy &SectionIndexMap,
sh_link = SymbolTableIndex;
assert(sh_link && ".symtab not found");
const MCSection *InfoSection = Section.getLinkedToSection();
- sh_info = SectionIndexMap.lookup(cast<MCSectionELF>(InfoSection));
+ sh_info = InfoSection->getOrdinal();
break;
}
@@ -1075,10 +1058,8 @@ void ELFWriter::writeSection(const SectionIndexMapTy &SectionIndexMap,
// If the value in the associated metadata is not a definition, Sym will be
// undefined. Represent this with sh_link=0.
const MCSymbol *Sym = Section.getLinkedToSymbol();
- if (Sym && Sym->isInSection()) {
- const MCSectionELF *Sec = cast<MCSectionELF>(&Sym->getSection());
- sh_link = SectionIndexMap.lookup(Sec);
- }
+ if (Sym && Sym->isInSection())
+ sh_link = Sym->getSection().getOrdinal();
}
WriteSecHdrEntry(StrTabBuilder.getOffset(Section.getName()),
@@ -1087,9 +1068,7 @@ void ELFWriter::writeSection(const SectionIndexMapTy &SectionIndexMap,
Section.getEntrySize());
}
-void ELFWriter::writeSectionHeader(const MCAssembler &Asm,
- const SectionIndexMapTy &SectionIndexMap,
- const SectionOffsetsTy &SectionOffsets) {
+void ELFWriter::writeSectionHeader(const MCAssembler &Asm) {
const unsigned NumSections = SectionTable.size();
// Null section first.
@@ -1105,16 +1084,14 @@ void ELFWriter::writeSectionHeader(const MCAssembler &Asm,
else
GroupSymbolIndex = Section->getGroup()->getIndex();
- const std::pair<uint64_t, uint64_t> &Offsets =
- SectionOffsets.find(Section)->second;
+ std::pair<uint64_t, uint64_t> Offsets = Section->getOffsets();
uint64_t Size;
if (Type == ELF::SHT_NOBITS)
Size = Asm.getSectionAddressSize(*Section);
else
Size = Offsets.second - Offsets.first;
- writeSection(SectionIndexMap, GroupSymbolIndex, Offsets.first, Size,
- *Section);
+ writeSection(GroupSymbolIndex, Offsets.first, Size, *Section);
}
}
@@ -1127,17 +1104,15 @@ uint64_t ELFWriter::writeObject(MCAssembler &Asm) {
StringTableIndex = addToSectionTable(StrtabSection);
RevGroupMapTy RevGroupMap;
- SectionIndexMapTy SectionIndexMap;
-
- DenseMap<const MCSymbol *, SmallVector<const MCSectionELF *, 0>> GroupMembers;
// Write out the ELF header ...
writeHeader(Asm);
// ... then the sections ...
- SectionOffsetsTy SectionOffsets;
- std::vector<MCSectionELF *> Groups;
- std::vector<MCSectionELF *> Relocations;
+ SmallVector<std::pair<MCSectionELF *, SmallVector<unsigned>>, 0> Groups;
+ // Map from group section index to group
+ SmallVector<unsigned, 0> GroupMap;
+ SmallVector<MCSectionELF *> Relocations;
for (MCSection &Sec : Asm) {
MCSectionELF &Section = static_cast<MCSectionELF &>(Sec);
if (Mode == NonDwoOnly && isDwoSection(Section))
@@ -1152,49 +1127,50 @@ uint64_t ELFWriter::writeObject(MCAssembler &Asm) {
writeSectionData(Asm, Section);
uint64_t SecEnd = W.OS.tell();
- SectionOffsets[&Section] = std::make_pair(SecStart, SecEnd);
+ Section.setOffsets(SecStart, SecEnd);
MCSectionELF *RelSection = createRelocationSection(Ctx, Section);
+ unsigned *GroupIdxEntry = nullptr;
if (SignatureSymbol) {
- unsigned &GroupIdx = RevGroupMap[SignatureSymbol];
- if (!GroupIdx) {
+ GroupIdxEntry = &RevGroupMap[SignatureSymbol];
+ if (!*GroupIdxEntry) {
MCSectionELF *Group =
Ctx.createELFGroupSection(SignatureSymbol, Section.isComdat());
- GroupIdx = addToSectionTable(Group);
+ *GroupIdxEntry = addToSectionTable(Group);
Group->setAlignment(Align(4));
- Groups.push_back(Group);
+
+ GroupMap.resize(*GroupIdxEntry + 1);
+ GroupMap[*GroupIdxEntry] = Groups.size();
+ Groups.emplace_back(Group, SmallVector<unsigned>{});
}
- SmallVector<const MCSectionELF *, 0> &Members =
- GroupMembers[SignatureSymbol];
- Members.push_back(&Section);
- if (RelSection)
- Members.push_back(RelSection);
}
- SectionIndexMap[&Section] = addToSectionTable(&Section);
+ Section.setOrdinal(addToSectionTable(&Section));
if (RelSection) {
- SectionIndexMap[RelSection] = addToSectionTable(RelSection);
+ RelSection->setOrdinal(addToSectionTable(RelSection));
Relocations.push_back(RelSection);
}
+ if (GroupIdxEntry) {
+ auto &Members = Groups[GroupMap[*GroupIdxEntry]];
+ Members.second.push_back(Section.getOrdinal());
+ if (RelSection)
+ Members.second.push_back(RelSection->getOrdinal());
+ }
+
OWriter.TargetObjectWriter->addTargetSectionFlags(Ctx, Section);
}
- for (MCSectionELF *Group : Groups) {
+ for (auto &[Group, Members] : Groups) {
// Remember the offset into the file for this section.
const uint64_t SecStart = align(Group->getAlign());
- const MCSymbol *SignatureSymbol = Group->getGroup();
- assert(SignatureSymbol);
write(uint32_t(Group->isComdat() ? unsigned(ELF::GRP_COMDAT) : 0));
- for (const MCSectionELF *Member : GroupMembers[SignatureSymbol]) {
- uint32_t SecIndex = SectionIndexMap.lookup(Member);
- write(SecIndex);
- }
+ W.write<unsigned>(Members);
uint64_t SecEnd = W.OS.tell();
- SectionOffsets[Group] = std::make_pair(SecStart, SecEnd);
+ Group->setOffsets(SecStart, SecEnd);
}
if (Mode == DwoOnly) {
@@ -1210,7 +1186,7 @@ uint64_t ELFWriter::writeObject(MCAssembler &Asm) {
}
// Compute symbol table information.
- computeSymbolTable(Asm, SectionIndexMap, RevGroupMap, SectionOffsets);
+ computeSymbolTable(Asm, RevGroupMap);
for (MCSectionELF *RelSection : Relocations) {
// Remember the offset into the file for this section.
@@ -1220,27 +1196,27 @@ uint64_t ELFWriter::writeObject(MCAssembler &Asm) {
cast<MCSectionELF>(*RelSection->getLinkedToSection()));
uint64_t SecEnd = W.OS.tell();
- SectionOffsets[RelSection] = std::make_pair(SecStart, SecEnd);
+ RelSection->setOffsets(SecStart, SecEnd);
}
if (OWriter.EmitAddrsigSection) {
uint64_t SecStart = W.OS.tell();
writeAddrsigSection();
uint64_t SecEnd = W.OS.tell();
- SectionOffsets[AddrsigSection] = std::make_pair(SecStart, SecEnd);
+ AddrsigSection->setOffsets(SecStart, SecEnd);
}
}
{
uint64_t SecStart = W.OS.tell();
StrTabBuilder.write(W.OS);
- SectionOffsets[StrtabSection] = std::make_pair(SecStart, W.OS.tell());
+ StrtabSection->setOffsets(SecStart, W.OS.tell());
}
const uint64_t SectionHeaderOffset = align(is64Bit() ? Align(8) : Align(4));
// ... then the section header table ...
- writeSectionHeader(Asm, SectionIndexMap, SectionOffsets);
+ writeSectionHeader(Asm);
uint16_t NumSections = support::endian::byte_swap<uint16_t>(
(SectionTable.size() + 1 >= ELF::SHN_LORESERVE) ? (uint16_t)ELF::SHN_UNDEF
From 4eecf3c650ea53aa00cae2fe983f95ee6ec6705a Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Wed, 3 Jul 2024 14:36:30 -0400
Subject: [PATCH 164/246] [SLP]Reorder buildvector/reduction vectorization and
fuse the loops.
Currently the SLP vectorizer first tries to find reduction nodes and
then vectorizes buildvector sequences. Instead, it should try to
vectorize wide buildvector sequences first, only then try to vectorize
reductions, and finally the smaller buildvector sequences.
Reviewers: RKSimon
Reviewed By: RKSimon
Pull Request: https://github.com/llvm/llvm-project/pull/96943
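A schematic sketch of the resulting control flow (`Instr`, `tryBuildVector`, and `tryReduction` are placeholders, not the real SLP entry points): the previously separate walks are fused into a single reverse walk that applies three passes per instruction, widest buildvectors first.

#include <vector>

struct Instr { bool Deleted = false; };

// Hypothetical stand-ins for the real SLP vectorization attempts.
static bool tryBuildVector(Instr &, bool MaxVFOnly) { return MaxVFOnly; }
static bool tryReduction(Instr &) { return false; }

bool vectorizeInserts(std::vector<Instr *> &Instructions) {
  bool Changed = false;
  for (auto It = Instructions.rbegin(); It != Instructions.rend(); ++It) {
    Instr *I = *It;
    // Pass 1: buildvector sequences at the maximum VF only.
    if (!I->Deleted)
      Changed |= tryBuildVector(*I, /*MaxVFOnly=*/true);
    // Pass 2: horizontal reductions.
    if (!I->Deleted)
      Changed |= tryReduction(*I);
    // Pass 3: remaining (narrower) buildvector sequences.
    if (!I->Deleted)
      Changed |= tryBuildVector(*I, /*MaxVFOnly=*/false);
  }
  return Changed;
}

int main() {
  Instr A, B;
  std::vector<Instr *> Worklist = {&A, &B};
  return vectorizeInserts(Worklist) ? 0 : 1;
}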
---
.../llvm/Transforms/Vectorize/SLPVectorizer.h | 4 +-
.../Transforms/Vectorize/SLPVectorizer.cpp | 32 +++++++++++-----
.../AArch64/scalarization-overhead.ll | 37 ++++++++++---------
.../X86/reused-extractelements.ll | 8 ++--
4 files changed, 46 insertions(+), 35 deletions(-)
diff --git a/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
index 3b522dd642db5..95531544a1c81 100644
--- a/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -134,11 +134,11 @@ struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
/// Try to vectorize trees that start at insertvalue instructions.
bool vectorizeInsertValueInst(InsertValueInst *IVI, BasicBlock *BB,
- slpvectorizer::BoUpSLP &R);
+ slpvectorizer::BoUpSLP &R, bool MaxVFOnly);
/// Try to vectorize trees that start at insertelement instructions.
bool vectorizeInsertElementInst(InsertElementInst *IEI, BasicBlock *BB,
- slpvectorizer::BoUpSLP &R);
+ slpvectorizer::BoUpSLP &R, bool MaxVFOnly);
/// Tries to vectorize \p CmpInts. \Returns true on success.
template <typename ItT>
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 868e9e2687f57..11f9ad70dc725 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -18108,7 +18108,8 @@ bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts,
}
bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
- BasicBlock *BB, BoUpSLP &R) {
+ BasicBlock *BB, BoUpSLP &R,
+ bool MaxVFOnly) {
if (!R.canMapToVector(IVI->getType()))
return false;
@@ -18119,11 +18120,12 @@ bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
// Aggregate value is unlikely to be processed in vector register.
- return tryToVectorizeList(BuildVectorOpds, R);
+ return tryToVectorizeList(BuildVectorOpds, R, MaxVFOnly);
}
bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
- BasicBlock *BB, BoUpSLP &R) {
+ BasicBlock *BB, BoUpSLP &R,
+ bool MaxVFOnly) {
SmallVector<Value *, 16> BuildVectorInsts;
SmallVector<Value *, 16> BuildVectorOpds;
SmallVector<int> Mask;
@@ -18133,7 +18135,7 @@ bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
return false;
LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
- return tryToVectorizeList(BuildVectorInsts, R);
+ return tryToVectorizeList(BuildVectorInsts, R, MaxVFOnly);
}
template <typename T>
@@ -18353,20 +18355,30 @@ bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions,
"This function only accepts Insert instructions");
bool OpsChanged = false;
SmallVector<WeakTrackingVH> PostponedInsts;
- // pass1 - try to vectorize reductions only
for (auto *I : reverse(Instructions)) {
+ // pass1 - try to match and vectorize a buildvector sequence for MaxVF only.
+ if (R.isDeleted(I) || isa<CmpInst>(I))
+ continue;
+ if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
+ OpsChanged |=
+ vectorizeInsertValueInst(LastInsertValue, BB, R, /*MaxVFOnly=*/true);
+ } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
+ OpsChanged |=
+ vectorizeInsertElementInst(LastInsertElem, BB, R, /*MaxVFOnly=*/true);
+ }
+ // pass2 - try to vectorize reductions only
if (R.isDeleted(I))
continue;
OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts);
- }
- // pass2 - try to match and vectorize a buildvector sequence.
- for (auto *I : reverse(Instructions)) {
if (R.isDeleted(I) || isa<CmpInst>(I))
continue;
+ // pass3 - try to match and vectorize a buildvector sequence.
if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
- OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
+ OpsChanged |=
+ vectorizeInsertValueInst(LastInsertValue, BB, R, /*MaxVFOnly=*/false);
} else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
- OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
+ OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R,
+ /*MaxVFOnly=*/false);
}
}
// Now try to vectorize postponed instructions.
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
index c6209fd71063a..a24cb81541d7c 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
@@ -10,32 +10,33 @@ define fastcc i64 @zot(float %arg, float %arg1, float %arg2, float %arg3, float
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[ARG3:%.*]], i32 2
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> <float 0.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[TMP2]]
-; CHECK-NEXT: [[VAL12:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[TMP2]], float [[VAL12]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x float> [[TMP4]], float 0.000000e+00, i32 1
-; CHECK-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[TMP5]], <float 2.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x float> <float poison, float 0.000000e+00>, float [[ARG3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = fadd fast <2 x float> [[TMP4]], <float 1.000000e+00, float 0.000000e+00>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP5]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP6]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+; CHECK-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[TMP7]], <float 2.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT: br i1 [[ARG6:%.*]], label [[BB18:%.*]], label [[BB57:%.*]]
; CHECK: bb18:
-; CHECK-NEXT: [[TMP7:%.*]] = phi <4 x float> [ [[TMP6]], [[BB:%.*]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x float> [[TMP6]], i32 2
-; CHECK-NEXT: [[VAL23:%.*]] = fmul fast float [[TMP8]], 2.000000e+00
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x float> [[TMP6]], i32 3
-; CHECK-NEXT: [[VAL24:%.*]] = fmul fast float [[TMP9]], 3.000000e+00
+; CHECK-NEXT: [[TMP9:%.*]] = phi <4 x float> [ [[TMP8]], [[BB:%.*]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x float> [[TMP8]], i32 2
+; CHECK-NEXT: [[VAL23:%.*]] = fmul fast float [[TMP10]], 2.000000e+00
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x float> [[TMP8]], i32 3
+; CHECK-NEXT: [[VAL24:%.*]] = fmul fast float [[TMP11]], 3.000000e+00
; CHECK-NEXT: br i1 [[ARG7:%.*]], label [[BB25:%.*]], label [[BB57]]
; CHECK: bb25:
-; CHECK-NEXT: [[TMP10:%.*]] = phi <4 x float> [ [[TMP7]], [[BB18]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x float> [[TMP3]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = phi <4 x float> [ [[TMP9]], [[BB18]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x float> [[TMP3]], i32 1
; CHECK-NEXT: br label [[BB30:%.*]]
; CHECK: bb30:
; CHECK-NEXT: [[VAL31:%.*]] = phi float [ [[VAL55:%.*]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT: [[VAL32:%.*]] = phi float [ [[TMP11]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i8>, ptr [[ARG5:%.*]], align 1
-; CHECK-NEXT: [[TMP13:%.*]] = uitofp <4 x i8> [[TMP12]] to <4 x float>
-; CHECK-NEXT: [[TMP14:%.*]] = fsub fast <4 x float> [[TMP13]], [[TMP3]]
-; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <4 x float> [[TMP14]], [[TMP10]]
-; CHECK-NEXT: [[TMP16:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP15]])
+; CHECK-NEXT: [[VAL32:%.*]] = phi float [ [[TMP13]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i8>, ptr [[ARG5:%.*]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = uitofp <4 x i8> [[TMP14]] to <4 x float>
+; CHECK-NEXT: [[TMP16:%.*]] = fsub fast <4 x float> [[TMP15]], [[TMP3]]
+; CHECK-NEXT: [[TMP17:%.*]] = fmul fast <4 x float> [[TMP16]], [[TMP12]]
+; CHECK-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP17]])
; CHECK-NEXT: [[VAL55]] = tail call fast float @llvm.minnum.f32(float [[VAL31]], float [[ARG1:%.*]])
-; CHECK-NEXT: [[VAL56:%.*]] = tail call fast float @llvm.maxnum.f32(float [[ARG2:%.*]], float [[TMP16]])
+; CHECK-NEXT: [[VAL56:%.*]] = tail call fast float @llvm.maxnum.f32(float [[ARG2:%.*]], float [[TMP18]])
; CHECK-NEXT: call void @ham(float [[VAL55]], float [[VAL56]])
; CHECK-NEXT: br i1 [[ARG8:%.*]], label [[BB30]], label [[BB57]]
; CHECK: bb57:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll b/llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll
index 94a1d7aa1951c..bf4903fd19b09 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reused-extractelements.ll
@@ -4,13 +4,11 @@
; YAML: --- !Missed
; YAML-NEXT: Pass: slp-vectorizer
-; YAML-NEXT: Name: NotBeneficial
+; YAML-NEXT: Name: NotPossible
; YAML-NEXT: Function: g
; YAML-NEXT: Args:
-; YAML-NEXT: - String: 'List vectorization was possible but not beneficial with cost '
-; YAML-NEXT: - Cost: '0'
-; YAML-NEXT: - String: ' >= '
-; YAML-NEXT: - Treshold: '0'
+; YAML-NEXT: - String: 'Cannot SLP vectorize list: vectorization was impossible'
+; YAML-NEXT: - String: ' with available vectorization factors'
define <2 x i32> @g(<2 x i32> %x, i32 %a, i32 %b) {
; CHECK-LABEL: @g(
From 97dc50882cbc63d7098e95f73f242185c75c226b Mon Sep 17 00:00:00 2001
From: Shaw Young <58664393+shawbyoung at users.noreply.github.com>
Date: Wed, 3 Jul 2024 11:39:18 -0700
Subject: [PATCH 165/246] [BOLT] Match functions with name similarity (#95884)
A mapping from namespace to associated binary functions is used to
match function profiles to binary functions based on the edit distance
threshold set by the '--name-similarity-function-matching-threshold'
flag. The flag defaults to 0 (exact name matching) because the matching
is expensive, requiring the processing of all BFs.
Test Plan: Added name-similarity-function-matching.test. On a binary
with 5M functions, rewrite passes took ~520s without the flag and
~2018s with the flag set to 20.
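A minimal, self-contained sketch of the matching step (the candidate names and threshold below are invented for illustration; the real code computes distances with StringRef::edit_distance over demangled names grouped by namespace, restricted to functions with equal block counts):

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Plain Levenshtein distance, single-row variant.
static unsigned editDistance(const std::string &A, const std::string &B) {
  std::vector<unsigned> Row(B.size() + 1);
  for (size_t J = 0; J <= B.size(); ++J)
    Row[J] = J;
  for (size_t I = 1; I <= A.size(); ++I) {
    unsigned Diag = Row[0];
    Row[0] = I;
    for (size_t J = 1; J <= B.size(); ++J) {
      unsigned Tmp = Row[J];
      Row[J] = std::min({Row[J] + 1, Row[J - 1] + 1,
                         Diag + (A[I - 1] == B[J - 1] ? 0u : 1u)});
      Diag = Tmp;
    }
  }
  return Row[B.size()];
}

int main() {
  const std::string ProfiledName = "main2";
  const std::vector<std::string> Candidates = {"main", "foo"};
  const unsigned Threshold = 1; // the command-line flag's value

  unsigned Best = ~0u;
  std::string BestName;
  for (const std::string &C : Candidates) {
    unsigned D = editDistance(ProfiledName, C);
    if (D < Best) {
      Best = D;
      BestName = C;
    }
  }
  if (Best <= Threshold)
    std::printf("matched '%s' -> '%s' (distance %u)\n",
                ProfiledName.c_str(), BestName.c_str(), Best);
  return 0;
}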
---
bolt/docs/CommandLineArgumentReference.md | 4 +
bolt/include/bolt/Profile/YAMLProfileReader.h | 3 +
bolt/lib/Profile/YAMLProfileReader.cpp | 121 ++++++++++++++++++
.../name-similarity-function-matching.test | 63 +++++++++
4 files changed, 191 insertions(+)
create mode 100644 bolt/test/X86/name-similarity-function-matching.test
diff --git a/bolt/docs/CommandLineArgumentReference.md b/bolt/docs/CommandLineArgumentReference.md
index 00d472c578916..17c52c65e472f 100644
--- a/bolt/docs/CommandLineArgumentReference.md
+++ b/bolt/docs/CommandLineArgumentReference.md
@@ -688,6 +688,10 @@
Use a modified clustering algorithm geared towards minimizing branches
+- `--name-similarity-function-matching-threshold=<uint>`
+
+ Match functions using namespace and edit distance.
+
- `--no-inline`
Disable all inlining (overrides other inlining options)
diff --git a/bolt/include/bolt/Profile/YAMLProfileReader.h b/bolt/include/bolt/Profile/YAMLProfileReader.h
index 7a8aa176c30f1..8bcae2b4df739 100644
--- a/bolt/include/bolt/Profile/YAMLProfileReader.h
+++ b/bolt/include/bolt/Profile/YAMLProfileReader.h
@@ -93,6 +93,9 @@ class YAMLProfileReader : public ProfileReaderBase {
ProfiledFunctions.emplace(&BF);
}
+ /// Matches functions with similarly named profiled functions.
+ uint64_t matchWithNameSimilarity(BinaryContext &BC);
+
/// Check if the profile uses an event with a given \p Name.
bool usesEvent(StringRef Name) const;
};
diff --git a/bolt/lib/Profile/YAMLProfileReader.cpp b/bolt/lib/Profile/YAMLProfileReader.cpp
index 554def697fa21..63222147bedd6 100644
--- a/bolt/lib/Profile/YAMLProfileReader.cpp
+++ b/bolt/lib/Profile/YAMLProfileReader.cpp
@@ -11,8 +11,11 @@
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Passes/MCF.h"
#include "bolt/Profile/ProfileYAMLMapping.h"
+#include "bolt/Utils/NameResolver.h"
#include "bolt/Utils/Utils.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/edit_distance.h"
+#include "llvm/Demangle/Demangle.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
@@ -24,6 +27,11 @@ extern cl::OptionCategory BoltOptCategory;
extern cl::opt<bool> InferStaleProfile;
extern cl::opt<bool> Lite;
+cl::opt<unsigned> NameSimilarityFunctionMatchingThreshold(
+ "name-similarity-function-matching-threshold",
+ cl::desc("Match functions using namespace and edit distance"), cl::init(0),
+ cl::Hidden, cl::cat(BoltOptCategory));
+
static llvm::cl::opt<bool>
IgnoreHash("profile-ignore-hash",
cl::desc("ignore hash while reading function profile"),
@@ -350,6 +358,111 @@ bool YAMLProfileReader::mayHaveProfileData(const BinaryFunction &BF) {
return false;
}
+uint64_t YAMLProfileReader::matchWithNameSimilarity(BinaryContext &BC) {
+ uint64_t MatchedWithNameSimilarity = 0;
+ ItaniumPartialDemangler Demangler;
+
+ // Demangle and derive namespace from function name.
+ auto DemangleName = [&](std::string &FunctionName) {
+ StringRef RestoredName = NameResolver::restore(FunctionName);
+ return demangle(RestoredName);
+ };
+ auto DeriveNameSpace = [&](std::string &DemangledName) {
+ if (Demangler.partialDemangle(DemangledName.c_str()))
+ return std::string("");
+ std::vector<char> Buffer(DemangledName.begin(), DemangledName.end());
+ size_t BufferSize;
+ char *NameSpace =
+ Demangler.getFunctionDeclContextName(&Buffer[0], &BufferSize);
+ return std::string(NameSpace, BufferSize);
+ };
+
+ // Maps namespaces to associated function block counts, and caches
+ // profiled function names and namespaces, to minimize the number of
+ // BFs to process and avoid repeated name demangling/namespace derivation.
+ StringMap<std::set<uint32_t>> NamespaceToProfiledBFSizes;
+ std::vector<std::string> ProfileBFDemangledNames;
+ ProfileBFDemangledNames.reserve(YamlBP.Functions.size());
+ std::vector<std::string> ProfiledBFNamespaces;
+ ProfiledBFNamespaces.reserve(YamlBP.Functions.size());
+
+ for (auto &YamlBF : YamlBP.Functions) {
+ std::string YamlBFDemangledName = DemangleName(YamlBF.Name);
+ ProfileBFDemangledNames.push_back(YamlBFDemangledName);
+ std::string YamlBFNamespace = DeriveNameSpace(YamlBFDemangledName);
+ ProfiledBFNamespaces.push_back(YamlBFNamespace);
+ NamespaceToProfiledBFSizes[YamlBFNamespace].insert(YamlBF.NumBasicBlocks);
+ }
+
+ StringMap<std::vector<BinaryFunction *>> NamespaceToBFs;
+
+ // Maps namespaces to BFs excluding binary functions with no equal sized
+ // profiled functions belonging to the same namespace.
+ for (BinaryFunction *BF : BC.getAllBinaryFunctions()) {
+ std::string DemangledName = BF->getDemangledName();
+ std::string Namespace = DeriveNameSpace(DemangledName);
+
+ auto NamespaceToProfiledBFSizesIt =
+ NamespaceToProfiledBFSizes.find(Namespace);
+ // Skip if there are no ProfileBFs with a given \p Namespace.
+ if (NamespaceToProfiledBFSizesIt == NamespaceToProfiledBFSizes.end())
+ continue;
+ // Skip if there are no ProfileBFs in a given \p Namespace with
+ // equal number of blocks.
+ if (NamespaceToProfiledBFSizesIt->second.count(BF->size()) == 0)
+ continue;
+ auto NamespaceToBFsIt = NamespaceToBFs.find(Namespace);
+ if (NamespaceToBFsIt == NamespaceToBFs.end())
+ NamespaceToBFs[Namespace] = {BF};
+ else
+ NamespaceToBFsIt->second.push_back(BF);
+ }
+
+ // Iterates through all profiled functions and binary functions belonging to
+ // the same namespace and matches based on edit distance threshold.
+ assert(YamlBP.Functions.size() == ProfiledBFNamespaces.size() &&
+ ProfiledBFNamespaces.size() == ProfileBFDemangledNames.size());
+ for (size_t I = 0; I < YamlBP.Functions.size(); ++I) {
+ yaml::bolt::BinaryFunctionProfile &YamlBF = YamlBP.Functions[I];
+ std::string &YamlBFNamespace = ProfiledBFNamespaces[I];
+ if (YamlBF.Used)
+ continue;
+ // Skip if there are no BFs in a given \p Namespace.
+ auto It = NamespaceToBFs.find(YamlBFNamespace);
+ if (It == NamespaceToBFs.end())
+ continue;
+
+ std::string &YamlBFDemangledName = ProfileBFDemangledNames[I];
+ std::vector<BinaryFunction *> BFs = It->second;
+ unsigned MinEditDistance = UINT_MAX;
+ BinaryFunction *ClosestNameBF = nullptr;
+
+ // Determines the BF closest to the profiled function, in the
+ // same namespace.
+ for (BinaryFunction *BF : BFs) {
+ if (ProfiledFunctions.count(BF))
+ continue;
+ if (BF->size() != YamlBF.NumBasicBlocks)
+ continue;
+ std::string BFDemangledName = BF->getDemangledName();
+ unsigned BFEditDistance =
+ StringRef(BFDemangledName).edit_distance(YamlBFDemangledName);
+ if (BFEditDistance < MinEditDistance) {
+ MinEditDistance = BFEditDistance;
+ ClosestNameBF = BF;
+ }
+ }
+
+ if (ClosestNameBF &&
+ MinEditDistance <= opts::NameSimilarityFunctionMatchingThreshold) {
+ matchProfileToFunction(YamlBF, *ClosestNameBF);
+ ++MatchedWithNameSimilarity;
+ }
+ }
+
+ return MatchedWithNameSimilarity;
+}
+
Error YAMLProfileReader::readProfile(BinaryContext &BC) {
if (opts::Verbosity >= 1) {
outs() << "BOLT-INFO: YAML profile with hash: ";
@@ -461,6 +574,12 @@ Error YAMLProfileReader::readProfile(BinaryContext &BC) {
if (!YamlBF.Used && BF && !ProfiledFunctions.count(BF))
matchProfileToFunction(YamlBF, *BF);
+ // Uses name similarity to match functions that were not matched by exact name.
+ uint64_t MatchedWithNameSimilarity =
+ opts::NameSimilarityFunctionMatchingThreshold > 0
+ ? matchWithNameSimilarity(BC)
+ : 0;
+
for (yaml::bolt::BinaryFunctionProfile &YamlBF : YamlBP.Functions)
if (!YamlBF.Used && opts::Verbosity >= 1)
errs() << "BOLT-WARNING: profile ignored for function " << YamlBF.Name
@@ -473,6 +592,8 @@ Error YAMLProfileReader::readProfile(BinaryContext &BC) {
<< " functions with hash\n";
outs() << "BOLT-INFO: matched " << MatchedWithLTOCommonName
<< " functions with matching LTO common names\n";
+ outs() << "BOLT-INFO: matched " << MatchedWithNameSimilarity
+ << " functions with similar names\n";
}
// Set for parseFunctionProfile().
diff --git a/bolt/test/X86/name-similarity-function-matching.test b/bolt/test/X86/name-similarity-function-matching.test
new file mode 100644
index 0000000000000..97ef4bf0cff61
--- /dev/null
+++ b/bolt/test/X86/name-similarity-function-matching.test
@@ -0,0 +1,63 @@
+## Tests function matching in YAMLProfileReader by name similarity.
+
+# REQUIRES: system-linux
+# RUN: split-file %s %t
+# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %t/main.s -o %t.o
+# RUN: %clang %cflags %t.o -o %t.exe -Wl,-q -nostdlib
+# RUN: llvm-bolt %t.exe -o %t.out --data %t/yaml -v=2 \
+# RUN: --print-cfg --name-similarity-function-matching-threshold=1 --funcs=main --profile-ignore-hash=0 2>&1 | FileCheck %s
+
+# CHECK: BOLT-INFO: matched 1 functions with similar names
+
+#--- main.s
+.globl main
+.type main, @function
+main:
+ .cfi_startproc
+.LBB00:
+ pushq %rbp
+ movq %rsp, %rbp
+ subq $16, %rsp
+ testq %rax, %rax
+ js .LBB03
+.LBB01:
+ jne .LBB04
+.LBB02:
+ nop
+.LBB03:
+ xorl %eax, %eax
+ addq $16, %rsp
+ popq %rbp
+ retq
+.LBB04:
+ xorl %eax, %eax
+ addq $16, %rsp
+ popq %rbp
+ retq
+## For relocations against .text
+ .reloc 0, R_X86_64_NONE
+ .cfi_endproc
+ .size main, .-main
+
+#--- yaml
+---
+header:
+ profile-version: 1
+ binary-name: 'hashing-based-function-matching.s.tmp.exe'
+ binary-build-id: '<unknown>'
+ profile-flags: [ lbr ]
+ profile-origin: branch profile reader
+ profile-events: ''
+ dfs-order: false
+ hash-func: xxh3
+functions:
+ - name: main2
+ fid: 0
+ hash: 0x0000000000000001
+ exec: 1
+ nblocks: 5
+ blocks:
+ - bid: 1
+ insns: 1
+ succ: [ { bid: 3, cnt: 1} ]
+...
From c156d421851d175805309a83bf703ad304955847 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 3 Jul 2024 11:49:08 -0700
Subject: [PATCH 166/246] [RISCV] Remove SeenExtMap from
RISCVISAInfo::parseArchString. (#97506)
Use the Exts map directly instead of adding to a temporary MapVector
first.
There are a couple of functional changes from this.
- If an unknown extension is duplicated, we will now print an error for
it being unknown instead of an error for it being duplicated.
- If an unknown extension is followed by an underscore with no extension
after it, we will error for the unknown extension instead of the
dangling underscore.
These don't seem like serious changes to me. I've updated tests
accordingly.
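A small sketch of the emplace-based duplicate check the patch switches to (extension names and the version value are illustrative only): std::map::emplace reports through its boolean result whether the key was newly inserted, so detection and insertion happen in one step.

#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<std::string, unsigned> Exts;

  const char *Parsed[] = {"i", "m", "m"}; // e.g. parsed from "rv32im_m"
  for (const char *Name : Parsed) {
    // A false `.second` means the extension was already recorded.
    if (!Exts.emplace(Name, 2u).second) {
      std::fprintf(stderr,
                   "duplicated standard user-level extension '%s'\n", Name);
      return 1;
    }
  }
  return 0;
}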
---
clang/test/Driver/riscv-arch.c | 8 +++----
llvm/lib/TargetParser/RISCVISAInfo.cpp | 32 +++++++++-----------------
2 files changed, 15 insertions(+), 25 deletions(-)
diff --git a/clang/test/Driver/riscv-arch.c b/clang/test/Driver/riscv-arch.c
index ffd92e1f398c4..0f285f7c0033c 100644
--- a/clang/test/Driver/riscv-arch.c
+++ b/clang/test/Driver/riscv-arch.c
@@ -306,7 +306,7 @@
// RUN: not %clang --target=riscv32-unknown-elf -march=rv32ixabc_ -### %s \
// RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-XSEP %s
// RV32-XSEP: error: invalid arch name 'rv32ixabc_',
-// RV32-XSEP: extension name missing after separator '_'
+// RV32-XSEP: unsupported non-standard user-level extension 'xabc'
// RUN: not %clang --target=riscv32-unknown-elf -march=rv32ixabc_a -### %s \
// RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-PREFIX %s
@@ -318,10 +318,10 @@
// RV32-X-ORDER: error: invalid arch name 'rv32ixdef_sabc',
// RV32-X-ORDER unsupported non-standard user-level extension 'xdef'
-// RUN: not %clang --target=riscv32-unknown-elf -march=rv32ixabc_xabc -### %s \
+// RUN: not %clang --target=riscv32-unknown-elf -march=rv32im_m -### %s \
// RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-XDUP %s
-// RV32-XDUP: error: invalid arch name 'rv32ixabc_xabc',
-// RV32-XDUP: duplicated non-standard user-level extension 'xabc'
+// RV32-XDUP: error: invalid arch name 'rv32im_m',
+// RV32-XDUP: duplicated standard user-level extension 'm'
// RUN: not %clang --target=riscv32-unknown-elf -march=rv32ixabc_xdef -### %s \
// RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-X-X-INVAL %s
diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp
index 133c6852bc85e..ee8024b3bea36 100644
--- a/llvm/lib/TargetParser/RISCVISAInfo.cpp
+++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp
@@ -7,9 +7,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/TargetParser/RISCVISAInfo.h"
-#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Errc.h"
@@ -558,9 +556,6 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
"profile name");
std::unique_ptr<RISCVISAInfo> ISAInfo(new RISCVISAInfo(XLen));
- MapVector<std::string, RISCVISAUtils::ExtensionVersion,
- std::map<std::string, unsigned>>
- SeenExtMap;
// The canonical order specified in ISA manual.
// Ref: Table 22.1 in RISC-V User-Level ISA V2.2
@@ -583,8 +578,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
EnableExperimentalExtension, ExperimentalExtensionVersionCheck))
return std::move(E);
- // Postpone AddExtension until end of this function
- SeenExtMap[StringRef(&Baseline, 1).str()] = {Major, Minor};
+ ISAInfo->Exts[std::string(1, Baseline)] = {Major, Minor};
break;
case 'g':
// g expands to extensions in RISCVGImplications.
@@ -597,11 +591,11 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
// No matter which version is given to `g`, we always set imafd to default
// version since the we don't have clear version scheme for that on
// ISA spec.
- for (const auto *Ext : RISCVGImplications) {
+ for (const char *Ext : RISCVGImplications) {
auto Version = findDefaultVersion(Ext);
assert(Version && "Default extension version not found?");
// Postpone AddExtension until end of this function
- SeenExtMap[Ext] = {Version->Major, Version->Minor};
+ ISAInfo->Exts[std::string(Ext)] = {Version->Major, Version->Minor};
}
break;
}
@@ -662,23 +656,19 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
if (Name.size() == 1)
Ext = Ext.substr(ConsumeLength);
- // Check if duplicated extension.
- if (SeenExtMap.contains(Name.str()))
+ if (!RISCVISAInfo::isSupportedExtension(Name))
+ return getErrorForInvalidExt(Name);
+
+ // Insert and error for duplicates.
+ if (!ISAInfo->Exts
+ .emplace(Name.str(),
+ RISCVISAUtils::ExtensionVersion{Major, Minor})
+ .second)
return getError("duplicated " + Desc + " '" + Name + "'");
- SeenExtMap[Name.str()] = {Major, Minor};
} while (!Ext.empty());
}
- // Check all Extensions are supported.
- for (auto &SeenExtAndVers : SeenExtMap) {
- const std::string &ExtName = SeenExtAndVers.first;
-
- if (!RISCVISAInfo::isSupportedExtension(ExtName))
- return getErrorForInvalidExt(ExtName);
- ISAInfo->Exts[ExtName] = SeenExtAndVers.second;
- }
-
return RISCVISAInfo::postProcessAndChecking(std::move(ISAInfo));
}
From edbc0e30a9e587cee1189be023b9385adc2f239a Mon Sep 17 00:00:00 2001
From: srcarroll <50210727+srcarroll at users.noreply.github.com>
Date: Wed, 3 Jul 2024 14:03:54 -0500
Subject: [PATCH 167/246] [mlir][loops] Reland Refactor LoopFuseSiblingOp and
support parallel fusion #94391 (#97607)
The refactor had a bug where the fused loop was inserted in an incorrect
location. This patch fixes the bug and relands the original PR
https://github.com/llvm/llvm-project/pull/94391.
This patch refactors code related to the LoopFuseSiblingOp transform in
an attempt to reduce duplicated common code. The aim is to refactor as
much as possible into functions on LoopLikeOpInterface, but this is
still a work in progress. A full refactor will require more additions
to the LoopLikeOpInterface.
In addition, scf.parallel fusion support has been added.
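A sketch of how a caller might drive the new helpers (the `fuseSiblings` wrapper is hypothetical; only the two functions it calls are declared by this patch), assuming `target` and `source` are sibling scf.parallel ops already known to be independent:

#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SCF/Utils/Utils.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Fuses `target` into `source` if the structural check passes. No
// dependence analysis happens here; the caller must have proven the
// loops independent of each other.
static scf::ParallelOp fuseSiblings(scf::ParallelOp target,
                                    scf::ParallelOp source,
                                    RewriterBase &rewriter) {
  Diagnostic diag(target.getLoc(), DiagnosticSeverity::Remark);
  if (!checkFusionStructuralLegality(target, source, diag))
    return nullptr; // Not siblings, or mismatched iteration spaces.
  return fuseIndependentSiblingParallelLoops(target, source, rewriter);
}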
---
mlir/include/mlir/Dialect/SCF/IR/SCFOps.td | 3 +-
mlir/include/mlir/Dialect/SCF/Utils/Utils.h | 20 ++
.../mlir/Interfaces/LoopLikeInterface.h | 20 ++
mlir/lib/Dialect/SCF/IR/SCF.cpp | 38 +++
.../SCF/TransformOps/SCFTransformOps.cpp | 140 ++-------
.../SCF/Transforms/ParallelLoopFusion.cpp | 80 +----
mlir/lib/Dialect/SCF/Utils/Utils.cpp | 279 +++++++++++------
mlir/lib/Interfaces/LoopLikeInterface.cpp | 59 ++++
.../SCF/transform-loop-fuse-sibling.mlir | 290 +++++++++++++++++-
9 files changed, 646 insertions(+), 283 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
index f35ea962bea16..bf95fbe6721cf 100644
--- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
@@ -303,7 +303,8 @@ def ForallOp : SCF_Op<"forall", [
DeclareOpInterfaceMethods<LoopLikeOpInterface,
["getInitsMutable", "getRegionIterArgs", "getLoopInductionVars",
"getLoopLowerBounds", "getLoopUpperBounds", "getLoopSteps",
- "promoteIfSingleIteration", "yieldTiledValuesAndReplace"]>,
+ "replaceWithAdditionalYields", "promoteIfSingleIteration",
+ "yieldTiledValuesAndReplace"]>,
RecursiveMemoryEffects,
SingleBlockImplicitTerminator<"scf::InParallelOp">,
DeclareOpInterfaceMethods<RegionBranchOpInterface>,
diff --git a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
index de807c3e4e1f8..6a40304e2eeba 100644
--- a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
@@ -181,6 +181,16 @@ Loops tilePerfectlyNested(scf::ForOp rootForOp, ArrayRef<Value> sizes);
void getPerfectlyNestedLoops(SmallVectorImpl<scf::ForOp> &nestedLoops,
scf::ForOp root);
+//===----------------------------------------------------------------------===//
+// Fusion related helpers
+//===----------------------------------------------------------------------===//
+
+/// Check structural compatibility between two loops such as iteration space
+/// and dominance.
+bool checkFusionStructuralLegality(LoopLikeOpInterface target,
+ LoopLikeOpInterface source,
+ Diagnostic &diag);
+
/// Given two scf.forall loops, `target` and `source`, fuses `target` into
/// `source`. Assumes that the given loops are siblings and are independent of
/// each other.
@@ -202,6 +212,16 @@ scf::ForallOp fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForOp fuseIndependentSiblingForLoops(scf::ForOp target, scf::ForOp source,
RewriterBase &rewriter);
+/// Given two scf.parallel loops, `target` and `source`, fuses `target` into
+/// `source`. Assumes that the given loops are siblings and are independent of
+/// each other.
+///
+/// This function does not perform any legality checks and simply fuses the
+/// loops. The caller is responsible for ensuring that the loops are legal to
+/// fuse.
+scf::ParallelOp fuseIndependentSiblingParallelLoops(scf::ParallelOp target,
+ scf::ParallelOp source,
+ RewriterBase &rewriter);
} // namespace mlir
#endif // MLIR_DIALECT_SCF_UTILS_UTILS_H_
diff --git a/mlir/include/mlir/Interfaces/LoopLikeInterface.h b/mlir/include/mlir/Interfaces/LoopLikeInterface.h
index 9925fc6ce6ca9..d08e097a9b4af 100644
--- a/mlir/include/mlir/Interfaces/LoopLikeInterface.h
+++ b/mlir/include/mlir/Interfaces/LoopLikeInterface.h
@@ -90,4 +90,24 @@ struct JamBlockGatherer {
/// Include the generated interface declarations.
#include "mlir/Interfaces/LoopLikeInterface.h.inc"
+namespace mlir {
+/// A function that rewrites `target`'s terminator as a terminator obtained by
+/// fusing `source` into `target`.
+using FuseTerminatorFn =
+ function_ref<void(RewriterBase &rewriter, LoopLikeOpInterface source,
+ LoopLikeOpInterface &target, IRMapping mapping)>;
+
+/// Returns a fused `LoopLikeOpInterface` created by fusing `source` to
+/// `target`. The `NewYieldValuesFn` callback is used to pass to the
+/// `replaceWithAdditionalYields` interface method to replace the loop with a
+/// new loop with (possibly) additional yields, while the `FuseTerminatorFn`
+/// callback is responsible for updating the fused loop terminator.
+LoopLikeOpInterface createFused(LoopLikeOpInterface target,
+ LoopLikeOpInterface source,
+ RewriterBase &rewriter,
+ NewYieldValuesFn newYieldValuesFn,
+ FuseTerminatorFn fuseTerminatorFn);
+
+} // namespace mlir
+
#endif // MLIR_INTERFACES_LOOPLIKEINTERFACE_H_
diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index 907d7f794593d..cb15e0ecebf05 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -618,6 +618,44 @@ void ForOp::getSuccessorRegions(RegionBranchPoint point,
SmallVector<Region *> ForallOp::getLoopRegions() { return {&getRegion()}; }
+FailureOr<LoopLikeOpInterface> ForallOp::replaceWithAdditionalYields(
+ RewriterBase &rewriter, ValueRange newInitOperands,
+ bool replaceInitOperandUsesInLoop,
+ const NewYieldValuesFn &newYieldValuesFn) {
+ // Create a new loop before the existing one, with the extra operands.
+ OpBuilder::InsertionGuard g(rewriter);
+ rewriter.setInsertionPoint(getOperation());
+ SmallVector<Value> inits(getOutputs());
+ llvm::append_range(inits, newInitOperands);
+ scf::ForallOp newLoop = rewriter.create<scf::ForallOp>(
+ getLoc(), getMixedLowerBound(), getMixedUpperBound(), getMixedStep(),
+ inits, getMapping(),
+ /*bodyBuilderFn =*/[](OpBuilder &, Location, ValueRange) {});
+
+ // Move the loop body to the new op.
+ rewriter.mergeBlocks(getBody(), newLoop.getBody(),
+ newLoop.getBody()->getArguments().take_front(
+ getBody()->getNumArguments()));
+
+ if (replaceInitOperandUsesInLoop) {
+ // Replace all uses of `newInitOperands` with the corresponding basic block
+ // arguments.
+ for (auto &&[newOperand, oldOperand] :
+ llvm::zip(newInitOperands, newLoop.getBody()->getArguments().take_back(
+ newInitOperands.size()))) {
+ rewriter.replaceUsesWithIf(newOperand, oldOperand, [&](OpOperand &use) {
+ Operation *user = use.getOwner();
+ return newLoop->isProperAncestor(user);
+ });
+ }
+ }
+
+ // Replace the old loop.
+ rewriter.replaceOp(getOperation(),
+ newLoop->getResults().take_front(getNumResults()));
+ return cast<LoopLikeOpInterface>(newLoop.getOperation());
+}
+
/// Promotes the loop body of a forallOp to its containing block if it can be
/// determined that the loop has a single iteration.
LogicalResult scf::ForallOp::promoteIfSingleIteration(RewriterBase &rewriter) {
diff --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
index 56ff2709a589e..41834fea3bb84 100644
--- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
+++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
@@ -261,8 +261,10 @@ loopScheduling(scf::ForOp forOp,
return 1;
};
- std::optional<int64_t> ubConstant = getConstantIntValue(forOp.getUpperBound());
- std::optional<int64_t> lbConstant = getConstantIntValue(forOp.getLowerBound());
+ std::optional<int64_t> ubConstant =
+ getConstantIntValue(forOp.getUpperBound());
+ std::optional<int64_t> lbConstant =
+ getConstantIntValue(forOp.getLowerBound());
DenseMap<Operation *, unsigned> opCycles;
std::map<unsigned, std::vector<Operation *>> wrappedSchedule;
for (Operation &op : forOp.getBody()->getOperations()) {
@@ -447,113 +449,6 @@ void transform::TakeAssumedBranchOp::getEffects(
// LoopFuseSiblingOp
//===----------------------------------------------------------------------===//
-/// Check if `target` and `source` are siblings, in the context that `target`
-/// is being fused into `source`.
-///
-/// This is a simple check that just checks if both operations are in the same
-/// block and some checks to ensure that the fused IR does not violate
-/// dominance.
-static DiagnosedSilenceableFailure isOpSibling(Operation *target,
- Operation *source) {
- // Check if both operations are same.
- if (target == source)
- return emitSilenceableFailure(source)
- << "target and source need to be different loops";
-
- // Check if both operations are in the same block.
- if (target->getBlock() != source->getBlock())
- return emitSilenceableFailure(source)
- << "target and source are not in the same block";
-
- // Check if fusion will violate dominance.
- DominanceInfo domInfo(source);
- if (target->isBeforeInBlock(source)) {
- // Since `target` is before `source`, all users of results of `target`
- // need to be dominated by `source`.
- for (Operation *user : target->getUsers()) {
- if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
- return emitSilenceableFailure(target)
- << "user of results of target should be properly dominated by "
- "source";
- }
- }
- } else {
- // Since `target` is after `source`, all values used by `target` need
- // to dominate `source`.
-
- // Check if operands of `target` are dominated by `source`.
- for (Value operand : target->getOperands()) {
- Operation *operandOp = operand.getDefiningOp();
- // Operands without defining operations are block arguments. When `target`
- // and `source` occur in the same block, these operands dominate `source`.
- if (!operandOp)
- continue;
-
- // Operand's defining operation should properly dominate `source`.
- if (!domInfo.properlyDominates(operandOp, source,
- /*enclosingOpOk=*/false))
- return emitSilenceableFailure(target)
- << "operands of target should be properly dominated by source";
- }
-
- // Check if values used by `target` are dominated by `source`.
- bool failed = false;
- OpOperand *failedValue = nullptr;
- visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
- Operation *operandOp = operand->get().getDefiningOp();
- if (operandOp && !domInfo.properlyDominates(operandOp, source,
- /*enclosingOpOk=*/false)) {
- // `operand` is not an argument of an enclosing block and the defining
- // op of `operand` is outside `target` but does not dominate `source`.
- failed = true;
- failedValue = operand;
- }
- });
-
- if (failed)
- return emitSilenceableFailure(failedValue->getOwner())
- << "values used inside regions of target should be properly "
- "dominated by source";
- }
-
- return DiagnosedSilenceableFailure::success();
-}
-
-/// Check if `target` scf.forall can be fused into `source` scf.forall.
-///
-/// This simply checks if both loops have the same bounds, steps and mapping.
-/// No attempt is made at checking that the side effects of `target` and
-/// `source` are independent of each other.
-static bool isForallWithIdenticalConfiguration(Operation *target,
- Operation *source) {
- auto targetOp = dyn_cast<scf::ForallOp>(target);
- auto sourceOp = dyn_cast<scf::ForallOp>(source);
- if (!targetOp || !sourceOp)
- return false;
-
- return targetOp.getMixedLowerBound() == sourceOp.getMixedLowerBound() &&
- targetOp.getMixedUpperBound() == sourceOp.getMixedUpperBound() &&
- targetOp.getMixedStep() == sourceOp.getMixedStep() &&
- targetOp.getMapping() == sourceOp.getMapping();
-}
-
-/// Check if `target` scf.for can be fused into `source` scf.for.
-///
-/// This simply checks if both loops have the same bounds and steps. No attempt
-/// is made at checking that the side effects of `target` and `source` are
-/// independent of each other.
-static bool isForWithIdenticalConfiguration(Operation *target,
- Operation *source) {
- auto targetOp = dyn_cast<scf::ForOp>(target);
- auto sourceOp = dyn_cast<scf::ForOp>(source);
- if (!targetOp || !sourceOp)
- return false;
-
- return targetOp.getLowerBound() == sourceOp.getLowerBound() &&
- targetOp.getUpperBound() == sourceOp.getUpperBound() &&
- targetOp.getStep() == sourceOp.getStep();
-}
-
DiagnosedSilenceableFailure
transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
transform::TransformResults &results,
@@ -569,25 +464,32 @@ transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
<< "source handle (got " << llvm::range_size(sourceOps) << ")";
}
- Operation *target = *targetOps.begin();
- Operation *source = *sourceOps.begin();
+ auto target = dyn_cast<LoopLikeOpInterface>(*targetOps.begin());
+ auto source = dyn_cast<LoopLikeOpInterface>(*sourceOps.begin());
+ if (!target || !source)
+ return emitSilenceableFailure(target->getLoc())
+ << "target or source is not a loop op";
- // Check if the target and source are siblings.
- DiagnosedSilenceableFailure diag = isOpSibling(target, source);
- if (!diag.succeeded())
- return diag;
+ // Check if loops can be fused
+ Diagnostic diag(target.getLoc(), DiagnosticSeverity::Error);
+ if (!mlir::checkFusionStructuralLegality(target, source, diag))
+ return DiagnosedSilenceableFailure::silenceableFailure(std::move(diag));
Operation *fusedLoop;
- /// TODO: Support fusion for loop-like ops besides scf.for and scf.forall.
- if (isForWithIdenticalConfiguration(target, source)) {
+ // TODO: Support fusion for loop-like ops besides scf.for, scf.forall
+ // and scf.parallel.
+ if (isa<scf::ForOp>(target) && isa<scf::ForOp>(source)) {
fusedLoop = fuseIndependentSiblingForLoops(
cast<scf::ForOp>(target), cast<scf::ForOp>(source), rewriter);
- } else if (isForallWithIdenticalConfiguration(target, source)) {
+ } else if (isa<scf::ForallOp>(target) && isa<scf::ForallOp>(source)) {
fusedLoop = fuseIndependentSiblingForallLoops(
cast<scf::ForallOp>(target), cast<scf::ForallOp>(source), rewriter);
+ } else if (isa<scf::ParallelOp>(target) && isa<scf::ParallelOp>(source)) {
+ fusedLoop = fuseIndependentSiblingParallelLoops(
+ cast<scf::ParallelOp>(target), cast<scf::ParallelOp>(source), rewriter);
} else
return emitSilenceableFailure(target->getLoc())
- << "operations cannot be fused";
+ << "unsupported loop type for fusion";
assert(fusedLoop && "failed to fuse operations");
diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
index 5934d85373b03..b775f988576e3 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
@@ -16,6 +16,7 @@
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SCF/Transforms/Transforms.h"
+#include "mlir/Dialect/SCF/Utils/Utils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
@@ -37,24 +38,6 @@ static bool hasNestedParallelOp(ParallelOp ploop) {
return walkResult.wasInterrupted();
}
-/// Verify equal iteration spaces.
-static bool equalIterationSpaces(ParallelOp firstPloop,
- ParallelOp secondPloop) {
- if (firstPloop.getNumLoops() != secondPloop.getNumLoops())
- return false;
-
- auto matchOperands = [&](const OperandRange &lhs,
- const OperandRange &rhs) -> bool {
- // TODO: Extend this to support aliases and equal constants.
- return std::equal(lhs.begin(), lhs.end(), rhs.begin());
- };
- return matchOperands(firstPloop.getLowerBound(),
- secondPloop.getLowerBound()) &&
- matchOperands(firstPloop.getUpperBound(),
- secondPloop.getUpperBound()) &&
- matchOperands(firstPloop.getStep(), secondPloop.getStep());
-}
-
/// Checks if the parallel loops have mixed access to the same buffers. Returns
/// `true` if the first parallel loop writes to the same indices that the second
/// loop reads.
@@ -153,9 +136,10 @@ verifyDependencies(ParallelOp firstPloop, ParallelOp secondPloop,
static bool isFusionLegal(ParallelOp firstPloop, ParallelOp secondPloop,
const IRMapping &firstToSecondPloopIndices,
llvm::function_ref<bool(Value, Value)> mayAlias) {
+ Diagnostic diag(firstPloop.getLoc(), DiagnosticSeverity::Remark);
return !hasNestedParallelOp(firstPloop) &&
!hasNestedParallelOp(secondPloop) &&
- equalIterationSpaces(firstPloop, secondPloop) &&
+ checkFusionStructuralLegality(firstPloop, secondPloop, diag) &&
succeeded(verifyDependencies(firstPloop, secondPloop,
firstToSecondPloopIndices, mayAlias));
}
@@ -174,61 +158,9 @@ static void fuseIfLegal(ParallelOp firstPloop, ParallelOp &secondPloop,
mayAlias))
return;
- DominanceInfo dom;
- // We are fusing first loop into second, make sure there are no users of the
- // first loop results between loops.
- for (Operation *user : firstPloop->getUsers())
- if (!dom.properlyDominates(secondPloop, user, /*enclosingOpOk*/ false))
- return;
-
- ValueRange inits1 = firstPloop.getInitVals();
- ValueRange inits2 = secondPloop.getInitVals();
-
- SmallVector<Value> newInitVars(inits1.begin(), inits1.end());
- newInitVars.append(inits2.begin(), inits2.end());
-
- IRRewriter b(builder);
- b.setInsertionPoint(secondPloop);
- auto newSecondPloop = b.create<ParallelOp>(
- secondPloop.getLoc(), secondPloop.getLowerBound(),
- secondPloop.getUpperBound(), secondPloop.getStep(), newInitVars);
-
- Block *newBlock = newSecondPloop.getBody();
- auto term1 = cast<ReduceOp>(block1->getTerminator());
- auto term2 = cast<ReduceOp>(block2->getTerminator());
-
- b.inlineBlockBefore(block2, newBlock, newBlock->begin(),
- newBlock->getArguments());
- b.inlineBlockBefore(block1, newBlock, newBlock->begin(),
- newBlock->getArguments());
-
- ValueRange results = newSecondPloop.getResults();
- if (!results.empty()) {
- b.setInsertionPointToEnd(newBlock);
-
- ValueRange reduceArgs1 = term1.getOperands();
- ValueRange reduceArgs2 = term2.getOperands();
- SmallVector<Value> newReduceArgs(reduceArgs1.begin(), reduceArgs1.end());
- newReduceArgs.append(reduceArgs2.begin(), reduceArgs2.end());
-
- auto newReduceOp = b.create<scf::ReduceOp>(term2.getLoc(), newReduceArgs);
-
- for (auto &&[i, reg] : llvm::enumerate(llvm::concat<Region>(
- term1.getReductions(), term2.getReductions()))) {
- Block &oldRedBlock = reg.front();
- Block &newRedBlock = newReduceOp.getReductions()[i].front();
- b.inlineBlockBefore(&oldRedBlock, &newRedBlock, newRedBlock.begin(),
- newRedBlock.getArguments());
- }
-
- firstPloop.replaceAllUsesWith(results.take_front(inits1.size()));
- secondPloop.replaceAllUsesWith(results.take_back(inits2.size()));
- }
- term1->erase();
- term2->erase();
- firstPloop.erase();
- secondPloop.erase();
- secondPloop = newSecondPloop;
+ IRRewriter rewriter(builder);
+ secondPloop = mlir::fuseIndependentSiblingParallelLoops(
+ firstPloop, secondPloop, rewriter);
}
void mlir::scf::naivelyFuseParallelOps(
diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index c0ee9d2afe91c..abfc9a1b4d444 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -17,6 +17,7 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/Dominance.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/PatternMatch.h"
@@ -1262,54 +1263,131 @@ TileLoops mlir::extractFixedOuterLoops(scf::ForOp rootForOp,
return tileLoops;
}
+//===----------------------------------------------------------------------===//
+// Fusion related helpers
+//===----------------------------------------------------------------------===//
+
+/// Check if `target` and `source` are siblings, in the context that `target`
+/// is being fused into `source`.
+///
+/// This is a simple check verifying that both operations are in the same
+/// block, plus some additional checks ensuring that the fused IR does not
+/// violate dominance.
+static bool isOpSibling(Operation *target, Operation *source,
+ Diagnostic &diag) {
+  // Check if both operations are the same.
+ if (target == source) {
+ diag << "target and source need to be different loops";
+ return false;
+ }
+
+ // Check if both operations are in the same block.
+ if (target->getBlock() != source->getBlock()) {
+ diag << "target and source are not in the same block";
+ return false;
+ }
+
+ // Check if fusion will violate dominance.
+ DominanceInfo domInfo(source);
+ if (target->isBeforeInBlock(source)) {
+ // Since `target` is before `source`, all users of results of `target`
+ // need to be dominated by `source`.
+ for (Operation *user : target->getUsers()) {
+ if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
+ diag << "user of results of target should "
+ "be properly dominated by source";
+ return false;
+ }
+ }
+ } else {
+ // Since `target` is after `source`, all values used by `target` need
+ // to dominate `source`.
+
+ // Check if operands of `target` are dominated by `source`.
+ for (Value operand : target->getOperands()) {
+ Operation *operandOp = operand.getDefiningOp();
+ // Operands without defining operations are block arguments. When `target`
+ // and `source` occur in the same block, these operands dominate `source`.
+ if (!operandOp)
+ continue;
+
+ // Operand's defining operation should properly dominate `source`.
+ if (!domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false)) {
+ diag << "operands of target should be properly dominated by source";
+ return false;
+ }
+ }
+
+ // Check if values used by `target` are dominated by `source`.
+ bool failed = false;
+ OpOperand *failedValue = nullptr;
+ visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
+ Operation *operandOp = operand->get().getDefiningOp();
+ if (operandOp && !domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false)) {
+ // `operand` is not an argument of an enclosing block and the defining
+ // op of `operand` is outside `target` but does not dominate `source`.
+ failed = true;
+ failedValue = operand;
+ }
+ });
+
+ if (failed) {
+ diag << "values used inside regions of target should be properly "
+ "dominated by source";
+ diag.attachNote(failedValue->getOwner()->getLoc()) << "see operation";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool mlir::checkFusionStructuralLegality(LoopLikeOpInterface target,
+ LoopLikeOpInterface source,
+ Diagnostic &diag) {
+ if (target->getName() != source->getName()) {
+ diag << "target and source must be same loop type";
+ return false;
+ }
+
+ bool iterSpaceEq =
+ target.getLoopLowerBounds() == source.getLoopLowerBounds() &&
+ target.getLoopUpperBounds() == source.getLoopUpperBounds() &&
+ target.getLoopSteps() == source.getLoopSteps();
+  // TODO: Decouple checks on concrete loop types and move this function
+  // into a general utility for `LoopLikeOpInterface`.
+ if (auto forAllTarget = dyn_cast<scf::ForallOp>(*target))
+ iterSpaceEq = iterSpaceEq && forAllTarget.getMapping() ==
+ cast<scf::ForallOp>(*source).getMapping();
+ if (!iterSpaceEq) {
+ diag << "target and source iteration spaces must be equal";
+ return false;
+ }
+ return isOpSibling(target, source, diag);
+}
+
scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForallOp source,
RewriterBase &rewriter) {
- unsigned numTargetOuts = target.getNumResults();
- unsigned numSourceOuts = source.getNumResults();
-
- // Create fused shared_outs.
- SmallVector<Value> fusedOuts;
- llvm::append_range(fusedOuts, target.getOutputs());
- llvm::append_range(fusedOuts, source.getOutputs());
-
- // Create a new scf.forall op after the source loop.
- rewriter.setInsertionPointAfter(source);
- scf::ForallOp fusedLoop = rewriter.create<scf::ForallOp>(
- source.getLoc(), source.getMixedLowerBound(), source.getMixedUpperBound(),
- source.getMixedStep(), fusedOuts, source.getMapping());
-
- // Map control operands.
- IRMapping mapping;
- mapping.map(target.getInductionVars(), fusedLoop.getInductionVars());
- mapping.map(source.getInductionVars(), fusedLoop.getInductionVars());
-
- // Map shared outs.
- mapping.map(target.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
- mapping.map(source.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
-
- // Append everything except the terminator into the fused operation.
- rewriter.setInsertionPointToStart(fusedLoop.getBody());
- for (Operation &op : target.getBody()->without_terminator())
- rewriter.clone(op, mapping);
- for (Operation &op : source.getBody()->without_terminator())
- rewriter.clone(op, mapping);
-
- // Fuse the old terminator in_parallel ops into the new one.
- scf::InParallelOp targetTerm = target.getTerminator();
- scf::InParallelOp sourceTerm = source.getTerminator();
- scf::InParallelOp fusedTerm = fusedLoop.getTerminator();
- rewriter.setInsertionPointToStart(fusedTerm.getBody());
- for (Operation &op : targetTerm.getYieldingOps())
- rewriter.clone(op, mapping);
- for (Operation &op : sourceTerm.getYieldingOps())
- rewriter.clone(op, mapping);
-
- // Replace old loops by substituting their uses by results of the fused loop.
- rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
- rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
+ scf::ForallOp fusedLoop = cast<scf::ForallOp>(createFused(
+ target, source, rewriter,
+ [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
+        // `ForallOp` has no yielded values; it uses an `InParallelOp`
+        // terminator instead, so there is nothing to yield here.
+ return ValueRange{};
+ },
+ [&](RewriterBase &b, LoopLikeOpInterface source,
+ LoopLikeOpInterface &target, IRMapping mapping) {
+ auto sourceForall = cast<scf::ForallOp>(source);
+ auto targetForall = cast<scf::ForallOp>(target);
+ scf::InParallelOp fusedTerm = targetForall.getTerminator();
+ b.setInsertionPointToEnd(fusedTerm.getBody());
+ for (Operation &op : sourceForall.getTerminator().getYieldingOps())
+ b.clone(op, mapping);
+ }));
+ rewriter.replaceOp(source,
+ fusedLoop.getResults().take_back(source.getNumResults()));
return fusedLoop;
}
@@ -1317,49 +1395,74 @@ scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForOp mlir::fuseIndependentSiblingForLoops(scf::ForOp target,
scf::ForOp source,
RewriterBase &rewriter) {
- unsigned numTargetOuts = target.getNumResults();
- unsigned numSourceOuts = source.getNumResults();
-
- // Create fused init_args, with target's init_args before source's init_args.
- SmallVector<Value> fusedInitArgs;
- llvm::append_range(fusedInitArgs, target.getInitArgs());
- llvm::append_range(fusedInitArgs, source.getInitArgs());
-
- // Create a new scf.for op after the source loop (with scf.yield terminator
- // (without arguments) only in case its init_args is empty).
- rewriter.setInsertionPointAfter(source);
- scf::ForOp fusedLoop = rewriter.create<scf::ForOp>(
- source.getLoc(), source.getLowerBound(), source.getUpperBound(),
- source.getStep(), fusedInitArgs);
-
- // Map original induction variables and operands to those of the fused loop.
- IRMapping mapping;
- mapping.map(target.getInductionVar(), fusedLoop.getInductionVar());
- mapping.map(target.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
- mapping.map(source.getInductionVar(), fusedLoop.getInductionVar());
- mapping.map(source.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
-
- // Merge target's body into the new (fused) for loop and then source's body.
- rewriter.setInsertionPointToStart(fusedLoop.getBody());
- for (Operation &op : target.getBody()->without_terminator())
- rewriter.clone(op, mapping);
- for (Operation &op : source.getBody()->without_terminator())
- rewriter.clone(op, mapping);
-
- // Build fused yield results by appropriately mapping original yield operands.
- SmallVector<Value> yieldResults;
- for (Value operand : target.getBody()->getTerminator()->getOperands())
- yieldResults.push_back(mapping.lookupOrDefault(operand));
- for (Value operand : source.getBody()->getTerminator()->getOperands())
- yieldResults.push_back(mapping.lookupOrDefault(operand));
- if (!yieldResults.empty())
- rewriter.create<scf::YieldOp>(source.getLoc(), yieldResults);
-
- // Replace old loops by substituting their uses by results of the fused loop.
- rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
- rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
+ scf::ForOp fusedLoop = cast<scf::ForOp>(createFused(
+ target, source, rewriter,
+ [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
+ return source.getYieldedValues();
+ },
+ [&](RewriterBase &b, LoopLikeOpInterface source,
+ LoopLikeOpInterface &target, IRMapping mapping) {
+ auto targetFor = cast<scf::ForOp>(target);
+ auto newTerm = b.clone(*targetFor.getBody()->getTerminator(), mapping);
+ b.replaceOp(targetFor.getBody()->getTerminator(), newTerm);
+ }));
+ rewriter.replaceOp(source,
+ fusedLoop.getResults().take_back(source.getNumResults()));
+ return fusedLoop;
+}
+
+// TODO: Finish refactoring this like the above; it likely requires additional
+// interface methods.
+scf::ParallelOp mlir::fuseIndependentSiblingParallelLoops(
+ scf::ParallelOp target, scf::ParallelOp source, RewriterBase &rewriter) {
+ OpBuilder::InsertionGuard guard(rewriter);
+ Block *block1 = target.getBody();
+ Block *block2 = source.getBody();
+ auto term1 = cast<scf::ReduceOp>(block1->getTerminator());
+ auto term2 = cast<scf::ReduceOp>(block2->getTerminator());
+
+ ValueRange inits1 = target.getInitVals();
+ ValueRange inits2 = source.getInitVals();
+
+ SmallVector<Value> newInitVars(inits1.begin(), inits1.end());
+ newInitVars.append(inits2.begin(), inits2.end());
+
+ rewriter.setInsertionPoint(source);
+ auto fusedLoop = rewriter.create<scf::ParallelOp>(
+ rewriter.getFusedLoc(target.getLoc(), source.getLoc()),
+ source.getLowerBound(), source.getUpperBound(), source.getStep(),
+ newInitVars);
+ Block *newBlock = fusedLoop.getBody();
+ rewriter.inlineBlockBefore(block2, newBlock, newBlock->begin(),
+ newBlock->getArguments());
+ rewriter.inlineBlockBefore(block1, newBlock, newBlock->begin(),
+ newBlock->getArguments());
+
+ ValueRange results = fusedLoop.getResults();
+ if (!results.empty()) {
+ rewriter.setInsertionPointToEnd(newBlock);
+
+ ValueRange reduceArgs1 = term1.getOperands();
+ ValueRange reduceArgs2 = term2.getOperands();
+ SmallVector<Value> newReduceArgs(reduceArgs1.begin(), reduceArgs1.end());
+ newReduceArgs.append(reduceArgs2.begin(), reduceArgs2.end());
+
+ auto newReduceOp = rewriter.create<scf::ReduceOp>(
+ rewriter.getFusedLoc(term1.getLoc(), term2.getLoc()), newReduceArgs);
+
+ for (auto &&[i, reg] : llvm::enumerate(llvm::concat<Region>(
+ term1.getReductions(), term2.getReductions()))) {
+ Block &oldRedBlock = reg.front();
+ Block &newRedBlock = newReduceOp.getReductions()[i].front();
+ rewriter.inlineBlockBefore(&oldRedBlock, &newRedBlock,
+ newRedBlock.begin(),
+ newRedBlock.getArguments());
+ }
+ }
+ rewriter.replaceOp(target, results.take_front(inits1.size()));
+ rewriter.replaceOp(source, results.take_back(inits2.size()));
+ rewriter.eraseOp(term1);
+ rewriter.eraseOp(term2);
return fusedLoop;
}
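
A minimal caller-side sketch of the new structural check; the helper name and
the remark-based reporting are illustrative assumptions, not part of this
patch. It shows how the Diagnostic parameter lets callers surface the reason
fusion was rejected instead of silently bailing out:

#include "mlir/Dialect/SCF/Utils/Utils.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/Interfaces/LoopLikeInterface.h"

using namespace mlir;

// Hypothetical helper: gate fusion on the structural legality check and
// report the failure reason as a remark on the target loop.
static bool canStructurallyFuse(LoopLikeOpInterface target,
                                LoopLikeOpInterface source) {
  Diagnostic diag(target->getLoc(), DiagnosticSeverity::Remark);
  if (!checkFusionStructuralLegality(target, source, diag)) {
    target->emitRemark() << "not fusing: " << diag.str();
    return false;
  }
  return true;
}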
diff --git a/mlir/lib/Interfaces/LoopLikeInterface.cpp b/mlir/lib/Interfaces/LoopLikeInterface.cpp
index 1e0e87b64e811..5a119a7cf2659 100644
--- a/mlir/lib/Interfaces/LoopLikeInterface.cpp
+++ b/mlir/lib/Interfaces/LoopLikeInterface.cpp
@@ -8,6 +8,8 @@
#include "mlir/Interfaces/LoopLikeInterface.h"
+#include "mlir/IR/IRMapping.h"
+#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/FunctionInterfaces.h"
#include "llvm/ADT/DenseSet.h"
@@ -113,3 +115,60 @@ LogicalResult detail::verifyLoopLikeOpInterface(Operation *op) {
return success();
}
+
+LoopLikeOpInterface mlir::createFused(LoopLikeOpInterface target,
+ LoopLikeOpInterface source,
+ RewriterBase &rewriter,
+ NewYieldValuesFn newYieldValuesFn,
+ FuseTerminatorFn fuseTerminatorFn) {
+ auto targetIterArgs = target.getRegionIterArgs();
+ std::optional<SmallVector<Value>> targetInductionVar =
+ target.getLoopInductionVars();
+ SmallVector<Value> targetYieldOperands(target.getYieldedValues());
+ auto sourceIterArgs = source.getRegionIterArgs();
+ std::optional<SmallVector<Value>> sourceInductionVar =
+ *source.getLoopInductionVars();
+ SmallVector<Value> sourceYieldOperands(source.getYieldedValues());
+ auto sourceRegion = source.getLoopRegions().front();
+
+ FailureOr<LoopLikeOpInterface> maybeFusedLoop =
+ target.replaceWithAdditionalYields(rewriter, source.getInits(),
+ /*replaceInitOperandUsesInLoop=*/false,
+ newYieldValuesFn);
+ if (failed(maybeFusedLoop))
+ llvm_unreachable("failed to replace loop");
+ LoopLikeOpInterface fusedLoop = *maybeFusedLoop;
+ // Since the target op is rewritten at the original's location, we move it to
+  // the source op's location.
+ rewriter.moveOpBefore(fusedLoop, source);
+
+ // Map control operands.
+ IRMapping mapping;
+ std::optional<SmallVector<Value>> fusedInductionVar =
+ fusedLoop.getLoopInductionVars();
+ if (fusedInductionVar) {
+ if (!targetInductionVar || !sourceInductionVar)
+ llvm_unreachable(
+ "expected target and source loops to have induction vars");
+ mapping.map(*targetInductionVar, *fusedInductionVar);
+ mapping.map(*sourceInductionVar, *fusedInductionVar);
+ }
+ mapping.map(targetIterArgs,
+ fusedLoop.getRegionIterArgs().take_front(targetIterArgs.size()));
+ mapping.map(targetYieldOperands,
+ fusedLoop.getYieldedValues().take_front(targetIterArgs.size()));
+ mapping.map(sourceIterArgs,
+ fusedLoop.getRegionIterArgs().take_back(sourceIterArgs.size()));
+ mapping.map(sourceYieldOperands,
+ fusedLoop.getYieldedValues().take_back(sourceIterArgs.size()));
+ // Append everything except the terminator into the fused operation.
+ rewriter.setInsertionPoint(
+ fusedLoop.getLoopRegions().front()->front().getTerminator());
+ for (Operation &op : sourceRegion->front().without_terminator())
+ rewriter.clone(op, mapping);
+
+ // TODO: Replace with corresponding interface method if added
+ fuseTerminatorFn(rewriter, source, fusedLoop, mapping);
+
+ return fusedLoop;
+}
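
For reference, a sketch of how a hypothetical loop op could be wired into
createFused; it assumes an scf.for-style terminator that yields values and
mirrors the callback shapes used by fuseIndependentSiblingForLoops above.
Names are illustrative, not part of this patch:

#include "mlir/IR/IRMapping.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/LoopLikeInterface.h"

using namespace mlir;

// Hypothetical adapter fusing `source` into `target` for a yield-terminated
// loop; createFused handles iter_args, dominance-safe placement, and body
// cloning, leaving only the terminator rewrite to the callbacks.
static LoopLikeOpInterface fuseYieldingLoops(LoopLikeOpInterface target,
                                             LoopLikeOpInterface source,
                                             RewriterBase &rewriter) {
  return createFused(
      target, source, rewriter,
      // Yield source's values from the fused loop's new iter_args.
      [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
        return source.getYieldedValues();
      },
      // Rebuild the terminator once source's body has been cloned in.
      [&](RewriterBase &b, LoopLikeOpInterface src,
          LoopLikeOpInterface &fused, IRMapping mapping) {
        Operation *oldTerm =
            fused.getLoopRegions().front()->front().getTerminator();
        b.replaceOp(oldTerm, b.clone(*oldTerm, mapping));
      });
}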
diff --git a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
index 54dd2bdf953ca..f8246b74a5744 100644
--- a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
+++ b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
@@ -47,6 +47,169 @@ module attributes {transform.with_named_sequence} {
// -----
+// CHECK-LABEL: func @fuse_two_parallel
+// CHECK-SAME: ([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}) {
+func.func @fuse_two_parallel(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
+// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
+// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
+// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
+// CHECK-DAG: [[C1FP:%.*]] = arith.constant 1.
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1fp = arith.constant 1.0 : f32
+// CHECK: [[SUM:%.*]] = memref.alloc()
+ %sum = memref.alloc() : memref<2x2xf32>
+// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]])
+// CHECK-SAME: to ([[C2]], [[C2]]) step ([[C1]], [[C1]]) {
+// CHECK: [[B_ELEM:%.*]] = memref.load [[B]]{{\[}}[[I]], [[J]]]
+// CHECK: [[SUM_ELEM:%.*]] = arith.addf [[B_ELEM]], [[C1FP]]
+// CHECK: memref.store [[SUM_ELEM]], [[SUM]]{{\[}}[[I]], [[J]]]
+// CHECK-NOT: scf.parallel
+// CHECK: [[SUM_ELEM_:%.*]] = memref.load [[SUM]]{{\[}}[[I]], [[J]]]
+// CHECK: [[A_ELEM:%.*]] = memref.load [[A]]{{\[}}[[I]], [[J]]]
+// CHECK: [[PRODUCT_ELEM:%.*]] = arith.mulf [[SUM_ELEM_]], [[A_ELEM]]
+// CHECK: memref.store [[PRODUCT_ELEM]], [[B]]{{\[}}[[I]], [[J]]]
+// CHECK: scf.reduce
+// CHECK: }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
+ %sum_elem = arith.addf %B_elem, %c1fp : f32
+ memref.store %sum_elem, %sum[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
+ %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
+ %product_elem = arith.mulf %sum_elem, %A_elem : f32
+ memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+// CHECK: memref.dealloc [[SUM]]
+ memref.dealloc %sum : memref<2x2xf32>
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @fuse_two_parallel_reverse
+// CHECK-SAME: ([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}) {
+func.func @fuse_two_parallel_reverse(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
+// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
+// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
+// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
+// CHECK-DAG: [[C1FP:%.*]] = arith.constant 1.
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1fp = arith.constant 1.0 : f32
+// CHECK: [[SUM:%.*]] = memref.alloc()
+ %sum = memref.alloc() : memref<2x2xf32>
+// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]])
+// CHECK-SAME: to ([[C2]], [[C2]]) step ([[C1]], [[C1]]) {
+// CHECK: [[SUM_ELEM_:%.*]] = memref.load [[SUM]]{{\[}}[[I]], [[J]]]
+// CHECK: [[A_ELEM:%.*]] = memref.load [[A]]{{\[}}[[I]], [[J]]]
+// CHECK: [[PRODUCT_ELEM:%.*]] = arith.mulf [[SUM_ELEM_]], [[A_ELEM]]
+// CHECK: memref.store [[PRODUCT_ELEM]], [[B]]{{\[}}[[I]], [[J]]]
+// CHECK-NOT: scf.parallel
+// CHECK: [[B_ELEM:%.*]] = memref.load [[B]]{{\[}}[[I]], [[J]]]
+// CHECK: [[SUM_ELEM:%.*]] = arith.addf [[B_ELEM]], [[C1FP]]
+// CHECK: memref.store [[SUM_ELEM]], [[SUM]]{{\[}}[[I]], [[J]]]
+// CHECK: scf.reduce
+// CHECK: }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
+ %sum_elem = arith.addf %B_elem, %c1fp : f32
+ memref.store %sum_elem, %sum[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
+ %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
+ %product_elem = arith.mulf %sum_elem, %A_elem : f32
+ memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+// CHECK: memref.dealloc [[SUM]]
+ memref.dealloc %sum : memref<2x2xf32>
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %parallel#1 into %parallel#0 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @fuse_reductions_two
+// CHECK-SAME: (%[[A:.*]]: memref<2x2xf32>, %[[B:.*]]: memref<2x2xf32>) -> (f32, f32)
+func.func @fuse_reductions_two(%A: memref<2x2xf32>, %B: memref<2x2xf32>) -> (f32, f32) {
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[INIT1:.*]] = arith.constant 1.000000e+00 : f32
+// CHECK-DAG: %[[INIT2:.*]] = arith.constant 2.000000e+00 : f32
+// CHECK: %[[RES:.*]]:2 = scf.parallel (%[[I:.*]], %[[J:.*]]) = (%[[C0]], %[[C0]])
+// CHECK-SAME: to (%[[C2]], %[[C2]]) step (%[[C1]], %[[C1]])
+// CHECK-SAME: init (%[[INIT1]], %[[INIT2]]) -> (f32, f32)
+// CHECK: %[[VAL_A:.*]] = memref.load %[[A]][%[[I]], %[[J]]]
+// CHECK: %[[VAL_B:.*]] = memref.load %[[B]][%[[I]], %[[J]]]
+// CHECK: scf.reduce(%[[VAL_A]], %[[VAL_B]] : f32, f32) {
+// CHECK: ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32):
+// CHECK: %[[R:.*]] = arith.addf %[[LHS]], %[[RHS]] : f32
+// CHECK: scf.reduce.return %[[R]] : f32
+// CHECK: }
+// CHECK: ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32):
+// CHECK: %[[R:.*]] = arith.mulf %[[LHS]], %[[RHS]] : f32
+// CHECK: scf.reduce.return %[[R]] : f32
+// CHECK: }
+// CHECK: return %[[RES]]#0, %[[RES]]#1 : f32, f32
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %init1 = arith.constant 1.0 : f32
+ %init2 = arith.constant 2.0 : f32
+ %res1 = scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) init(%init1) -> f32 {
+ %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
+ scf.reduce(%A_elem : f32) {
+ ^bb0(%lhs: f32, %rhs: f32):
+ %1 = arith.addf %lhs, %rhs : f32
+ scf.reduce.return %1 : f32
+ }
+ }
+ %res2 = scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) init(%init2) -> f32 {
+ %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
+ scf.reduce(%B_elem : f32) {
+ ^bb0(%lhs: f32, %rhs: f32):
+ %1 = arith.mulf %lhs, %rhs : f32
+ scf.reduce.return %1 : f32
+ }
+ }
+ return %res1, %res2 : f32, f32
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
// CHECK: func.func @fuse_2nd_for_into_1st([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
func.func @fuse_2nd_for_into_1st(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
@@ -208,6 +371,62 @@ module attributes {transform.with_named_sequence} {
}
}
+
+// -----
+
+// CHECK: #[[$MAP:.+]] = affine_map<(d0) -> (d0 * 32)
+#map = affine_map<(d0) -> (d0 * 32)>
+#map1 = affine_map<(d0, d1) -> (d0, d1)>
+module {
+ // CHECK: func.func @loop_sibling_fusion(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}
+ func.func @loop_sibling_fusion(%arg0: tensor<128xf32>, %arg1: tensor<128x128xf16>, %arg2: tensor<128x64xf32>, %arg3: tensor<128x128xf32>) -> (tensor<128xf32>, tensor<128x128xf16>) {
+ // CHECK: %[[EMPTY:.*]] = tensor.empty() : tensor<128x128xf16>
+ // CHECK-NEXT: %[[RESULTS:.*]]:2 = scf.forall (%[[I:.*]]) in (4) shared_outs(%[[S1:.*]] = %[[ARG0]], %[[S2:.*]] = %[[ARG1]]) -> (tensor<128xf32>, tensor<128x128xf16>) {
+ // CHECK-NEXT: %[[IDX:.*]] = affine.apply #[[$MAP]](%[[I]])
+ // CHECK-NEXT: %[[SLICE0:.*]] = tensor.extract_slice %[[ARG3]][%[[IDX]], 0] [32, 1] [1, 1] : tensor<128x128xf32> to tensor<32xf32>
+ // CHECK-NEXT: %[[SLICE1:.*]] = tensor.extract_slice %[[ARG3]][%[[IDX]], 0] [32, 128] [1, 1] : tensor<128x128xf32> to tensor<32x128xf32>
+ // CHECK-NEXT: %[[SLICE2:.*]] = tensor.extract_slice %[[EMPTY]][%[[IDX]], 0] [32, 128] [1, 1] : tensor<128x128xf16> to tensor<32x128xf16>
+ // CHECK-NEXT: %[[GENERIC:.*]] = linalg.generic {{.*}} ins(%[[SLICE1]] : {{.*}}) outs(%[[SLICE2]] : {{.*}})
+ // CHECK: scf.forall.in_parallel {
+ // CHECK-NEXT: tensor.parallel_insert_slice %[[SLICE0]] into %[[S1]][%[[IDX]]] [32] [1] : tensor<32xf32> into tensor<128xf32>
+ // CHECK-NEXT: tensor.parallel_insert_slice %[[GENERIC]] into %[[S2]][%[[IDX]], 0] [32, 128] [1, 1] : tensor<32x128xf16> into tensor<128x128xf16>
+ // CHECK-NEXT: }
+ // CHECK-NEXT: } {mapping = [#gpu.warp<linear_dim_0>]}
+ // CHECK-NEXT: return %[[RESULTS]]#0, %[[RESULTS]]#1
+ %0 = scf.forall (%arg4) in (4) shared_outs(%arg5 = %arg0) -> (tensor<128xf32>) {
+ %3 = affine.apply #map(%arg4)
+ %extracted_slice = tensor.extract_slice %arg3[%3, 0] [32, 1] [1, 1] : tensor<128x128xf32> to tensor<32xf32>
+ scf.forall.in_parallel {
+ tensor.parallel_insert_slice %extracted_slice into %arg5[%3] [32] [1] : tensor<32xf32> into tensor<128xf32>
+ }
+ } {mapping = [#gpu.warp<linear_dim_0>]}
+ %1 = tensor.empty() : tensor<128x128xf16>
+ %2 = scf.forall (%arg4) in (4) shared_outs(%arg5 = %arg1) -> (tensor<128x128xf16>) {
+ %3 = affine.apply #map(%arg4)
+ %extracted_slice = tensor.extract_slice %arg3[%3, 0] [32, 128] [1, 1] : tensor<128x128xf32> to tensor<32x128xf32>
+ %extracted_slice_0 = tensor.extract_slice %1[%3, 0] [32, 128] [1, 1] : tensor<128x128xf16> to tensor<32x128xf16>
+ %4 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%extracted_slice : tensor<32x128xf32>) outs(%extracted_slice_0 : tensor<32x128xf16>) {
+ ^bb0(%in: f32, %out: f16):
+ %5 = arith.truncf %in : f32 to f16
+ linalg.yield %5 : f16
+ } -> tensor<32x128xf16>
+ scf.forall.in_parallel {
+ tensor.parallel_insert_slice %4 into %arg5[%3, 0] [32, 128] [1, 1] : tensor<32x128xf16> into tensor<128x128xf16>
+ }
+ } {mapping = [#gpu.warp<linear_dim_0>]}
+ return %0, %2 : tensor<128xf32>, tensor<128x128xf16>
+ }
+}
+
+module attributes { transform.with_named_sequence } {
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ %loops = transform.structured.match ops{["scf.forall"]} in %root : (!transform.any_op) -> !transform.any_op
+ %loop1, %loop2 = transform.split_handle %loops : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %loop3 = transform.loop.fuse_sibling %loop1 into %loop2 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
// -----
func.func @source_for_uses_result_of_target_for_err(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
@@ -282,8 +501,9 @@ func.func @target_for_region_uses_result_of_source_for_err(%A: tensor<128xf32>,
%6 = vector.transfer_write %5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
scf.yield %6 : tensor<128xf32>
}
- %dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
// expected-error @below {{values used inside regions of target should be properly dominated by source}}
+ %dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
+ // expected-note @below {{see operation}}
%dup2 = vector.transfer_read %1[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
%dup3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
%dup5 = arith.addf %dup3, %dup2 : vector<16xf32>
@@ -328,6 +548,74 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
+
+// -----
+
+func.func @non_matching_iteration_spaces_err(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1fp = arith.constant 1.0 : f32
+ %sum = memref.alloc() : memref<2x2xf32>
+ // expected-error @below {{target and source iteration spaces must be equal}}
+ scf.parallel (%i) = (%c0) to (%c2) step (%c1) {
+ %B_elem = memref.load %B[%i, %c0] : memref<2x2xf32>
+ %sum_elem = arith.addf %B_elem, %c1fp : f32
+ memref.store %sum_elem, %sum[%i, %c0] : memref<2x2xf32>
+ scf.reduce
+ }
+ scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
+ %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
+ %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
+ %product_elem = arith.mulf %sum_elem, %A_elem : f32
+ memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
+ scf.reduce
+ }
+ memref.dealloc %sum : memref<2x2xf32>
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @non_matching_loop_types_err(%A: memref<2xf32>, %B: memref<2xf32>) {
+ %c2 = arith.constant 2 : index
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1fp = arith.constant 1.0 : f32
+ %sum = memref.alloc() : memref<2xf32>
+ // expected-error @below {{target and source must be same loop type}}
+ scf.for %i = %c0 to %c2 step %c1 {
+ %B_elem = memref.load %B[%i] : memref<2xf32>
+ %sum_elem = arith.addf %B_elem, %c1fp : f32
+ memref.store %sum_elem, %sum[%i] : memref<2xf32>
+ }
+ scf.parallel (%i) = (%c0) to (%c2) step (%c1) {
+ %sum_elem = memref.load %sum[%i] : memref<2xf32>
+ %A_elem = memref.load %A[%i] : memref<2xf32>
+ %product_elem = arith.mulf %sum_elem, %A_elem : f32
+ memref.store %product_elem, %B[%i] : memref<2xf32>
+ scf.reduce
+ }
+ memref.dealloc %sum : memref<2xf32>
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %fused = transform.loop.fuse_sibling %0 into %1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
// -----
// CHECK: func.func @foreach_loop_pair_fuse([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
From d5f5dc9dcca427a290a9f454046113afdb6a68ff Mon Sep 17 00:00:00 2001
From: vporpo <vporpodas at google.com>
Date: Wed, 3 Jul 2024 12:04:16 -0700
Subject: [PATCH 168/246] [SandboxIR] More boilerplate: Function, Argument,
Constant, Instruction, OpaqueInst (#97343)
A very basic implementation of sandboxir::
`Function`
`Argument`
`Constant`
`Instruction`
`OpaqueInst`
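
A rough usage sketch, assuming a parsed module whose function has at least one
argument and one instruction; the unit test added below exercises the same
pattern:

#include "llvm/SandboxIR/SandboxIR.h"

using namespace llvm;

// Wrap pre-existing LLVM IR objects in their SandboxIR shims. Instructions
// without a dedicated SandboxIR class map to OpaqueInst.
static void wrapInSandboxIR(Function *LLVMF, LLVMContext &C) {
  sandboxir::Context Ctx(C);
  sandboxir::Function F(LLVMF, Ctx);
  sandboxir::Argument Arg0(LLVMF->getArg(0), Ctx);
  sandboxir::OpaqueInst Opaque(&*LLVMF->begin()->begin(), Ctx);
}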
---
llvm/include/llvm/SandboxIR/SandboxIR.h | 150 ++++++++++++++++++
.../llvm/SandboxIR/SandboxIRValues.def | 21 ++-
llvm/lib/SandboxIR/SandboxIR.cpp | 114 +++++++++++++
llvm/unittests/SandboxIR/SandboxIRTest.cpp | 50 ++++++
4 files changed, 331 insertions(+), 4 deletions(-)
diff --git a/llvm/include/llvm/SandboxIR/SandboxIR.h b/llvm/include/llvm/SandboxIR/SandboxIR.h
index ec7ec1a268fa7..ab6273a7ace66 100644
--- a/llvm/include/llvm/SandboxIR/SandboxIR.h
+++ b/llvm/include/llvm/SandboxIR/SandboxIR.h
@@ -58,6 +58,7 @@
#ifndef LLVM_TRANSFORMS_SANDBOXIR_SANDBOXIR_H
#define LLVM_TRANSFORMS_SANDBOXIR_SANDBOXIR_H
+#include "llvm/IR/Function.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"
@@ -129,6 +130,35 @@ class Value {
void dumpCommonPrefix(raw_ostream &OS) const;
void dumpCommonSuffix(raw_ostream &OS) const;
void printAsOperandCommon(raw_ostream &OS) const;
+ friend raw_ostream &operator<<(raw_ostream &OS, const sandboxir::Value &V) {
+ V.dump(OS);
+ return OS;
+ }
+ virtual void dump(raw_ostream &OS) const = 0;
+ LLVM_DUMP_METHOD virtual void dump() const = 0;
+#endif
+};
+
+/// Argument of a sandboxir::Function.
+class Argument : public sandboxir::Value {
+public:
+ Argument(llvm::Argument *Arg, sandboxir::Context &Ctx)
+ : sandboxir::Value(ClassID::Argument, Arg, Ctx) {}
+ static bool classof(const sandboxir::Value *From) {
+ return From->getSubclassID() == ClassID::Argument;
+ }
+#ifndef NDEBUG
+ void verify() const final {
+ assert(isa<llvm::Argument>(Val) && "Expected Argument!");
+ }
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const sandboxir::Argument &TArg) {
+ TArg.dump(OS);
+ return OS;
+ }
+ void printAsOperand(raw_ostream &OS) const;
+ void dump(raw_ostream &OS) const final;
+ LLVM_DUMP_METHOD void dump() const final;
#endif
};
@@ -142,16 +172,136 @@ class User : public Value {
assert(isa<llvm::User>(Val) && "Expected User!");
}
void dumpCommonHeader(raw_ostream &OS) const final;
+ void dump(raw_ostream &OS) const override {
+ // TODO: Remove this tmp implementation once we get the Instruction classes.
+ }
+ LLVM_DUMP_METHOD void dump() const override {
+ // TODO: Remove this tmp implementation once we get the Instruction classes.
+ }
+#endif
+};
+
+class Constant : public sandboxir::User {
+public:
+ Constant(llvm::Constant *C, sandboxir::Context &SBCtx)
+ : sandboxir::User(ClassID::Constant, C, SBCtx) {}
+ /// For isa/dyn_cast.
+ static bool classof(const sandboxir::Value *From) {
+ return From->getSubclassID() == ClassID::Constant ||
+ From->getSubclassID() == ClassID::Function;
+ }
+ sandboxir::Context &getParent() const { return getContext(); }
+#ifndef NDEBUG
+ void verify() const final {
+ assert(isa<llvm::Constant>(Val) && "Expected Constant!");
+ }
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const sandboxir::Constant &SBC) {
+ SBC.dump(OS);
+ return OS;
+ }
+ void dump(raw_ostream &OS) const override;
+ LLVM_DUMP_METHOD void dump() const override;
+#endif
+};
+
+/// A sandboxir::User with operands and opcode.
+class Instruction : public sandboxir::User {
+public:
+ enum class Opcode {
+#define DEF_VALUE(ID, CLASS)
+#define DEF_USER(ID, CLASS)
+#define OP(OPC) OPC,
+#define DEF_INSTR(ID, OPC, CLASS) OPC
+#include "llvm/SandboxIR/SandboxIRValues.def"
+ };
+
+ Instruction(ClassID ID, Opcode Opc, llvm::Instruction *I,
+ sandboxir::Context &SBCtx)
+ : sandboxir::User(ID, I, SBCtx), Opc(Opc) {}
+
+protected:
+ Opcode Opc;
+
+public:
+ static const char *getOpcodeName(Opcode Opc);
+#ifndef NDEBUG
+ friend raw_ostream &operator<<(raw_ostream &OS, Opcode Opc) {
+ OS << getOpcodeName(Opc);
+ return OS;
+ }
+#endif
+ /// For isa/dyn_cast.
+ static bool classof(const sandboxir::Value *From);
+
+#ifndef NDEBUG
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const sandboxir::Instruction &SBI) {
+ SBI.dump(OS);
+ return OS;
+ }
+ void dump(raw_ostream &OS) const override;
+ LLVM_DUMP_METHOD void dump() const override;
+#endif
+};
+
+/// An LLVM Instruction that has no SandboxIR equivalent class gets mapped to
+/// an OpaqueInst.
+class OpaqueInst : public sandboxir::Instruction {
+public:
+ OpaqueInst(llvm::Instruction *I, sandboxir::Context &Ctx)
+ : sandboxir::Instruction(ClassID::Opaque, Opcode::Opaque, I, Ctx) {}
+ OpaqueInst(ClassID SubclassID, llvm::Instruction *I, sandboxir::Context &Ctx)
+ : sandboxir::Instruction(SubclassID, Opcode::Opaque, I, Ctx) {}
+ static bool classof(const sandboxir::Value *From) {
+ return From->getSubclassID() == ClassID::Opaque;
+ }
+#ifndef NDEBUG
+ void verify() const final {
+ // Nothing to do
+ }
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const sandboxir::OpaqueInst &OI) {
+ OI.dump(OS);
+ return OS;
+ }
+ void dump(raw_ostream &OS) const override;
+ LLVM_DUMP_METHOD void dump() const override;
#endif
};
class Context {
protected:
LLVMContext &LLVMCtx;
+ /// Maps LLVM Value to the corresponding sandboxir::Value. Owns all
+ /// SandboxIR objects.
+ DenseMap<llvm::Value *, std::unique_ptr<sandboxir::Value>>
+ LLVMValueToValueMap;
public:
Context(LLVMContext &LLVMCtx) : LLVMCtx(LLVMCtx) {}
+ sandboxir::Value *getValue(llvm::Value *V) const;
+};
+
+class Function : public sandboxir::Value {
+public:
+ Function(llvm::Function *F, sandboxir::Context &Ctx)
+ : sandboxir::Value(ClassID::Function, F, Ctx) {}
+ /// For isa/dyn_cast.
+ static bool classof(const sandboxir::Value *From) {
+ return From->getSubclassID() == ClassID::Function;
+ }
+
+#ifndef NDEBUG
+ void verify() const final {
+ assert(isa<llvm::Function>(Val) && "Expected Function!");
+ }
+ void dumpNameAndArgs(raw_ostream &OS) const;
+ void dump(raw_ostream &OS) const final;
+ LLVM_DUMP_METHOD void dump() const final;
+#endif
};
+
} // namespace sandboxir
} // namespace llvm
diff --git a/llvm/include/llvm/SandboxIR/SandboxIRValues.def b/llvm/include/llvm/SandboxIR/SandboxIRValues.def
index d41d8dce56e68..474b151ae03a4 100644
--- a/llvm/include/llvm/SandboxIR/SandboxIRValues.def
+++ b/llvm/include/llvm/SandboxIR/SandboxIRValues.def
@@ -7,7 +7,23 @@
//===----------------------------------------------------------------------===//
// ClassID, Class
-DEF_USER(User, sandboxir::User)
+#ifndef DEF_VALUE
+#define DEF_VALUE(ID, CLASS)
+#endif
+DEF_VALUE(Function, Function)
+DEF_VALUE(Argument, Argument)
+
+#ifndef DEF_USER
+#define DEF_USER(ID, CLASS)
+#endif
+DEF_USER(User, User)
+DEF_USER(Constant, Constant)
+
+#ifndef DEF_INSTR
+#define DEF_INSTR(ID, OPCODE, CLASS)
+#endif
+// ClassID, Opcode(s), Class
+DEF_INSTR(Opaque, OP(Opaque), OpaqueInst)
#ifdef DEF_VALUE
#undef DEF_VALUE
@@ -18,9 +34,6 @@ DEF_USER(User, sandboxir::User)
#ifdef DEF_INSTR
#undef DEF_INSTR
#endif
-#ifdef OPCODES
-#undef OPCODES
-#endif
#ifdef OP
#undef OP
#endif
diff --git a/llvm/lib/SandboxIR/SandboxIR.cpp b/llvm/lib/SandboxIR/SandboxIR.cpp
index 86757029c821c..47e1ae4422c98 100644
--- a/llvm/lib/SandboxIR/SandboxIR.cpp
+++ b/llvm/lib/SandboxIR/SandboxIR.cpp
@@ -58,8 +58,122 @@ void Value::printAsOperandCommon(raw_ostream &OS) const {
OS << "NULL ";
}
+void Argument::printAsOperand(raw_ostream &OS) const {
+ printAsOperandCommon(OS);
+}
+void Argument::dump(raw_ostream &OS) const {
+ dumpCommonPrefix(OS);
+ dumpCommonSuffix(OS);
+}
+void Argument::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+#endif // NDEBUG
+
+bool User::classof(const Value *From) {
+ switch (From->getSubclassID()) {
+#define DEF_VALUE(ID, CLASS)
+#define DEF_USER(ID, CLASS) \
+ case ClassID::ID: \
+ return true;
+#define DEF_INSTR(ID, OPC, CLASS) \
+ case ClassID::ID: \
+ return true;
+#include "llvm/SandboxIR/SandboxIRValues.def"
+ default:
+ return false;
+ }
+}
+
+#ifndef NDEBUG
void User::dumpCommonHeader(raw_ostream &OS) const {
Value::dumpCommonHeader(OS);
// TODO: This is incomplete
}
#endif // NDEBUG
+
+const char *Instruction::getOpcodeName(Opcode Opc) {
+ switch (Opc) {
+#define DEF_VALUE(ID, CLASS)
+#define DEF_USER(ID, CLASS)
+#define OP(OPC) \
+ case Opcode::OPC: \
+ return #OPC;
+#define DEF_INSTR(ID, OPC, CLASS) OPC
+#include "llvm/SandboxIR/SandboxIRValues.def"
+ }
+}
+
+bool Instruction::classof(const sandboxir::Value *From) {
+ switch (From->getSubclassID()) {
+#define DEF_INSTR(ID, OPC, CLASS) \
+ case ClassID::ID: \
+ return true;
+#include "llvm/SandboxIR/SandboxIRValues.def"
+ default:
+ return false;
+ }
+}
+
+#ifndef NDEBUG
+void Instruction::dump(raw_ostream &OS) const {
+ OS << "Unimplemented! Please override dump().";
+}
+void Instruction::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+
+void OpaqueInst::dump(raw_ostream &OS) const {
+ dumpCommonPrefix(OS);
+ dumpCommonSuffix(OS);
+}
+
+void OpaqueInst::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+
+void Constant::dump(raw_ostream &OS) const {
+ dumpCommonPrefix(OS);
+ dumpCommonSuffix(OS);
+}
+
+void Constant::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+
+void Function::dumpNameAndArgs(raw_ostream &OS) const {
+ auto *F = cast<llvm::Function>(Val);
+ OS << *getType() << " @" << F->getName() << "(";
+ auto NumArgs = F->arg_size();
+ for (auto [Idx, Arg] : enumerate(F->args())) {
+ auto *SBArg = cast_or_null<Argument>(Ctx.getValue(&Arg));
+ if (SBArg == nullptr)
+ OS << "NULL";
+ else
+ SBArg->printAsOperand(OS);
+ if (Idx + 1 < NumArgs)
+ OS << ", ";
+ }
+ OS << ")";
+}
+void Function::dump(raw_ostream &OS) const {
+ dumpNameAndArgs(OS);
+ OS << " {\n";
+ OS << "}\n";
+}
+void Function::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+#endif // NDEBUG
+
+Value *Context::getValue(llvm::Value *V) const {
+ auto It = LLVMValueToValueMap.find(V);
+ if (It != LLVMValueToValueMap.end())
+ return It->second.get();
+ return nullptr;
+}
diff --git a/llvm/unittests/SandboxIR/SandboxIRTest.cpp b/llvm/unittests/SandboxIR/SandboxIRTest.cpp
index 14e6e745f757a..0b0409aa15c4f 100644
--- a/llvm/unittests/SandboxIR/SandboxIRTest.cpp
+++ b/llvm/unittests/SandboxIR/SandboxIRTest.cpp
@@ -40,3 +40,53 @@ define void @foo(i32 %v1) {
sandboxir::Context Ctx(C);
[[maybe_unused]] sandboxir::User U(sandboxir::Value::ClassID::User, Ret, Ctx);
}
+
+TEST_F(SandboxIRTest, FunctionArgumentConstantAndOpaqueInstInstantiation) {
+ parseIR(C, R"IR(
+define void @foo(i32 %v1) {
+ %add = add i32 %v1, 42
+ ret void
+}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ llvm::BasicBlock *LLVMBB = &*LLVMF->begin();
+ llvm::Instruction *LLVMAdd = &*LLVMBB->begin();
+ auto *LLVMC = cast<llvm::Constant>(LLVMAdd->getOperand(1));
+ auto *LLVMArg0 = LLVMF->getArg(0);
+
+ sandboxir::Context Ctx(C);
+ sandboxir::Function F(LLVMF, Ctx);
+ sandboxir::Argument Arg0(LLVMArg0, Ctx);
+ sandboxir::Constant Const0(LLVMC, Ctx);
+ sandboxir::OpaqueInst OpaqueI(LLVMAdd, Ctx);
+
+ EXPECT_TRUE(isa<sandboxir::Function>(F));
+ EXPECT_FALSE(isa<sandboxir::Function>(Arg0));
+ EXPECT_FALSE(isa<sandboxir::Function>(Const0));
+ EXPECT_FALSE(isa<sandboxir::Function>(OpaqueI));
+
+ EXPECT_FALSE(isa<sandboxir::Argument>(F));
+ EXPECT_TRUE(isa<sandboxir::Argument>(Arg0));
+ EXPECT_FALSE(isa<sandboxir::Argument>(Const0));
+ EXPECT_FALSE(isa<sandboxir::Argument>(OpaqueI));
+
+ EXPECT_TRUE(isa<sandboxir::Constant>(F));
+ EXPECT_FALSE(isa<sandboxir::Constant>(Arg0));
+ EXPECT_TRUE(isa<sandboxir::Constant>(Const0));
+ EXPECT_FALSE(isa<sandboxir::Constant>(OpaqueI));
+
+ EXPECT_FALSE(isa<sandboxir::OpaqueInst>(F));
+ EXPECT_FALSE(isa<sandboxir::OpaqueInst>(Arg0));
+ EXPECT_FALSE(isa<sandboxir::OpaqueInst>(Const0));
+ EXPECT_TRUE(isa<sandboxir::OpaqueInst>(OpaqueI));
+
+ EXPECT_FALSE(isa<sandboxir::Instruction>(F));
+ EXPECT_FALSE(isa<sandboxir::Instruction>(Arg0));
+ EXPECT_FALSE(isa<sandboxir::Instruction>(Const0));
+ EXPECT_TRUE(isa<sandboxir::Instruction>(OpaqueI));
+
+ EXPECT_FALSE(isa<sandboxir::User>(F));
+ EXPECT_FALSE(isa<sandboxir::User>(Arg0));
+ EXPECT_TRUE(isa<sandboxir::User>(Const0));
+ EXPECT_TRUE(isa<sandboxir::User>(OpaqueI));
+}
From bbd52dd44ceee80e3b6ba6a9b2bd8ee9a9713833 Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Wed, 3 Jul 2024 15:11:18 -0400
Subject: [PATCH 169/246] [SLP]Remove operands upon marking instruction for
deletion.
If an instruction is marked for deletion, it is better to drop all of its
operands and mark them for deletion too (if allowed). This allows more
vectorizable patterns and generates fewer useless extractelement
instructions.
Reviewers: RKSimon
Reviewed By: RKSimon
Pull Request: https://github.com/llvm/llvm-project/pull/97409
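
The core pattern, as a standalone sketch rather than the SLP code itself: the
vectorizer defers actual deletion through eraseInstruction(), while this
simplified version erases eagerly.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// Detach a to-be-deleted instruction from its operands so that operand
// instructions which become trivially dead can be reclaimed as well.
static void detachOperands(Instruction *I) {
  for (unsigned Idx = 0, E = I->getNumOperands(); Idx != E; ++Idx) {
    Value *Op = I->getOperand(Idx);
    // Dropping the use may make the defining instruction trivially dead.
    I->setOperand(Idx, PoisonValue::get(Op->getType()));
    if (auto *OpI = dyn_cast<Instruction>(Op))
      if (isInstructionTriviallyDead(OpI))
        OpI->eraseFromParent();
  }
}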
---
.../Transforms/Vectorize/SLPVectorizer.cpp | 60 ++++++++++++++-----
.../SLPVectorizer/X86/arith-add-ssat.ll | 4 +-
.../SLPVectorizer/X86/arith-add-usat.ll | 2 +-
.../Transforms/SLPVectorizer/X86/arith-add.ll | 4 +-
.../Transforms/SLPVectorizer/X86/arith-fix.ll | 8 +--
.../SLPVectorizer/X86/arith-fshl-rot.ll | 2 +-
.../SLPVectorizer/X86/arith-fshl.ll | 20 +++----
.../SLPVectorizer/X86/arith-fshr-rot.ll | 2 +-
.../SLPVectorizer/X86/arith-fshr.ll | 20 +++----
.../Transforms/SLPVectorizer/X86/arith-mul.ll | 6 +-
.../SLPVectorizer/X86/arith-smax.ll | 2 +-
.../SLPVectorizer/X86/arith-smin.ll | 2 +-
.../SLPVectorizer/X86/arith-sub-ssat.ll | 4 +-
.../SLPVectorizer/X86/arith-sub-usat.ll | 2 +-
.../Transforms/SLPVectorizer/X86/arith-sub.ll | 4 +-
.../SLPVectorizer/X86/arith-umax.ll | 2 +-
.../SLPVectorizer/X86/arith-umin.ll | 2 +-
.../SLPVectorizer/X86/horizontal-list.ll | 11 +---
.../SLPVectorizer/X86/shift-ashr.ll | 2 +-
.../SLPVectorizer/X86/shift-lshr.ll | 2 +-
.../Transforms/SLPVectorizer/X86/shift-shl.ll | 2 +-
21 files changed, 93 insertions(+), 70 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 11f9ad70dc725..bba80c3f675d7 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1163,6 +1163,12 @@ class BoUpSLP {
return VectorizableTree.front()->Scalars;
}
+  /// Checks if the root graph node can be emitted with a narrower bitwidth at
+  /// codegen and, if so, returns its signedness.
+ bool isSignedMinBitwidthRootNode() const {
+ return MinBWs.at(VectorizableTree.front().get()).second;
+ }
+
/// Builds external uses of the vectorized scalars, i.e. the list of
/// vectorized scalars to be extracted, their lanes and their scalar users. \p
/// ExternallyUsedValues contains additional list of external uses to handle
@@ -2430,6 +2436,21 @@ class BoUpSLP {
DeletedInstructions.insert(I);
}
+  /// Clear the operands of \p I, marking trivially dead operands for deletion.
+ void clearOperands(Instruction *I, const TreeEntry *Entry = nullptr) {
+ for (unsigned Idx : seq<unsigned>(I->getNumOperands())) {
+ // Ignore pointer operand of stores to keep correct DIAssignID.
+ if (isa<StoreInst>(I) && Idx == 1)
+ continue;
+ Value *Op = I->getOperand(Idx);
+ I->setOperand(Idx, PoisonValue::get(Op->getType()));
+ if (auto *OpI = dyn_cast<Instruction>(Op))
+ if (!isDeleted(OpI) && isInstructionTriviallyDead(OpI, TLI) &&
+ (!Entry || Entry->VectorizedValue != OpI))
+ eraseInstruction(OpI);
+ }
+ }
+
/// Checks if the instruction was already analyzed for being possible
/// reduction root.
bool isAnalyzedReductionRoot(Instruction *I) const {
@@ -3795,7 +3816,7 @@ class BoUpSLP {
/// Performs the "real" scheduling. Done before vectorization is actually
/// performed in a basic block.
- void scheduleBlock(BlockScheduling *BS);
+ void scheduleBlock(BlockScheduling *BS, BoUpSLP &R);
/// List of users to ignore during scheduling and that don't need extracting.
const SmallDenseSet<Value *> *UserIgnoreList = nullptr;
@@ -13524,7 +13545,7 @@ Value *BoUpSLP::vectorizeTree(
Instruction *ReductionRoot) {
// All blocks must be scheduled before any instructions are inserted.
for (auto &BSIter : BlocksSchedules) {
- scheduleBlock(BSIter.second.get());
+ scheduleBlock(BSIter.second.get(), *this);
}
// Clean Entry-to-LastInstruction table. It can be affected after scheduling,
// need to rebuild it.
@@ -14064,11 +14085,14 @@ Value *BoUpSLP::vectorizeTree(
}
#endif
LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
- eraseInstruction(cast<Instruction>(Scalar));
+ auto *I = cast<Instruction>(Scalar);
+      // Clear the operands, marking trivially dead operands for deletion.
+ clearOperands(I, Entry);
+ eraseInstruction(I);
// Retain to-be-deleted instructions for some debug-info
// bookkeeping. NOTE: eraseInstruction only marks the instruction for
// deletion - instructions are not deleted until later.
- RemovedInsts.push_back(cast<Instruction>(Scalar));
+ RemovedInsts.push_back(I);
}
}
@@ -14681,6 +14705,8 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
for (; DepDest; DepDest = DepDest->NextLoadStore) {
assert(isInSchedulingRegion(DepDest));
+ if (SLP->isDeleted(DepDest->Inst))
+ continue;
// We have two limits to reduce the complexity:
// 1) AliasedCheckLimit: It's a small limit to reduce calls to
@@ -14750,7 +14776,7 @@ void BoUpSLP::BlockScheduling::resetSchedule() {
ReadyInsts.clear();
}
-void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
+void BoUpSLP::scheduleBlock(BlockScheduling *BS, BoUpSLP &R) {
if (!BS->ScheduleStart)
return;
@@ -14807,6 +14833,8 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
for (ScheduleData *BundleMember = Picked; BundleMember;
BundleMember = BundleMember->NextInBundle) {
Instruction *PickedInst = BundleMember->Inst;
+ if (R.isDeleted(PickedInst))
+ continue;
if (PickedInst->getNextNonDebugInstruction() != LastScheduledInst)
PickedInst->moveAfter(LastScheduledInst->getPrevNode());
LastScheduledInst = PickedInst;
@@ -17344,14 +17372,11 @@ class HorizontalReduction {
Value *ReducedSubTree =
emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
if (ReducedSubTree->getType() != VL.front()->getType()) {
- ReducedSubTree = Builder.CreateIntCast(
- ReducedSubTree, VL.front()->getType(), any_of(VL, [&](Value *R) {
- KnownBits Known = computeKnownBits(
- R, cast<Instruction>(ReductionOps.front().front())
- ->getModule()
- ->getDataLayout());
- return !Known.isNonNegative();
- }));
+ assert(ReducedSubTree->getType() != VL.front()->getType() &&
+ "Expected different reduction type.");
+ ReducedSubTree =
+ Builder.CreateIntCast(ReducedSubTree, VL.front()->getType(),
+ V.isSignedMinBitwidthRootNode());
}
// Improved analysis for add/fadd/xor reductions with same scale factor
@@ -17513,10 +17538,13 @@ class HorizontalReduction {
}
#endif
if (!Ignore->use_empty()) {
- Value *Undef = UndefValue::get(Ignore->getType());
- Ignore->replaceAllUsesWith(Undef);
+ Value *P = PoisonValue::get(Ignore->getType());
+ Ignore->replaceAllUsesWith(P);
}
- V.eraseInstruction(cast<Instruction>(Ignore));
+ auto *I = cast<Instruction>(Ignore);
+      // Clear the operands, marking trivially dead operands for deletion.
+ V.clearOperands(I);
+ V.eraseInstruction(I);
}
}
} else if (!CheckForReusedReductionOps) {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll
index 24c5fcb068086..8c4903dbc92bb 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll
@@ -503,10 +503,10 @@ define void @add_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -522,10 +522,10 @@ define void @add_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
index fab022d691c07..cb8d45b1a21a2 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
@@ -401,10 +401,10 @@ define void @add_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-add.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-add.ll
index dafed43e6e71c..a7ae2d9e02ff4 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-add.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-add.ll
@@ -439,10 +439,10 @@ define void @add_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = add <16 x i8> [[TMP7]], [[TMP8]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = add <16 x i8> [[TMP10]], [[TMP11]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -458,10 +458,10 @@ define void @add_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = add <16 x i8> [[TMP7]], [[TMP8]]
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = add <16 x i8> [[TMP10]], [[TMP11]]
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fix.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fix.ll
index e4c76daddb02e..d4eafdeb50a47 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fix.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fix.ll
@@ -520,10 +520,10 @@ define void @smul_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.smul.fix.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]], i32 3)
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.smul.fix.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], i32 3)
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -539,10 +539,10 @@ define void @smul_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.smul.fix.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]], i32 3)
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.smul.fix.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], i32 3)
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
@@ -1323,10 +1323,10 @@ define void @umul_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.umul.fix.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]], i32 3)
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.umul.fix.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], i32 3)
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -1342,10 +1342,10 @@ define void @umul_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.umul.fix.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]], i32 3)
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.umul.fix.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], i32 3)
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
index 9b8480cd0088a..16977c025e3ea 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
@@ -480,10 +480,10 @@ define void @fshl_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
index daf28b9a0bb4d..609a9024e5bf7 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
@@ -575,21 +575,21 @@ define void @fshl_v64i8() {
; SSE-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @b8, align 1
; SSE-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @c8, align 1
; SSE-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]])
+; SSE-NEXT: store <16 x i8> [[TMP4]], ptr @d8, align 1
; SSE-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP8:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP5]], <16 x i8> [[TMP6]], <16 x i8> [[TMP7]])
+; SSE-NEXT: store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP12:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP15:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP13]])
-; SSE-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP14]])
-; SSE-NEXT: store <16 x i8> [[TMP4]], ptr @d8, align 1
-; SSE-NEXT: store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
-; SSE-NEXT: store <16 x i8> [[TMP15]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; SSE-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP15:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP13]], <16 x i8> [[TMP14]], <16 x i8> [[TMP15]])
; SSE-NEXT: store <16 x i8> [[TMP16]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -598,11 +598,11 @@ define void @fshl_v64i8() {
; AVX-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @b8, align 1
; AVX-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr @c8, align 1
; AVX-NEXT: [[TMP4:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP1]], <32 x i8> [[TMP2]], <32 x i8> [[TMP3]])
+; AVX-NEXT: store <32 x i8> [[TMP4]], ptr @d8, align 1
; AVX-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP8:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP5]], <32 x i8> [[TMP6]], <32 x i8> [[TMP7]])
-; AVX-NEXT: store <32 x i8> [[TMP4]], ptr @d8, align 1
; AVX-NEXT: store <32 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; AVX-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
index f3e73d0e6840e..090a9daa6a113 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
@@ -480,10 +480,10 @@ define void @fshr_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
index fb7532768c4b3..3dc7d164f5bc9 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
@@ -575,21 +575,21 @@ define void @fshr_v64i8() {
; SSE-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @b8, align 1
; SSE-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @c8, align 1
; SSE-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]])
+; SSE-NEXT: store <16 x i8> [[TMP4]], ptr @d8, align 1
; SSE-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP8:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP5]], <16 x i8> [[TMP6]], <16 x i8> [[TMP7]])
+; SSE-NEXT: store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP12:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP15:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP13]])
-; SSE-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP14]])
-; SSE-NEXT: store <16 x i8> [[TMP4]], ptr @d8, align 1
-; SSE-NEXT: store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
-; SSE-NEXT: store <16 x i8> [[TMP15]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; SSE-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP15:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP13]], <16 x i8> [[TMP14]], <16 x i8> [[TMP15]])
; SSE-NEXT: store <16 x i8> [[TMP16]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -598,11 +598,11 @@ define void @fshr_v64i8() {
; AVX-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @b8, align 1
; AVX-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr @c8, align 1
; AVX-NEXT: [[TMP4:%.*]] = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> [[TMP1]], <32 x i8> [[TMP2]], <32 x i8> [[TMP3]])
+; AVX-NEXT: store <32 x i8> [[TMP4]], ptr @d8, align 1
; AVX-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP8:%.*]] = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> [[TMP5]], <32 x i8> [[TMP6]], <32 x i8> [[TMP7]])
-; AVX-NEXT: store <32 x i8> [[TMP4]], ptr @d8, align 1
; AVX-NEXT: store <32 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; AVX-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-mul.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-mul.ll
index 94976a8cdee25..51cf32242bfdf 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-mul.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-mul.ll
@@ -528,10 +528,10 @@ define void @mul_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = mul <16 x i8> [[TMP7]], [[TMP8]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = mul <16 x i8> [[TMP10]], [[TMP11]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -547,10 +547,10 @@ define void @mul_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = mul <16 x i8> [[TMP7]], [[TMP8]]
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = mul <16 x i8> [[TMP10]], [[TMP11]]
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
@@ -566,10 +566,10 @@ define void @mul_v64i8() {
; AVX128-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; AVX128-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; AVX128-NEXT: [[TMP9:%.*]] = mul <16 x i8> [[TMP7]], [[TMP8]]
+; AVX128-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; AVX128-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; AVX128-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; AVX128-NEXT: [[TMP12:%.*]] = mul <16 x i8> [[TMP10]], [[TMP11]]
-; AVX128-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; AVX128-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; AVX128-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-smax.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-smax.ll
index c63b672f4187c..dd76992c2570b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-smax.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-smax.ll
@@ -385,10 +385,10 @@ define void @smax_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.smax.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.smax.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-smin.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-smin.ll
index 826f97f2a2d89..678477fa1e397 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-smin.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-smin.ll
@@ -385,10 +385,10 @@ define void @smin_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.smin.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.smin.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll
index afaab8b8ca642..65e2a011cc9a1 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll
@@ -503,10 +503,10 @@ define void @sub_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -522,10 +522,10 @@ define void @sub_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll
index 3510863c88930..18df499c6646e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll
@@ -401,10 +401,10 @@ define void @sub_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub.ll
index be54c1e04ca39..9d34edbb506c0 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub.ll
@@ -439,10 +439,10 @@ define void @sub_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = sub <16 x i8> [[TMP7]], [[TMP8]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = sub <16 x i8> [[TMP10]], [[TMP11]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -458,10 +458,10 @@ define void @sub_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = sub <16 x i8> [[TMP7]], [[TMP8]]
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = sub <16 x i8> [[TMP10]], [[TMP11]]
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-umax.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-umax.ll
index 3a187930055f0..a3f2b97a08a6e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-umax.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-umax.ll
@@ -385,10 +385,10 @@ define void @umax_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.umax.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.umax.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-umin.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-umin.ll
index 15119a9628067..0c7688345ac48 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-umin.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-umin.ll
@@ -385,10 +385,10 @@ define void @umin_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
index e3dc67558af02..f036801865048 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
@@ -810,14 +810,9 @@ define float @extra_args_same_several_times(ptr nocapture readonly %x, i32 %a, i
; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
; THRESHOLD-NEXT: [[TMP0:%.*]] = load <8 x float>, ptr [[X:%.*]], align 4
; THRESHOLD-NEXT: [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP0]])
-; THRESHOLD-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
-; THRESHOLD-NEXT: [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[CONV]], i32 1
-; THRESHOLD-NEXT: [[TMP4:%.*]] = fadd fast <2 x float> [[TMP3]], <float 1.300000e+01, float 2.000000e+00>
-; THRESHOLD-NEXT: [[TMP5:%.*]] = fmul fast <2 x float> [[TMP3]], <float 1.300000e+01, float 2.000000e+00>
-; THRESHOLD-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> [[TMP5]], <2 x i32> <i32 0, i32 3>
-; THRESHOLD-NEXT: [[TMP7:%.*]] = extractelement <2 x float> [[TMP6]], i32 0
-; THRESHOLD-NEXT: [[TMP8:%.*]] = extractelement <2 x float> [[TMP6]], i32 1
-; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[TMP7]], [[TMP8]]
+; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP1]], 1.300000e+01
+; THRESHOLD-NEXT: [[TMP2:%.*]] = fmul fast float [[CONV]], 2.000000e+00
+; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[TMP2]]
; THRESHOLD-NEXT: ret float [[OP_RDX1]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shift-ashr.ll b/llvm/test/Transforms/SLPVectorizer/X86/shift-ashr.ll
index 51798deae694a..88aafb2bf148b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shift-ashr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/shift-ashr.ll
@@ -464,10 +464,10 @@ define void @ashr_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = ashr <16 x i8> [[TMP7]], [[TMP8]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = ashr <16 x i8> [[TMP10]], [[TMP11]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shift-lshr.ll b/llvm/test/Transforms/SLPVectorizer/X86/shift-lshr.ll
index 7583561bbecf9..96977cd4fb7d7 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shift-lshr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/shift-lshr.ll
@@ -413,10 +413,10 @@ define void @lshr_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = lshr <16 x i8> [[TMP7]], [[TMP8]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = lshr <16 x i8> [[TMP10]], [[TMP11]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shift-shl.ll b/llvm/test/Transforms/SLPVectorizer/X86/shift-shl.ll
index 5ec327c131fb7..789316ab33c43 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shift-shl.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/shift-shl.ll
@@ -461,10 +461,10 @@ define void @shl_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = shl <16 x i8> [[TMP7]], [[TMP8]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = shl <16 x i8> [[TMP10]], [[TMP11]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
From 7aa906dda56ea6843004f1d52eb13860341ca5e5 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 3 Jul 2024 11:45:46 -0700
Subject: [PATCH 170/246] [RISCV] Merge the Arch and Exts variables in
RISCVISAInfo::parseArchString. NFC
---
llvm/lib/TargetParser/RISCVISAInfo.cpp | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp
index ee8024b3bea36..1d077326e4cf2 100644
--- a/llvm/lib/TargetParser/RISCVISAInfo.cpp
+++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp
@@ -561,7 +561,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
// Ref: Table 22.1 in RISC-V User-Level ISA V2.2
char Baseline = Arch.front();
// Skip the baseline.
- StringRef Exts = Arch.drop_front();
+ Arch = Arch.drop_front();
unsigned Major, Minor, ConsumeLength;
@@ -574,7 +574,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
case 'i':
// Baseline is `i` or `e`
if (auto E = getExtensionVersion(
- StringRef(&Baseline, 1), Exts, Major, Minor, ConsumeLength,
+ StringRef(&Baseline, 1), Arch, Major, Minor, ConsumeLength,
EnableExperimentalExtension, ExperimentalExtensionVersionCheck))
return std::move(E);
@@ -582,7 +582,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
break;
case 'g':
// g expands to extensions in RISCVGImplications.
- if (Arch.size() > 1 && isDigit(Arch[1]))
+ if (!Arch.empty() && isDigit(Arch.front()))
return getError("version not supported for 'g'");
// Versions for g are disallowed, and this was checked for previously.
@@ -602,18 +602,18 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
// Consume the base ISA version number and any '_' between rvxxx and the
// first extension
- Exts = Exts.drop_front(ConsumeLength);
+ Arch = Arch.drop_front(ConsumeLength);
- while (!Exts.empty()) {
- if (Exts.front() == '_') {
- if (Exts.size() == 1 || Exts[1] == '_')
+ while (!Arch.empty()) {
+ if (Arch.front() == '_') {
+ if (Arch.size() == 1 || Arch[1] == '_')
return getError("extension name missing after separator '_'");
- Exts = Exts.drop_front();
+ Arch = Arch.drop_front();
}
- size_t Idx = Exts.find('_');
- StringRef Ext = Exts.slice(0, Idx);
- Exts = Exts.slice(Idx, StringRef::npos);
+ size_t Idx = Arch.find('_');
+ StringRef Ext = Arch.slice(0, Idx);
+ Arch = Arch.slice(Idx, StringRef::npos);
do {
StringRef Name, Vers, Desc;
From c02e8f762a410e55581866c43636efcd6504c1bd Mon Sep 17 00:00:00 2001
From: Ilia Sergachev <1894984+sergachev at users.noreply.github.com>
Date: Wed, 3 Jul 2024 21:26:21 +0200
Subject: [PATCH 171/246] [llvm][transforms] Add a new algorithm to SplitModule
(#95941)
The new round-robin algorithm overrides the hash-based distribution of
functions to modules. It achieves a more even number of functions per
module when the number of functions is close to the number of requested
modules. It is not enabled by default and is exposed through a new
-round-robin flag.
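For illustration, a minimal sketch of opting in from C++. The
SplitModule signature matches the SplitModule.h change below; the
splitRoundRobin wrapper and its collecting vector are hypothetical:

    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Utils/SplitModule.h"
    #include <memory>
    #include <vector>

    // Split M into N parts using the new round-robin distribution;
    // passing RoundRobin=false (or omitting it) keeps the default
    // name-hash-based behavior.
    static void
    splitRoundRobin(llvm::Module &M, unsigned N,
                    std::vector<std::unique_ptr<llvm::Module>> &Parts) {
      llvm::SplitModule(
          M, N,
          [&](std::unique_ptr<llvm::Module> MPart) {
            Parts.push_back(std::move(MPart));
          },
          /*PreserveLocals=*/false, /*RoundRobin=*/true);
    }

From the command line, the same behavior is available via llvm-split's
new -round-robin option, exercised by the round-robin.ll test below.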
---
.../llvm/Transforms/Utils/SplitModule.h | 5 +-
llvm/lib/Transforms/Utils/SplitModule.cpp | 62 +++++++++++++++----
.../name-hash-based-distribution.ll | 29 +++++++++
llvm/test/tools/llvm-split/round-robin.ll | 33 ++++++++++
llvm/tools/llvm-split/llvm-split.cpp | 11 +++-
5 files changed, 125 insertions(+), 15 deletions(-)
create mode 100644 llvm/test/tools/llvm-split/name-hash-based-distribution.ll
create mode 100644 llvm/test/tools/llvm-split/round-robin.ll
diff --git a/llvm/include/llvm/Transforms/Utils/SplitModule.h b/llvm/include/llvm/Transforms/Utils/SplitModule.h
index a5450738060a8..e7e8ee6279ace 100644
--- a/llvm/include/llvm/Transforms/Utils/SplitModule.h
+++ b/llvm/include/llvm/Transforms/Utils/SplitModule.h
@@ -24,6 +24,9 @@ class Module;
/// Splits the module M into N linkable partitions. The function ModuleCallback
/// is called N times passing each individual partition as the MPart argument.
+/// PreserveLocals: Split without externalizing locals.
+/// RoundRobin: Use round-robin distribution of functions to modules instead
+/// of the default name-hash-based one.
///
/// FIXME: This function does not deal with the somewhat subtle symbol
/// visibility issues around module splitting, including (but not limited to):
@@ -35,7 +38,7 @@ class Module;
void SplitModule(
Module &M, unsigned N,
function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback,
- bool PreserveLocals = false);
+ bool PreserveLocals = false, bool RoundRobin = false);
} // end namespace llvm
diff --git a/llvm/lib/Transforms/Utils/SplitModule.cpp b/llvm/lib/Transforms/Utils/SplitModule.cpp
index 55db3737a1c09..a30afadf0365d 100644
--- a/llvm/lib/Transforms/Utils/SplitModule.cpp
+++ b/llvm/lib/Transforms/Utils/SplitModule.cpp
@@ -55,6 +55,18 @@ using ClusterMapType = EquivalenceClasses<const GlobalValue *>;
using ComdatMembersType = DenseMap<const Comdat *, const GlobalValue *>;
using ClusterIDMapType = DenseMap<const GlobalValue *, unsigned>;
+bool compareClusters(const std::pair<unsigned, unsigned> &A,
+ const std::pair<unsigned, unsigned> &B) {
+ if (A.second || B.second)
+ return A.second > B.second;
+ return A.first > B.first;
+}
+
+using BalancingQueueType =
+ std::priority_queue<std::pair<unsigned, unsigned>,
+ std::vector<std::pair<unsigned, unsigned>>,
+ decltype(compareClusters) *>;
+
} // end anonymous namespace
static void addNonConstUser(ClusterMapType &GVtoClusterMap,
@@ -154,18 +166,7 @@ static void findPartitions(Module &M, ClusterIDMapType &ClusterIDMap,
// Assigned all GVs to merged clusters while balancing number of objects in
// each.
- auto CompareClusters = [](const std::pair<unsigned, unsigned> &a,
- const std::pair<unsigned, unsigned> &b) {
- if (a.second || b.second)
- return a.second > b.second;
- else
- return a.first > b.first;
- };
-
- std::priority_queue<std::pair<unsigned, unsigned>,
- std::vector<std::pair<unsigned, unsigned>>,
- decltype(CompareClusters)>
- BalancingQueue(CompareClusters);
+ BalancingQueueType BalancingQueue(compareClusters);
// Pre-populate priority queue with N slot blanks.
for (unsigned i = 0; i < N; ++i)
BalancingQueue.push(std::make_pair(i, 0));
@@ -254,7 +255,7 @@ static bool isInPartition(const GlobalValue *GV, unsigned I, unsigned N) {
void llvm::SplitModule(
Module &M, unsigned N,
function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback,
- bool PreserveLocals) {
+ bool PreserveLocals, bool RoundRobin) {
if (!PreserveLocals) {
for (Function &F : M)
externalize(&F);
@@ -271,6 +272,41 @@ void llvm::SplitModule(
ClusterIDMapType ClusterIDMap;
findPartitions(M, ClusterIDMap, N);
+ // Find functions not mapped to modules in ClusterIDMap and count functions
+ // per module. Map unmapped functions using round-robin so that they skip
+ // being distributed by isInPartition() based on function name hashes below.
+ // This provides better uniformity of distribution of functions to modules
+ // in some cases - for example when the number of functions equals to N.
+ if (RoundRobin) {
+ DenseMap<unsigned, unsigned> ModuleFunctionCount;
+ SmallVector<const GlobalValue *> UnmappedFunctions;
+ for (const auto &F : M.functions()) {
+ if (F.isDeclaration() ||
+ F.getLinkage() != GlobalValue::LinkageTypes::ExternalLinkage)
+ continue;
+ auto It = ClusterIDMap.find(&F);
+ if (It == ClusterIDMap.end())
+ UnmappedFunctions.push_back(&F);
+ else
+ ++ModuleFunctionCount[It->second];
+ }
+ BalancingQueueType BalancingQueue(compareClusters);
+ for (unsigned I = 0; I < N; ++I) {
+ if (auto It = ModuleFunctionCount.find(I);
+ It != ModuleFunctionCount.end())
+ BalancingQueue.push(*It);
+ else
+ BalancingQueue.push({I, 0});
+ }
+ for (const auto *const F : UnmappedFunctions) {
+ const unsigned I = BalancingQueue.top().first;
+ const unsigned Count = BalancingQueue.top().second;
+ BalancingQueue.pop();
+ ClusterIDMap.insert({F, I});
+ BalancingQueue.push({I, Count + 1});
+ }
+ }
+
// FIXME: We should be able to reuse M as the last partition instead of
// cloning it. Note that the callers at the moment expect the module to
// be preserved, so will need some adjustments as well.
diff --git a/llvm/test/tools/llvm-split/name-hash-based-distribution.ll b/llvm/test/tools/llvm-split/name-hash-based-distribution.ll
new file mode 100644
index 0000000000000..b0e2d5eba12f9
--- /dev/null
+++ b/llvm/test/tools/llvm-split/name-hash-based-distribution.ll
@@ -0,0 +1,29 @@
+; RUN: llvm-split -o %t %s -j 2
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+
+; CHECK0-NOT: define
+; CHECK0: define void @D
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define void @A
+; CHECK1: define void @B
+; CHECK1: define void @C
+; CHECK1-NOT: define
+
+define void @A() {
+ ret void
+}
+
+define void @B() {
+ ret void
+}
+
+define void @C() {
+ ret void
+}
+
+define void @D() {
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/round-robin.ll b/llvm/test/tools/llvm-split/round-robin.ll
new file mode 100644
index 0000000000000..385c896888710
--- /dev/null
+++ b/llvm/test/tools/llvm-split/round-robin.ll
@@ -0,0 +1,33 @@
+; RUN: llvm-split -o %t %s -j 2 -round-robin
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+
+; CHECK0-NOT: define
+; CHECK0: declare extern_weak void @e
+; CHECK0: define void @A
+; CHECK0: define void @C
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: declare extern_weak void @e
+; CHECK1: define void @B
+; CHECK1: define void @D
+; CHECK1-NOT: define
+
+declare extern_weak void @e(...)
+
+define void @A() {
+ ret void
+}
+
+define void @B() {
+ ret void
+}
+
+define void @C() {
+ ret void
+}
+
+define void @D() {
+ ret void
+}
diff --git a/llvm/tools/llvm-split/llvm-split.cpp b/llvm/tools/llvm-split/llvm-split.cpp
index 39a89cb1d2e75..c456403e6bc68 100644
--- a/llvm/tools/llvm-split/llvm-split.cpp
+++ b/llvm/tools/llvm-split/llvm-split.cpp
@@ -53,6 +53,12 @@ static cl::opt<bool>
cl::desc("Split without externalizing locals"),
cl::cat(SplitCategory));
+static cl::opt<bool>
+ RoundRobin("round-robin", cl::Prefix, cl::init(false),
+ cl::desc("Use round-robin distribution of functions to "
+ "modules instead of the default name-hash-based one"),
+ cl::cat(SplitCategory));
+
static cl::opt<std::string>
MTriple("mtriple",
cl::desc("Target triple. When present, a TargetMachine is created "
@@ -122,6 +128,9 @@ int main(int argc, char **argv) {
errs() << "warning: -preserve-locals has no effect when using "
"TargetMachine::splitModule\n";
}
+ if (RoundRobin)
+ errs() << "warning: -round-robin has no effect when using "
+ "TargetMachine::splitModule\n";
if (TM->splitModule(*M, NumOutputs, HandleModulePart))
return 0;
@@ -131,6 +140,6 @@ int main(int argc, char **argv) {
"splitModule implementation\n";
}
- SplitModule(*M, NumOutputs, HandleModulePart, PreserveLocals);
+ SplitModule(*M, NumOutputs, HandleModulePart, PreserveLocals, RoundRobin);
return 0;
}
From 7002ecb4c6dba2050b321699e0e17eb890c3ca2c Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 3 Jul 2024 12:40:12 -0700
Subject: [PATCH 172/246] [msan] Convert vector shadow to scalar before
zext (#96722)
zext cannot convert a vector shadow to a scalar, so we must convert the
shadow manually before calling zext in materializeOneCheck, whose
'ConvertedShadow' parameter is not actually guaranteed to be scalar (1).
Note that calling convertShadowToScalar on a shadow that is already
scalar is safe and a no-op.
In contrast, the storeOrigin function already converts the (potentially
vector) shadow to scalar; we add a comment noting why that conversion
is load-bearing.
(1) In materializeInstructionChecks():
"// Disable combining in some cases. TrackOrigins checks each shadow to pick
// correct origin.
bool Combine = !MS.TrackOrigins;
...
if (!Combine) {
  materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
  continue;
}"
---
.../Instrumentation/MemorySanitizer.cpp | 3 ++
.../vector-track-origins-neon.ll | 45 +++++++------------
.../vector-track-origins-struct.ll | 23 ++++------
3 files changed, 29 insertions(+), 42 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index d0dbb108b1eca..c7d41f6298372 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1283,6 +1283,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
const DataLayout &DL = F.getDataLayout();
const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
+ // ZExt cannot convert between vector and scalar
Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
@@ -1398,6 +1399,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (instrumentWithCalls(ConvertedShadow) &&
SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
+ // ZExt cannot convert between vector and scalar
+ ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
Value *ConvertedShadow2 =
IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
CallBase *CB = IRB.CreateCall(
diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-neon.ll b/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-neon.ll
index 0fe842e28ff92..98a9cdc696049 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-neon.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-neon.ll
@@ -1,17 +1,11 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build-release/bin/opt --version 5
-; RUN: opt < %s -S -passes="msan<eager-checks;track-origins=2>" -msan-instrumentation-with-call-threshold=0 -disable-verify | FileCheck %s
-;
-; UNSUPPORTED: target={{.*}}
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build/bin/opt --version 5
+; RUN: opt < %s -S -passes="msan<eager-checks;track-origins=2>" -msan-instrumentation-with-call-threshold=0 | FileCheck %s
;
; This test illustrates a bug in MemorySanitizer that will shortly be fixed
; (https://github.com/llvm/llvm-project/pull/96722).
;
; '-msan-instrumentation-with-call-threshold=0' makes it possible to detect the
; bug with a short test case.
-;
-; '-disable-verify' with a release build is needed to avoid a compiler crash
-; (e.g., to autogenerate the assertions).
-;
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
target triple = "aarch64-grtev4-linux-gnu"
@@ -28,36 +22,31 @@ define dso_local void @_Z1cv() local_unnamed_addr #0 {
; CHECK-NEXT: br label %[[FOR_COND:.*]]
; CHECK: [[FOR_COND]]:
; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi <4 x i16> [ [[_MSLD]], %[[ENTRY]] ], [ [[_MSLD3:%.*]], %[[FOR_COND]] ]
-; CHECK-NEXT: [[_MSPHI_O:%.*]] = phi i32 [ [[TMP0]], %[[ENTRY]] ], [ [[TMP15:%.*]], %[[FOR_COND]] ]
+; CHECK-NEXT: [[_MSPHI_O:%.*]] = phi i32 [ [[TMP0]], %[[ENTRY]] ], [ [[TMP11:%.*]], %[[FOR_COND]] ]
; CHECK-NEXT: [[TMP1:%.*]] = phi <4 x i16> [ [[DOTPRE]], %[[ENTRY]] ], [ [[TMP5:%.*]], %[[FOR_COND]] ]
; CHECK-NEXT: [[_MSPHI_S1:%.*]] = phi <4 x i16> [ <i16 -1, i16 -1, i16 -1, i16 -1>, %[[ENTRY]] ], [ [[_MSLD3]], %[[FOR_COND]] ]
-; CHECK-NEXT: [[_MSPHI_O2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP15]], %[[FOR_COND]] ]
+; CHECK-NEXT: [[_MSPHI_O2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP11]], %[[FOR_COND]] ]
; CHECK-NEXT: [[E_0:%.*]] = phi <4 x i16> [ undef, %[[ENTRY]] ], [ [[TMP5]], %[[FOR_COND]] ]
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[_MSPHI_S1]], <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[LANE:%.*]] = shufflevector <4 x i16> [[E_0]], <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-;
-; Editor's note: the following zext instructions are invalid
-; ('zext source and destination must both be a vector or neither')
-;
-; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[_MSPHI_S]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[_MSPHI_S]] to i64
; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[_MSPHI_O]])
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[_MSPROP]] to i64
-;
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[_MSPROP]] to i64
; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP3]], i32 zeroext [[_MSPHI_O2]])
; CHECK-NEXT: [[CALL:%.*]] = tail call noundef i32 @_Z1b11__Int16x4_tS_(<4 x i16> noundef [[TMP1]], <4 x i16> noundef [[LANE]])
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[CALL]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[CONV]] to ptr
-; CHECK-NEXT: [[TMP5]] = load <4 x i16>, ptr [[TMP8]], align 8, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP8]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[_MSLD3]] = load <4 x i16>, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP15]] = load i32, ptr [[TMP14]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[CONV]] to ptr
+; CHECK-NEXT: [[TMP5]] = load <4 x i16>, ptr [[TMP4]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
+; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT: [[_MSLD3]] = load <4 x i16>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[TMP11]] = load i32, ptr [[TMP10]], align 8
; CHECK-NEXT: store <4 x i16> [[_MSLD3]], ptr inttoptr (i64 xor (i64 ptrtoint (ptr @_Z1cv to i64), i64 193514046488576) to ptr), align 8
-; CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x i16> [[_MSLD3]] to i64
-; CHECK-NEXT: call void @__msan_maybe_store_origin_8(i64 zeroext [[TMP16]], ptr @_Z1cv, i32 zeroext [[TMP15]])
+; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i16> [[_MSLD3]] to i64
+; CHECK-NEXT: call void @__msan_maybe_store_origin_8(i64 zeroext [[TMP12]], ptr @_Z1cv, i32 zeroext [[TMP11]])
; CHECK-NEXT: store <4 x i16> [[TMP5]], ptr @_Z1cv, align 8, !tbaa [[TBAA0]]
; CHECK-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
;
diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-struct.ll b/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-struct.ll
index 5eae441f05eae..c6d0f8fcebc03 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-struct.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/vector-track-origins-struct.ll
@@ -1,7 +1,5 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build-release/bin/opt --version 5
-; RUN: opt < %s -S -passes="msan<eager-checks;track-origins=2>" -msan-instrumentation-with-call-threshold=0 -disable-verify | FileCheck %s
-;
-; UNSUPPORTED: target={{.*}}
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build/bin/opt --version 5
+; RUN: opt < %s -S -passes="msan<eager-checks;track-origins=2>" -msan-instrumentation-with-call-threshold=0 | FileCheck %s
;
; This test illustrates a bug in MemorySanitizer that will shortly be fixed
; (https://github.com/llvm/llvm-project/pull/96722).
@@ -9,9 +7,6 @@
; '-msan-instrumentation-with-call-threshold=0' makes it possible to detect the
; bug with a short test case.
;
-; '-disable-verify' with a release build is needed to avoid a compiler crash
-; (e.g., to autogenerate the assertions).
-;
; This is based on check-struct.ll.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -31,13 +26,13 @@ define { i32, i8 } @main() sanitize_memory {
; CHECK-NEXT: [[_MSLD:%.*]] = load { i32, i8 }, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: store { i32, i8 } zeroinitializer, ptr @__msan_retval_tls, align 8
-;
-; Editor's note: the following zext instruction is invalid
-; ('ZExt only operates on integer')
-;
-; CHECK-NEXT: [[TMP7:%.*]] = zext { i32, i8 } [[_MSLD]] to i64
-;
-; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP7]], i32 zeroext [[TMP6]])
+; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i8 } [[_MSLD]], 0
+; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i8 } [[_MSLD]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; CHECK-NEXT: [[TMP11:%.*]] = or i1 [[TMP8]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = zext i1 [[TMP11]] to i64
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP12]], i32 zeroext [[TMP6]])
; CHECK-NEXT: ret { i32, i8 } [[O]]
;
%p = inttoptr i64 0 to ptr
From a1bc606b5fb9a91eb16fc0c012aa785323788c90 Mon Sep 17 00:00:00 2001
From: agozillon <Andrew.Gozillon at amd.com>
Date: Wed, 3 Jul 2024 21:44:50 +0200
Subject: [PATCH 173/246] [Flang][Transform] Modify stack reclaim pass to use
allocation address space when generating intrinsics (#96836)
This PR factors in the allocation address space provided by an
architecture's data layout when generating the intrinsic instructions,
allowing them to be lowered later with the correct address spaces in place.
This aligns the intrinsic creation with the LLVM IRBuilder's:
https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/IRBuilder.h#L1053
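For reference, that overload derives the alloca address space from the
module's data layout rather than assuming address space 0. A simplified
sketch of the pattern (paraphrased, not the verbatim LLVM source):

  // Simplified from IRBuilder: the address space for the alloca comes
  // from the DataLayout instead of being hardcoded to 0.
  AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
                           const Twine &Name = "") {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    return Insert(new AllocaInst(Ty, DL.getAllocaAddrSpace(), ArraySize),
                  Name);
  }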
This is also necessary for the example below to compile for OpenMP on AMD
GPUs without ICEing the compiler in ISel, as AMDGPU's stacksave and
stackrestore intrinsics are expected to carry the appropriate allocation
address space.
program main
  integer(4), allocatable :: test
  allocate(test)
  !$omp target map(tofrom:test)
  do i = 1, 10
    test = test + 50
  end do
  !$omp end target
  deallocate(test)
end program
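With this patch the example can be compiled for AMDGPU offload with the
usual Flang OpenMP offload invocation; the driver name and target below are
assumptions (flang-new, with gfx90a as an example architecture):

  flang-new -fopenmp --offload-arch=gfx90a main.f90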
The PR also fixes an issue I opened a while ago that hits the same error
when compiling for AMDGPU:
https://github.com/llvm/llvm-project/issues/82368
Note that the appropriate GPU libc and Fortran offload runtime (both
compiled for AMDGPU) must be added to the link command, or compilation
reaches another ISel error and ICEs. With those prerequisites in place, it
works fine with this PR.
---
flang/lib/Optimizer/Transforms/StackReclaim.cpp | 14 +++++++++++++-
flang/test/Transforms/stack-reclaime.fir | 17 +++++++++++++++++
2 files changed, 30 insertions(+), 1 deletion(-)
diff --git a/flang/lib/Optimizer/Transforms/StackReclaim.cpp b/flang/lib/Optimizer/Transforms/StackReclaim.cpp
index e5e0e4eab8298..8a60a9e64f704 100644
--- a/flang/lib/Optimizer/Transforms/StackReclaim.cpp
+++ b/flang/lib/Optimizer/Transforms/StackReclaim.cpp
@@ -31,11 +31,23 @@ class StackReclaimPass : public fir::impl::StackReclaimBase<StackReclaimPass> {
};
} // namespace
+uint64_t getAllocaAddressSpace(Operation *op) {
+ mlir::ModuleOp module = mlir::dyn_cast_or_null<mlir::ModuleOp>(op);
+ if (!module)
+ module = op->getParentOfType<mlir::ModuleOp>();
+
+ if (mlir::Attribute addrSpace =
+ mlir::DataLayout(module).getAllocaMemorySpace())
+ return llvm::cast<mlir::IntegerAttr>(addrSpace).getUInt();
+ return 0;
+}
+
void StackReclaimPass::runOnOperation() {
auto *op = getOperation();
auto *context = &getContext();
mlir::OpBuilder builder(context);
- mlir::Type voidPtr = mlir::LLVM::LLVMPointerType::get(context);
+ mlir::Type voidPtr =
+ mlir::LLVM::LLVMPointerType::get(context, getAllocaAddressSpace(op));
op->walk([&](fir::DoLoopOp loopOp) {
mlir::Location loc = loopOp.getLoc();
diff --git a/flang/test/Transforms/stack-reclaime.fir b/flang/test/Transforms/stack-reclaime.fir
index b53cc96035751..96416df8b2013 100644
--- a/flang/test/Transforms/stack-reclaime.fir
+++ b/flang/test/Transforms/stack-reclaime.fir
@@ -12,3 +12,20 @@ func.func @alloca_in_loop(%lb : index, %ub : index, %step : index, %b : i1, %add
// CHECK: %[[STACKPTR:.*]] = llvm.intr.stacksave : !llvm.ptr
// CHECK: %{{.*}} = fir.alloca !fir.box<!fir.heap<!fir.char<1,?>>>
// CHECK: llvm.intr.stackrestore %0 : !llvm.ptr
+
+// -----
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memory_space", 5 : ui64>>} {
+ func.func @stack_restore_save_alloca_address(%lb : index, %ub : index, %step : index, %b : i1, %addr : !fir.ref<index>) {
+ fir.do_loop %iv = %lb to %ub step %step unordered {
+ %0 = fir.alloca !fir.box<!fir.heap<!fir.char<1,?>>>
+ }
+ return
+ }
+}
+
+// CHECK-LABEL: func.func @stack_restore_save_alloca_address
+// CHECK: fir.do_loop
+// CHECK: %[[STACKPTR:.*]] = llvm.intr.stacksave : !llvm.ptr<5>
+// CHECK: %{{.*}} = fir.alloca !fir.box<!fir.heap<!fir.char<1,?>>>
+// CHECK: llvm.intr.stackrestore %0 : !llvm.ptr<5>
From a379b2260fc3bada0c11a6a1cd7891a1a6e1fb99 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Wed, 3 Jul 2024 12:53:06 -0700
Subject: [PATCH 174/246] [IR] Use range-based for loops (NFC) (#97575)
---
llvm/lib/IR/DIBuilder.cpp | 6 +++---
llvm/lib/IR/LegacyPassManager.cpp | 5 ++---
2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/IR/DIBuilder.cpp b/llvm/lib/IR/DIBuilder.cpp
index f39149ae0dad4..30b79b6d6f60f 100644
--- a/llvm/lib/IR/DIBuilder.cpp
+++ b/llvm/lib/IR/DIBuilder.cpp
@@ -79,9 +79,9 @@ void DIBuilder::finalize() {
// list. Use a set to remove the duplicates while we transform the
// TrackingVHs back into Values.
SmallPtrSet<Metadata *, 16> RetainSet;
- for (unsigned I = 0, E = AllRetainTypes.size(); I < E; I++)
- if (RetainSet.insert(AllRetainTypes[I]).second)
- RetainValues.push_back(AllRetainTypes[I]);
+ for (const TrackingMDNodeRef &N : AllRetainTypes)
+ if (RetainSet.insert(N).second)
+ RetainValues.push_back(N);
if (!RetainValues.empty())
CUNode->replaceRetainedTypes(MDTuple::get(VMContext, RetainValues));
diff --git a/llvm/lib/IR/LegacyPassManager.cpp b/llvm/lib/IR/LegacyPassManager.cpp
index d361bd9a98391..01aaedcf7d547 100644
--- a/llvm/lib/IR/LegacyPassManager.cpp
+++ b/llvm/lib/IR/LegacyPassManager.cpp
@@ -823,9 +823,8 @@ void PMTopLevelManager::dumpPasses() const {
return;
// Print out the immutable passes
- for (unsigned i = 0, e = ImmutablePasses.size(); i != e; ++i) {
- ImmutablePasses[i]->dumpPassStructure(0);
- }
+ for (ImmutablePass *Pass : ImmutablePasses)
+ Pass->dumpPassStructure(0);
// Every class that derives from PMDataManager also derives from Pass
// (sometimes indirectly), but there's no inheritance relationship
From 92f4001906a18fca29929a333e61fdd662a9b0bd Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Wed, 3 Jul 2024 12:53:19 -0700
Subject: [PATCH 175/246] [Transforms] Use range-based for loops (NFC) (#97576)
---
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 4 ++--
llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 3 +--
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index abcd94bea93e3..0d8e7e92c5c8e 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -4399,8 +4399,8 @@ Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
if (MakeNewInstruction) {
LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
NewClauses.size());
- for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
- NLI->addClause(NewClauses[i]);
+ for (Constant *C : NewClauses)
+ NLI->addClause(C);
// A landing pad with no clauses must have the cleanup flag set. It is
// theoretically possible, though highly unlikely, that we eliminated all
// clauses. If so, force the cleanup flag to true.
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 69daa6352f95e..3fa3c0f1f52b0 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5339,8 +5339,7 @@ bool SimplifyCFGOpt::simplifyUnreachable(UnreachableInst *UI) {
std::vector<DominatorTree::UpdateType> Updates;
SmallSetVector<BasicBlock *, 8> Preds(pred_begin(BB), pred_end(BB));
- for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
- auto *Predecessor = Preds[i];
+ for (BasicBlock *Predecessor : Preds) {
Instruction *TI = Predecessor->getTerminator();
IRBuilder<> Builder(TI);
if (auto *BI = dyn_cast<BranchInst>(TI)) {
From fd524d4df797d3c25a1f50b03b1d8ffcbbdd87af Mon Sep 17 00:00:00 2001
From: shawbyoung <shawbyoung at gmail.com>
Date: Wed, 3 Jul 2024 12:21:03 -0700
Subject: [PATCH 176/246] [BOLT] Add Demangle to Profile link components
Added Demangle to Profile link components to fix shared build.
---
bolt/lib/Profile/CMakeLists.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/bolt/lib/Profile/CMakeLists.txt b/bolt/lib/Profile/CMakeLists.txt
index ca8b9c34e63b1..39708baac4036 100644
--- a/bolt/lib/Profile/CMakeLists.txt
+++ b/bolt/lib/Profile/CMakeLists.txt
@@ -10,6 +10,7 @@ add_llvm_library(LLVMBOLTProfile
DISABLE_LLVM_LINK_LLVM_DYLIB
LINK_COMPONENTS
+ Demangle
Support
TransformUtils
)
From 873c3f7e7813223906d3ebf5acb4359a8b5726bc Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Wed, 3 Jul 2024 13:04:56 -0700
Subject: [PATCH 177/246] Revert "[SLP]Remove operands upon marking instruction
for deletion."
This reverts commit bbd52dd44ceee80e3b6ba6a9b2bd8ee9a9713833 to fix
a crash revealed in https://lab.llvm.org/buildbot/#/builders/4/builds/505
---
.../Transforms/Vectorize/SLPVectorizer.cpp | 60 +++++--------------
.../SLPVectorizer/X86/arith-add-ssat.ll | 4 +-
.../SLPVectorizer/X86/arith-add-usat.ll | 2 +-
.../Transforms/SLPVectorizer/X86/arith-add.ll | 4 +-
.../Transforms/SLPVectorizer/X86/arith-fix.ll | 8 +--
.../SLPVectorizer/X86/arith-fshl-rot.ll | 2 +-
.../SLPVectorizer/X86/arith-fshl.ll | 20 +++----
.../SLPVectorizer/X86/arith-fshr-rot.ll | 2 +-
.../SLPVectorizer/X86/arith-fshr.ll | 20 +++----
.../Transforms/SLPVectorizer/X86/arith-mul.ll | 6 +-
.../SLPVectorizer/X86/arith-smax.ll | 2 +-
.../SLPVectorizer/X86/arith-smin.ll | 2 +-
.../SLPVectorizer/X86/arith-sub-ssat.ll | 4 +-
.../SLPVectorizer/X86/arith-sub-usat.ll | 2 +-
.../Transforms/SLPVectorizer/X86/arith-sub.ll | 4 +-
.../SLPVectorizer/X86/arith-umax.ll | 2 +-
.../SLPVectorizer/X86/arith-umin.ll | 2 +-
.../SLPVectorizer/X86/horizontal-list.ll | 11 +++-
.../SLPVectorizer/X86/shift-ashr.ll | 2 +-
.../SLPVectorizer/X86/shift-lshr.ll | 2 +-
.../Transforms/SLPVectorizer/X86/shift-shl.ll | 2 +-
21 files changed, 70 insertions(+), 93 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index bba80c3f675d7..11f9ad70dc725 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1163,12 +1163,6 @@ class BoUpSLP {
return VectorizableTree.front()->Scalars;
}
- /// Checks if the root graph node can be emitted with narrower bitwidth at
- /// codegen and returns it signedness, if so.
- bool isSignedMinBitwidthRootNode() const {
- return MinBWs.at(VectorizableTree.front().get()).second;
- }
-
/// Builds external uses of the vectorized scalars, i.e. the list of
/// vectorized scalars to be extracted, their lanes and their scalar users. \p
/// ExternallyUsedValues contains additional list of external uses to handle
@@ -2436,21 +2430,6 @@ class BoUpSLP {
DeletedInstructions.insert(I);
}
- /// Clear the operands of \p I, marking for deletion trivially dead operands.
- void clearOperands(Instruction *I, const TreeEntry *Entry = nullptr) {
- for (unsigned Idx : seq<unsigned>(I->getNumOperands())) {
- // Ignore pointer operand of stores to keep correct DIAssignID.
- if (isa<StoreInst>(I) && Idx == 1)
- continue;
- Value *Op = I->getOperand(Idx);
- I->setOperand(Idx, PoisonValue::get(Op->getType()));
- if (auto *OpI = dyn_cast<Instruction>(Op))
- if (!isDeleted(OpI) && isInstructionTriviallyDead(OpI, TLI) &&
- (!Entry || Entry->VectorizedValue != OpI))
- eraseInstruction(OpI);
- }
- }
-
/// Checks if the instruction was already analyzed for being possible
/// reduction root.
bool isAnalyzedReductionRoot(Instruction *I) const {
@@ -3816,7 +3795,7 @@ class BoUpSLP {
/// Performs the "real" scheduling. Done before vectorization is actually
/// performed in a basic block.
- void scheduleBlock(BlockScheduling *BS, BoUpSLP &R);
+ void scheduleBlock(BlockScheduling *BS);
/// List of users to ignore during scheduling and that don't need extracting.
const SmallDenseSet<Value *> *UserIgnoreList = nullptr;
@@ -13545,7 +13524,7 @@ Value *BoUpSLP::vectorizeTree(
Instruction *ReductionRoot) {
// All blocks must be scheduled before any instructions are inserted.
for (auto &BSIter : BlocksSchedules) {
- scheduleBlock(BSIter.second.get(), *this);
+ scheduleBlock(BSIter.second.get());
}
// Clean Entry-to-LastInstruction table. It can be affected after scheduling,
// need to rebuild it.
@@ -14085,14 +14064,11 @@ Value *BoUpSLP::vectorizeTree(
}
#endif
LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
- auto *I = cast<Instruction>(Scalar);
- // Clear the operands, marking for deletion trivially dead operands.
- clearOperands(I, Entry);
- eraseInstruction(I);
+ eraseInstruction(cast<Instruction>(Scalar));
// Retain to-be-deleted instructions for some debug-info
// bookkeeping. NOTE: eraseInstruction only marks the instruction for
// deletion - instructions are not deleted until later.
- RemovedInsts.push_back(I);
+ RemovedInsts.push_back(cast<Instruction>(Scalar));
}
}
@@ -14705,8 +14681,6 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
for (; DepDest; DepDest = DepDest->NextLoadStore) {
assert(isInSchedulingRegion(DepDest));
- if (SLP->isDeleted(DepDest->Inst))
- continue;
// We have two limits to reduce the complexity:
// 1) AliasedCheckLimit: It's a small limit to reduce calls to
@@ -14776,7 +14750,7 @@ void BoUpSLP::BlockScheduling::resetSchedule() {
ReadyInsts.clear();
}
-void BoUpSLP::scheduleBlock(BlockScheduling *BS, BoUpSLP &R) {
+void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
if (!BS->ScheduleStart)
return;
@@ -14833,8 +14807,6 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS, BoUpSLP &R) {
for (ScheduleData *BundleMember = Picked; BundleMember;
BundleMember = BundleMember->NextInBundle) {
Instruction *PickedInst = BundleMember->Inst;
- if (R.isDeleted(PickedInst))
- continue;
if (PickedInst->getNextNonDebugInstruction() != LastScheduledInst)
PickedInst->moveAfter(LastScheduledInst->getPrevNode());
LastScheduledInst = PickedInst;
@@ -17372,11 +17344,14 @@ class HorizontalReduction {
Value *ReducedSubTree =
emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
if (ReducedSubTree->getType() != VL.front()->getType()) {
- assert(ReducedSubTree->getType() != VL.front()->getType() &&
- "Expected different reduction type.");
- ReducedSubTree =
- Builder.CreateIntCast(ReducedSubTree, VL.front()->getType(),
- V.isSignedMinBitwidthRootNode());
+ ReducedSubTree = Builder.CreateIntCast(
+ ReducedSubTree, VL.front()->getType(), any_of(VL, [&](Value *R) {
+ KnownBits Known = computeKnownBits(
+ R, cast<Instruction>(ReductionOps.front().front())
+ ->getModule()
+ ->getDataLayout());
+ return !Known.isNonNegative();
+ }));
}
// Improved analysis for add/fadd/xor reductions with same scale factor
@@ -17538,13 +17513,10 @@ class HorizontalReduction {
}
#endif
if (!Ignore->use_empty()) {
- Value *P = PoisonValue::get(Ignore->getType());
- Ignore->replaceAllUsesWith(P);
+ Value *Undef = UndefValue::get(Ignore->getType());
+ Ignore->replaceAllUsesWith(Undef);
}
- auto *I = cast<Instruction>(Ignore);
- // Clear the operands, marking for deletion trivially dead operands.
- V.clearOperands(I);
- V.eraseInstruction(I);
+ V.eraseInstruction(cast<Instruction>(Ignore));
}
}
} else if (!CheckForReusedReductionOps) {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll
index 8c4903dbc92bb..24c5fcb068086 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll
@@ -503,10 +503,10 @@ define void @add_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -522,10 +522,10 @@ define void @add_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
index cb8d45b1a21a2..fab022d691c07 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
@@ -401,10 +401,10 @@ define void @add_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-add.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-add.ll
index a7ae2d9e02ff4..dafed43e6e71c 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-add.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-add.ll
@@ -439,10 +439,10 @@ define void @add_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = add <16 x i8> [[TMP7]], [[TMP8]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = add <16 x i8> [[TMP10]], [[TMP11]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -458,10 +458,10 @@ define void @add_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = add <16 x i8> [[TMP7]], [[TMP8]]
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = add <16 x i8> [[TMP10]], [[TMP11]]
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fix.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fix.ll
index d4eafdeb50a47..e4c76daddb02e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fix.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fix.ll
@@ -520,10 +520,10 @@ define void @smul_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.smul.fix.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]], i32 3)
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.smul.fix.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], i32 3)
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -539,10 +539,10 @@ define void @smul_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.smul.fix.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]], i32 3)
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.smul.fix.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], i32 3)
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
@@ -1323,10 +1323,10 @@ define void @umul_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.umul.fix.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]], i32 3)
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.umul.fix.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], i32 3)
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -1342,10 +1342,10 @@ define void @umul_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.umul.fix.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]], i32 3)
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.umul.fix.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], i32 3)
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
index 16977c025e3ea..9b8480cd0088a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
@@ -480,10 +480,10 @@ define void @fshl_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
index 609a9024e5bf7..daf28b9a0bb4d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
@@ -575,21 +575,21 @@ define void @fshl_v64i8() {
; SSE-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @b8, align 1
; SSE-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @c8, align 1
; SSE-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]])
-; SSE-NEXT: store <16 x i8> [[TMP4]], ptr @d8, align 1
; SSE-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP8:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP5]], <16 x i8> [[TMP6]], <16 x i8> [[TMP7]])
-; SSE-NEXT: store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP15:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP13]], <16 x i8> [[TMP14]], <16 x i8> [[TMP15]])
+; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP12:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+; SSE-NEXT: [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP15:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP13]])
+; SSE-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP14]])
+; SSE-NEXT: store <16 x i8> [[TMP4]], ptr @d8, align 1
+; SSE-NEXT: store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+; SSE-NEXT: store <16 x i8> [[TMP15]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP16]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -598,11 +598,11 @@ define void @fshl_v64i8() {
; AVX-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @b8, align 1
; AVX-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr @c8, align 1
; AVX-NEXT: [[TMP4:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP1]], <32 x i8> [[TMP2]], <32 x i8> [[TMP3]])
-; AVX-NEXT: store <32 x i8> [[TMP4]], ptr @d8, align 1
; AVX-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP8:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP5]], <32 x i8> [[TMP6]], <32 x i8> [[TMP7]])
+; AVX-NEXT: store <32 x i8> [[TMP4]], ptr @d8, align 1
; AVX-NEXT: store <32 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; AVX-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
index 090a9daa6a113..f3e73d0e6840e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
@@ -480,10 +480,10 @@ define void @fshr_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
index 3dc7d164f5bc9..fb7532768c4b3 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
@@ -575,21 +575,21 @@ define void @fshr_v64i8() {
; SSE-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @b8, align 1
; SSE-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @c8, align 1
; SSE-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]])
-; SSE-NEXT: store <16 x i8> [[TMP4]], ptr @d8, align 1
; SSE-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP8:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP5]], <16 x i8> [[TMP6]], <16 x i8> [[TMP7]])
-; SSE-NEXT: store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
; SSE-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
-; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
-; SSE-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP15:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
-; SSE-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP13]], <16 x i8> [[TMP14]], <16 x i8> [[TMP15]])
+; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP12:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+; SSE-NEXT: [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
+; SSE-NEXT: [[TMP15:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP13]])
+; SSE-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP14]])
+; SSE-NEXT: store <16 x i8> [[TMP4]], ptr @d8, align 1
+; SSE-NEXT: store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+; SSE-NEXT: store <16 x i8> [[TMP15]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP16]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -598,11 +598,11 @@ define void @fshr_v64i8() {
; AVX-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @b8, align 1
; AVX-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr @c8, align 1
; AVX-NEXT: [[TMP4:%.*]] = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> [[TMP1]], <32 x i8> [[TMP2]], <32 x i8> [[TMP3]])
-; AVX-NEXT: store <32 x i8> [[TMP4]], ptr @d8, align 1
; AVX-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; AVX-NEXT: [[TMP8:%.*]] = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> [[TMP5]], <32 x i8> [[TMP6]], <32 x i8> [[TMP7]])
+; AVX-NEXT: store <32 x i8> [[TMP4]], ptr @d8, align 1
; AVX-NEXT: store <32 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
; AVX-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-mul.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-mul.ll
index 51cf32242bfdf..94976a8cdee25 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-mul.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-mul.ll
@@ -528,10 +528,10 @@ define void @mul_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = mul <16 x i8> [[TMP7]], [[TMP8]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = mul <16 x i8> [[TMP10]], [[TMP11]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -547,10 +547,10 @@ define void @mul_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = mul <16 x i8> [[TMP7]], [[TMP8]]
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = mul <16 x i8> [[TMP10]], [[TMP11]]
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
@@ -566,10 +566,10 @@ define void @mul_v64i8() {
; AVX128-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; AVX128-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; AVX128-NEXT: [[TMP9:%.*]] = mul <16 x i8> [[TMP7]], [[TMP8]]
-; AVX128-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; AVX128-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; AVX128-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; AVX128-NEXT: [[TMP12:%.*]] = mul <16 x i8> [[TMP10]], [[TMP11]]
+; AVX128-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; AVX128-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; AVX128-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-smax.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-smax.ll
index dd76992c2570b..c63b672f4187c 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-smax.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-smax.ll
@@ -385,10 +385,10 @@ define void @smax_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.smax.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.smax.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-smin.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-smin.ll
index 678477fa1e397..826f97f2a2d89 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-smin.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-smin.ll
@@ -385,10 +385,10 @@ define void @smin_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.smin.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.smin.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll
index 65e2a011cc9a1..afaab8b8ca642 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll
@@ -503,10 +503,10 @@ define void @sub_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -522,10 +522,10 @@ define void @sub_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll
index 18df499c6646e..3510863c88930 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll
@@ -401,10 +401,10 @@ define void @sub_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub.ll
index 9d34edbb506c0..be54c1e04ca39 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-sub.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-sub.ll
@@ -439,10 +439,10 @@ define void @sub_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = sub <16 x i8> [[TMP7]], [[TMP8]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = sub <16 x i8> [[TMP10]], [[TMP11]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
@@ -458,10 +458,10 @@ define void @sub_v64i8() {
; SLM-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP9:%.*]] = sub <16 x i8> [[TMP7]], [[TMP8]]
-; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SLM-NEXT: [[TMP12:%.*]] = sub <16 x i8> [[TMP10]], [[TMP11]]
+; SLM-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SLM-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SLM-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-umax.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-umax.ll
index a3f2b97a08a6e..3a187930055f0 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-umax.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-umax.ll
@@ -385,10 +385,10 @@ define void @umax_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.umax.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.umax.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-umin.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-umin.ll
index 0c7688345ac48..15119a9628067 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-umin.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-umin.ll
@@ -385,10 +385,10 @@ define void @umin_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
index f036801865048..e3dc67558af02 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
@@ -810,9 +810,14 @@ define float @extra_args_same_several_times(ptr nocapture readonly %x, i32 %a, i
; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
; THRESHOLD-NEXT: [[TMP0:%.*]] = load <8 x float>, ptr [[X:%.*]], align 4
; THRESHOLD-NEXT: [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP0]])
-; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP1]], 1.300000e+01
-; THRESHOLD-NEXT: [[TMP2:%.*]] = fmul fast float [[CONV]], 2.000000e+00
-; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[TMP2]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
+; THRESHOLD-NEXT: [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[CONV]], i32 1
+; THRESHOLD-NEXT: [[TMP4:%.*]] = fadd fast <2 x float> [[TMP3]], <float 1.300000e+01, float 2.000000e+00>
+; THRESHOLD-NEXT: [[TMP5:%.*]] = fmul fast <2 x float> [[TMP3]], <float 1.300000e+01, float 2.000000e+00>
+; THRESHOLD-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> [[TMP5]], <2 x i32> <i32 0, i32 3>
+; THRESHOLD-NEXT: [[TMP7:%.*]] = extractelement <2 x float> [[TMP6]], i32 0
+; THRESHOLD-NEXT: [[TMP8:%.*]] = extractelement <2 x float> [[TMP6]], i32 1
+; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[TMP7]], [[TMP8]]
; THRESHOLD-NEXT: ret float [[OP_RDX1]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shift-ashr.ll b/llvm/test/Transforms/SLPVectorizer/X86/shift-ashr.ll
index 88aafb2bf148b..51798deae694a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shift-ashr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/shift-ashr.ll
@@ -464,10 +464,10 @@ define void @ashr_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = ashr <16 x i8> [[TMP7]], [[TMP8]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = ashr <16 x i8> [[TMP10]], [[TMP11]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shift-lshr.ll b/llvm/test/Transforms/SLPVectorizer/X86/shift-lshr.ll
index 96977cd4fb7d7..7583561bbecf9 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shift-lshr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/shift-lshr.ll
@@ -413,10 +413,10 @@ define void @lshr_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = lshr <16 x i8> [[TMP7]], [[TMP8]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = lshr <16 x i8> [[TMP10]], [[TMP11]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shift-shl.ll b/llvm/test/Transforms/SLPVectorizer/X86/shift-shl.ll
index 789316ab33c43..5ec327c131fb7 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shift-shl.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/shift-shl.ll
@@ -461,10 +461,10 @@ define void @shl_v64i8() {
; SSE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP9:%.*]] = shl <16 x i8> [[TMP7]], [[TMP8]]
-; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
; SSE-NEXT: [[TMP12:%.*]] = shl <16 x i8> [[TMP10]], [[TMP11]]
+; SSE-NEXT: store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
; SSE-NEXT: store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
; SSE-NEXT: ret void
;
>From af784a5c13328aa4a8ce622260563b459856a8d4 Mon Sep 17 00:00:00 2001
From: Mingming Liu <mingmingl at google.com>
Date: Wed, 3 Jul 2024 13:15:17 -0700
Subject: [PATCH 178/246] [ThinLTO] Use a set rather than a map to track
exported ValueInfos. (#97360)
https://github.com/llvm/llvm-project/pull/95482 is a reland of
https://github.com/llvm/llvm-project/pull/88024.
https://github.com/llvm/llvm-project/pull/95482 keeps indexing memory
usage reasonable by using unordered_map and doesn't make other changes
to the originally reviewed code.
While discussing possible ways to minimize indexing memory usage, Teresa
asked whether I need `ExportSetTy` to be a map or whether a set is
sufficient. This PR implements that idea: it uses a set rather than a map
to track exported ValueInfos.
Currently, `ExportLists` has two use cases, and neither needs to track a
ValueInfo's import/export status, so using a set is both sufficient and
correct.
1) In both in-process and distributed ThinLTO, it's used to decide if a
function or global variable is visible [1] from another module after importing
creates additional cross-module references.
* If a cross-module call edge is seen today, the callee must already be
visible to another module without tracking its export status.
For instance, this [2] is how callees of direct calls get exported.
2) For in-process ThinLTO [3], it's used to compute the LTO cache key.
* The cache key computation already hashes [4] 'ImportList', and 'ExportList'
is determined by 'ImportList', so it's fine not to track the 'import type'
for the export list.
[1] https://github.com/llvm/llvm-project/blob/66cd8ec4c08252ebc73c82e4883a8da247ed146b/llvm/lib/LTO/LTO.cpp#L1815-L1819
[2] https://github.com/llvm/llvm-project/blob/66cd8ec4c08252ebc73c82e4883a8da247ed146b/llvm/lib/LTO/LTO.cpp#L1783-L1794
[3] https://github.com/llvm/llvm-project/blob/66cd8ec4c08252ebc73c82e4883a8da247ed146b/llvm/lib/LTO/LTO.cpp#L1494-L1496
[4] https://github.com/llvm/llvm-project/blob/b76100e220591fab2bf0a4917b216439f7aa4b09/llvm/lib/LTO/LTO.cpp#L194-L222
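As an illustration of the container change, here is a minimal standalone
sketch (not the LLVM code: std::map/std::set stand in for the
DenseMap/DenseSet keyed by ValueInfo, and ValueInfo is simplified to an int):

#include <cassert>
#include <map>
#include <set>

using ValueInfo = int; // stand-in for the real summary handle
enum ImportKind { Definition, Declaration };

int main() {
  // Before: every insertion had to respect precedence, since a Definition
  // entry must not be downgraded to a Declaration.
  std::map<ValueInfo, ImportKind> ExportMap;
  ExportMap[1] = Definition;
  ExportMap.try_emplace(1, Declaration); // no-op: Definition wins
  assert(ExportMap.at(1) == Definition);

  // After: neither use case consumes the ImportKind, so membership alone
  // suffices and insertions are trivially idempotent.
  std::set<ValueInfo> ExportSet;
  ExportSet.insert(1);
  ExportSet.insert(1); // harmless duplicate insert
  assert(ExportSet.size() == 1);
  return 0;
}

This mirrors the simplification in computeLTOCacheKey below, where only the
GUIDs are hashed once the set is in place.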
---
.../llvm/Transforms/IPO/FunctionImport.h | 11 +-
llvm/lib/LTO/LTO.cpp | 14 +--
llvm/lib/Transforms/IPO/FunctionImport.cpp | 112 ++++++++----------
llvm/test/ThinLTO/X86/funcimport-stats.ll | 2 +-
.../Transforms/FunctionImport/funcimport.ll | 2 +-
5 files changed, 61 insertions(+), 80 deletions(-)
diff --git a/llvm/include/llvm/Transforms/IPO/FunctionImport.h b/llvm/include/llvm/Transforms/IPO/FunctionImport.h
index d8c142ec89d82..3b03ba82b9272 100644
--- a/llvm/include/llvm/Transforms/IPO/FunctionImport.h
+++ b/llvm/include/llvm/Transforms/IPO/FunctionImport.h
@@ -104,13 +104,10 @@ class FunctionImporter {
/// index's module path string table).
using ImportMapTy = DenseMap<StringRef, FunctionsToImportTy>;
- /// The map contains an entry for every global value the module exports.
- /// The key is ValueInfo, and the value indicates whether the definition
- /// or declaration is visible to another module. If a function's definition is
- /// visible to other modules, the global values this function referenced are
- /// visible and shouldn't be internalized.
- /// TODO: Rename to `ExportMapTy`.
- using ExportSetTy = DenseMap<ValueInfo, GlobalValueSummary::ImportKind>;
+ /// The set contains an entry for every global value that the module exports.
+ /// Depending on the user context, this container is allowed to contain
+ /// definitions, declarations or a mix of both.
+ using ExportSetTy = DenseSet<ValueInfo>;
/// A function of this type is used to load modules referenced by the index.
using ModuleLoaderTy =
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 6bbec535d8e98..5382b1158cb04 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -161,19 +161,17 @@ void llvm::computeLTOCacheKey(
auto ModHash = Index.getModuleHash(ModuleID);
Hasher.update(ArrayRef<uint8_t>((uint8_t *)&ModHash[0], sizeof(ModHash)));
- std::vector<std::pair<uint64_t, uint8_t>> ExportsGUID;
+ // TODO: `ExportList` is determined by `ImportList`. Since `ImportList` is
+ // used to compute cache key, we could omit hashing `ExportList` here.
+ std::vector<uint64_t> ExportsGUID;
ExportsGUID.reserve(ExportList.size());
- for (const auto &[VI, ExportType] : ExportList)
- ExportsGUID.push_back(
- std::make_pair(VI.getGUID(), static_cast<uint8_t>(ExportType)));
+ for (const auto &VI : ExportList)
+ ExportsGUID.push_back(VI.getGUID());
// Sort the export list elements GUIDs.
llvm::sort(ExportsGUID);
- for (auto [GUID, ExportType] : ExportsGUID) {
- // The export list can impact the internalization, be conservative here
+ for (auto GUID : ExportsGUID)
Hasher.update(ArrayRef<uint8_t>((uint8_t *)&GUID, sizeof(GUID)));
- AddUint8(ExportType);
- }
// Include the hash for every module we import functions from. The set of
// imported symbols for each module may affect code generation and is
diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp
index ec5294b9512cf..2b0ee46f489ca 100644
--- a/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -400,8 +400,7 @@ class GlobalsImporter final {
// later, in ComputeCrossModuleImport, after import decisions are
// complete, which is more efficient than adding them here.
if (ExportLists)
- (*ExportLists)[RefSummary->modulePath()][VI] =
- GlobalValueSummary::Definition;
+ (*ExportLists)[RefSummary->modulePath()].insert(VI);
// If variable is not writeonly we attempt to recursively analyze
// its references in order to import referenced constants.
@@ -582,7 +581,7 @@ class WorkloadImportsManager : public ModuleImportsManager {
GlobalValueSummary::Definition;
GVI.onImportingSummary(*GVS);
if (ExportLists)
- (*ExportLists)[ExportingModule][VI] = GlobalValueSummary::Definition;
+ (*ExportLists)[ExportingModule].insert(VI);
}
LLVM_DEBUG(dbgs() << "[Workload] Done\n");
}
@@ -818,10 +817,8 @@ static void computeImportForFunction(
// Since definition takes precedence over declaration for the same VI,
// try emplace <VI, declaration> pair without checking insert result.
// If insert doesn't happen, there must be an existing entry keyed by
- // VI.
- if (ExportLists)
- (*ExportLists)[DeclSourceModule].try_emplace(
- VI, GlobalValueSummary::Declaration);
+ // VI. Note `ExportLists` only keeps track of exports due to imported
+ // definitions.
ImportList[DeclSourceModule].try_emplace(
VI.getGUID(), GlobalValueSummary::Declaration);
}
@@ -892,7 +889,7 @@ static void computeImportForFunction(
// later, in ComputeCrossModuleImport, after import decisions are
// complete, which is more efficient than adding them here.
if (ExportLists)
- (*ExportLists)[ExportModulePath][VI] = GlobalValueSummary::Definition;
+ (*ExportLists)[ExportModulePath].insert(VI);
}
auto GetAdjustedThreshold = [](unsigned Threshold, bool IsHotCallsite) {
@@ -998,19 +995,29 @@ static bool isGlobalVarSummary(const ModuleSummaryIndex &Index,
return false;
}
-template <class T>
-static unsigned numGlobalVarSummaries(const ModuleSummaryIndex &Index, T &Cont,
- unsigned &DefinedGVS,
- unsigned &DefinedFS) {
+// Return the number of global variable summaries in ExportSet.
+static unsigned
+numGlobalVarSummaries(const ModuleSummaryIndex &Index,
+ FunctionImporter::ExportSetTy &ExportSet) {
+ unsigned NumGVS = 0;
+ for (auto &VI : ExportSet)
+ if (isGlobalVarSummary(Index, VI.getGUID()))
+ ++NumGVS;
+ return NumGVS;
+}
+
+// Given ImportMap, return the number of global variable summaries and record
+// the number of defined function summaries as output parameter.
+static unsigned
+numGlobalVarSummaries(const ModuleSummaryIndex &Index,
+ FunctionImporter::FunctionsToImportTy &ImportMap,
+ unsigned &DefinedFS) {
unsigned NumGVS = 0;
- DefinedGVS = 0;
DefinedFS = 0;
- for (auto &[GUID, Type] : Cont) {
- if (isGlobalVarSummary(Index, GUID)) {
- if (Type == GlobalValueSummary::Definition)
- ++DefinedGVS;
+ for (auto &[GUID, Type] : ImportMap) {
+ if (isGlobalVarSummary(Index, GUID))
++NumGVS;
- } else if (Type == GlobalValueSummary::Definition)
+ else if (Type == GlobalValueSummary::Definition)
++DefinedFS;
}
return NumGVS;
@@ -1046,7 +1053,7 @@ static bool checkVariableImport(
};
for (auto &ExportPerModule : ExportLists)
- for (auto &[VI, Unused] : ExportPerModule.second)
+ for (auto &VI : ExportPerModule.second)
if (!FlattenedImports.count(VI.getGUID()) &&
IsReadOrWriteOnlyVarNeedingImporting(ExportPerModule.first, VI))
return false;
@@ -1079,14 +1086,12 @@ void llvm::ComputeCrossModuleImport(
// since we may import the same values multiple times into different modules
// during the import computation.
for (auto &ELI : ExportLists) {
+ // `NewExports` tracks the VI that gets exported because the full definition
+ // of its user/referencer gets exported.
FunctionImporter::ExportSetTy NewExports;
const auto &DefinedGVSummaries =
ModuleToDefinedGVSummaries.lookup(ELI.first);
- for (auto &[EI, Type] : ELI.second) {
- // If a variable is exported as a declaration, its 'refs' and 'calls' are
- // not further exported.
- if (Type == GlobalValueSummary::Declaration)
- continue;
+ for (auto &EI : ELI.second) {
// Find the copy defined in the exporting module so that we can mark the
// values it references in that specific definition as exported.
// Below we will add all references and called values, without regard to
@@ -1105,23 +1110,14 @@ void llvm::ComputeCrossModuleImport(
// we convert such variables initializers to "zeroinitializer".
// See processGlobalForThinLTO.
if (!Index.isWriteOnly(GVS))
- for (const auto &VI : GVS->refs()) {
- // Try to emplace the declaration entry. If a definition entry
- // already exists for key `VI`, this is a no-op.
- NewExports.try_emplace(VI, GlobalValueSummary::Declaration);
- }
+ for (const auto &VI : GVS->refs())
+ NewExports.insert(VI);
} else {
auto *FS = cast<FunctionSummary>(S);
- for (const auto &Edge : FS->calls()) {
- // Try to emplace the declaration entry. If a definition entry
- // already exists for key `VI`, this is a no-op.
- NewExports.try_emplace(Edge.first, GlobalValueSummary::Declaration);
- }
- for (const auto &Ref : FS->refs()) {
- // Try to emplace the declaration entry. If a definition entry
- // already exists for key `VI`, this is a no-op.
- NewExports.try_emplace(Ref, GlobalValueSummary::Declaration);
- }
+ for (const auto &Edge : FS->calls())
+ NewExports.insert(Edge.first);
+ for (const auto &Ref : FS->refs())
+ NewExports.insert(Ref);
}
}
// Prune list computed above to only include values defined in the
@@ -1129,7 +1125,7 @@ void llvm::ComputeCrossModuleImport(
// the same ref/call target multiple times in above loop, and it is more
// efficient to avoid a set lookup each time.
for (auto EI = NewExports.begin(); EI != NewExports.end();) {
- if (!DefinedGVSummaries.count(EI->first.getGUID()))
+ if (!DefinedGVSummaries.count(EI->getGUID()))
NewExports.erase(EI++);
else
++EI;
@@ -1144,29 +1140,22 @@ void llvm::ComputeCrossModuleImport(
for (auto &ModuleImports : ImportLists) {
auto ModName = ModuleImports.first;
auto &Exports = ExportLists[ModName];
- unsigned DefinedGVS = 0, DefinedFS = 0;
- unsigned NumGVS =
- numGlobalVarSummaries(Index, Exports, DefinedGVS, DefinedFS);
- LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports " << DefinedFS
- << " function as definitions, "
- << Exports.size() - NumGVS - DefinedFS
- << " functions as declarations, " << DefinedGVS
- << " var definitions and " << NumGVS - DefinedGVS
- << " var declarations. Imports from "
- << ModuleImports.second.size() << " modules.\n");
+ unsigned NumGVS = numGlobalVarSummaries(Index, Exports);
+ LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports "
+ << Exports.size() - NumGVS << " functions and " << NumGVS
+ << " vars. Imports from " << ModuleImports.second.size()
+ << " modules.\n");
for (auto &Src : ModuleImports.second) {
auto SrcModName = Src.first;
- unsigned DefinedGVS = 0, DefinedFS = 0;
+ unsigned DefinedFS = 0;
unsigned NumGVSPerMod =
- numGlobalVarSummaries(Index, Src.second, DefinedGVS, DefinedFS);
+ numGlobalVarSummaries(Index, Src.second, DefinedFS);
LLVM_DEBUG(dbgs() << " - " << DefinedFS << " function definitions and "
<< Src.second.size() - NumGVSPerMod - DefinedFS
<< " function declarations imported from " << SrcModName
<< "\n");
- LLVM_DEBUG(dbgs() << " - " << DefinedGVS << " global vars definition and "
- << NumGVSPerMod - DefinedGVS
- << " global vars declaration imported from "
- << SrcModName << "\n");
+ LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod
+ << " global vars imported from " << SrcModName << "\n");
}
}
#endif
@@ -1180,17 +1169,14 @@ static void dumpImportListForModule(const ModuleSummaryIndex &Index,
<< ImportList.size() << " modules.\n");
for (auto &Src : ImportList) {
auto SrcModName = Src.first;
- unsigned DefinedGVS = 0, DefinedFS = 0;
- unsigned NumGVSPerMod =
- numGlobalVarSummaries(Index, Src.second, DefinedGVS, DefinedFS);
+ unsigned DefinedFS = 0;
+ unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second, DefinedFS);
LLVM_DEBUG(dbgs() << " - " << DefinedFS << " function definitions and "
<< Src.second.size() - DefinedFS - NumGVSPerMod
<< " function declarations imported from " << SrcModName
<< "\n");
- LLVM_DEBUG(dbgs() << " - " << DefinedGVS << " var definitions and "
- << NumGVSPerMod - DefinedGVS
- << " var declarations imported from " << SrcModName
- << "\n");
+ LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from "
+ << SrcModName << "\n");
}
}
#endif
diff --git a/llvm/test/ThinLTO/X86/funcimport-stats.ll b/llvm/test/ThinLTO/X86/funcimport-stats.ll
index 7fcd33855fe1a..1c2fd092ccb49 100644
--- a/llvm/test/ThinLTO/X86/funcimport-stats.ll
+++ b/llvm/test/ThinLTO/X86/funcimport-stats.ll
@@ -10,7 +10,7 @@
; RUN: cat %t4 | FileCheck %s
; CHECK: - [[NUM_FUNCS:[0-9]+]] function definitions and 0 function declarations imported from
-; CHECK-NEXT: - [[NUM_VARS:[0-9]+]] global vars definition and 0 global vars declaration imported from
+; CHECK-NEXT: - [[NUM_VARS:[0-9]+]] global vars imported from
; CHECK: [[NUM_FUNCS]] function-import - Number of functions imported in backend
; CHECK-NEXT: [[NUM_FUNCS]] function-import - Number of functions thin link decided to import
diff --git a/llvm/test/Transforms/FunctionImport/funcimport.ll b/llvm/test/Transforms/FunctionImport/funcimport.ll
index 635750b33fff0..8f7e8340d4909 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport.ll
@@ -167,7 +167,7 @@ declare void @variadic_va_start(...)
; DUMP: Module [[M1:.*]] imports from 1 module
; DUMP-NEXT: 15 function definitions and 0 function declarations imported from [[M2:.*]]
-; DUMP-NEXT: 4 var definitions and 0 var declarations imported from [[M2]]
+; DUMP-NEXT: 4 vars imported from [[M2]]
; DUMP: Imported 15 functions for Module [[M1]]
; DUMP-NEXT: Imported 4 global variables for Module [[M1]]
>From a3c5c83273358a85a4e02f5f76379b1a276e7714 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 3 Jul 2024 13:00:36 -0700
Subject: [PATCH 179/246] [DAGCombiner] Remove unneeded getValueType() calls in
visitMULHS/MULHU. NFC
We have an existing VT variable that should match N0.getValueType().
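For context on the two folds being touched, here is a small standalone check
(not LLVM code; plain C++ emulating the node semantics) of the identities
mulhs(x, 1) == x >> (size(x)-1) and mulhu(x, 1) == 0 for 32-bit values:

#include <cassert>
#include <cstdint>

// High half of the signed 32x32->64 product, like ISD::MULHS on i32.
static int32_t mulhs(int32_t a, int32_t b) {
  return static_cast<int32_t>((static_cast<int64_t>(a) * b) >> 32);
}

// High half of the unsigned 32x32->64 product, like ISD::MULHU on i32.
static uint32_t mulhu(uint32_t a, uint32_t b) {
  return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
}

int main() {
  for (int32_t x : {0, 1, -1, 42, -123456, INT32_MIN, INT32_MAX}) {
    // The high half of sext(x) * 1 is just the sign bits, i.e. an
    // arithmetic shift right by size(x)-1.
    assert(mulhs(x, 1) == (x >> 31));
    // The high half of zext(x) * 1 is always zero.
    assert(mulhu(static_cast<uint32_t>(x), 1) == 0);
  }
  return 0;
}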
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 854e92369edd1..d81a54d2ecaaa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5101,9 +5101,9 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
// fold (mulhs x, 1) -> (sra x, size(x)-1)
if (isOneConstant(N1))
- return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
+ return DAG.getNode(ISD::SRA, DL, VT, N0,
DAG.getConstant(N0.getScalarValueSizeInBits() - 1, DL,
- getShiftAmountTy(N0.getValueType())));
+ getShiftAmountTy(VT)));
// fold (mulhs x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
@@ -5161,7 +5161,7 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
// fold (mulhu x, 1) -> 0
if (isOneConstant(N1))
- return DAG.getConstant(0, DL, N0.getValueType());
+ return DAG.getConstant(0, DL, VT);
// fold (mulhu x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
>From b5864988b3230324f5426036f45aab43d20a5b94 Mon Sep 17 00:00:00 2001
From: Mats Petersson <mats.petersson at arm.com>
Date: Wed, 3 Jul 2024 21:38:27 +0100
Subject: [PATCH 180/246] [flang] Fix failing test (#97634)
Add a REQUIRES line so the test doesn't run when the target architecture
isn't supported.
Technically we could make it a bit less restrictive, but we want green
builds.
---
flang/test/Lower/mcmodel.f90 | 2 ++
1 file changed, 2 insertions(+)
diff --git a/flang/test/Lower/mcmodel.f90 b/flang/test/Lower/mcmodel.f90
index dd9eb145f5e2a..971dc3b9056fe 100644
--- a/flang/test/Lower/mcmodel.f90
+++ b/flang/test/Lower/mcmodel.f90
@@ -1,3 +1,5 @@
+! REQUIRES: aarch64-registered-target && x86-registered-target
+
! RUN: %flang_fc1 -triple aarch64 -emit-llvm -mcmodel=tiny %s -o - | FileCheck %s -check-prefix=CHECK-TINY
! RUN: %flang_fc1 -emit-llvm -mcmodel=small %s -o - | FileCheck %s -check-prefix=CHECK-SMALL
! RUN: %flang_fc1 -triple x86_64-unknown-linux-gnu -emit-llvm -mcmodel=kernel %s -o - | FileCheck %s -check-prefix=CHECK-KERNEL
>From 04a1a3482ce3ee00b5bbec1ce852e58410e4b6ad Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Wed, 3 Jul 2024 13:45:48 -0700
Subject: [PATCH 181/246] [Driver] Add -Wa, options --crel and
--allow-experimental-crel
The two options are discussed in a few comments around
https://github.com/llvm/llvm-project/pull/91280#issuecomment-2099344079
* -Wa,--crel: error "-Wa,--allow-experimental-crel must be specified to use -Wa,--crel..."
* -Wa,--allow-experimental-crel: no-op
* -Wa,--crel,--allow-experimental-crel: enable CREL in the integrated assembler (#91280)
MIPS's little-endian n64 ABI messed up the `r_info` field in
relocations. While this could be fixed with CREL, my intention is to
avoid complicating the assembler/linker, so the implementation simply
doesn't allow CREL for MIPS.
Link: https://discourse.llvm.org/t/rfc-crel-a-compact-relocation-format-for-elf/77600
Pull Request: https://github.com/llvm/llvm-project/pull/97378
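A minimal sketch of the target gating this adds (assuming only llvm::Triple;
the actual checks live in Clang's driver code, as the diff below shows):

#include "llvm/TargetParser/Triple.h"

// CREL is only emitted for ELF, and MIPS is excluded because of the
// r_info quirk in its little-endian n64 ABI described above.
static bool crelSupported(const llvm::Triple &T) {
  return T.isOSBinFormatELF() && !T.isMIPS();
}

Within a single -Wa, argument the options are processed left to right with
last-one-wins semantics, so -Wa,--crel,--no-crel ends up disabled.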
---
clang/include/clang/Basic/CodeGenOptions.def | 1 +
.../clang/Basic/DiagnosticDriverKinds.td | 4 +++
clang/include/clang/Driver/Options.td | 3 ++
clang/lib/CodeGen/BackendUtil.cpp | 1 +
clang/lib/Driver/ToolChains/Clang.cpp | 18 +++++++++++
clang/lib/Driver/ToolChains/CommonArgs.cpp | 21 +++++++++++++
clang/test/Driver/crel.c | 31 +++++++++++++++++++
clang/test/Misc/cc1as-crel.s | 6 ++++
clang/tools/driver/cc1as_main.cpp | 6 ++++
9 files changed, 91 insertions(+)
create mode 100644 clang/test/Driver/crel.c
create mode 100644 clang/test/Misc/cc1as-crel.s
diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def
index e3f6da4a84f69..25de2204f04c0 100644
--- a/clang/include/clang/Basic/CodeGenOptions.def
+++ b/clang/include/clang/Basic/CodeGenOptions.def
@@ -36,6 +36,7 @@ VALUE_CODEGENOPT(Name, Bits, Default)
#endif
CODEGENOPT(DisableIntegratedAS, 1, 0) ///< -no-integrated-as
+CODEGENOPT(Crel, 1, 0) ///< -Wa,--crel
CODEGENOPT(RelaxELFRelocations, 1, 1) ///< -Wa,-mrelax-relocations={yes,no}
CODEGENOPT(AsmVerbose , 1, 0) ///< -dA, -fverbose-asm.
CODEGENOPT(PreserveAsmComments, 1, 1) ///< -dA, -fno-preserve-as-comments.
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 1ca2cb85565a1..a62bdc21298ee 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -801,6 +801,10 @@ def warn_drv_missing_multilib : Warning<
def note_drv_available_multilibs : Note<
"available multilibs are:%0">;
+def err_drv_experimental_crel : Error<
+ "-Wa,--allow-experimental-crel must be specified to use -Wa,--crel. "
+ "CREL is experimental and uses a non-standard section type code">;
+
def warn_android_unversioned_fallback : Warning<
"using unversioned Android target directory %0 for target %1; unversioned "
"directories will not be used in Clang 19 -- provide a versioned directory "
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index af5ed95510ceb..58ca6f2bea9e4 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -7027,6 +7027,9 @@ def massembler_no_warn : Flag<["-"], "massembler-no-warn">,
def massembler_fatal_warnings : Flag<["-"], "massembler-fatal-warnings">,
HelpText<"Make assembler warnings fatal">,
MarshallingInfoFlag<CodeGenOpts<"FatalWarnings">>;
+def crel : Flag<["--"], "crel">,
+ HelpText<"Enable CREL relocation format (ELF only)">,
+ MarshallingInfoFlag<CodeGenOpts<"Crel">>;
def mrelax_relocations_no : Flag<["-"], "mrelax-relocations=no">,
HelpText<"Disable x86 relax relocations">,
MarshallingInfoNegativeFlag<CodeGenOpts<"RelaxELFRelocations">>;
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 4195bb87cf0dd..e765bbf637a66 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -470,6 +470,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
Options.MCOptions.Dwarf64 = CodeGenOpts.Dwarf64;
Options.MCOptions.PreserveAsmComments = CodeGenOpts.PreserveAsmComments;
+ Options.MCOptions.Crel = CodeGenOpts.Crel;
Options.MCOptions.X86RelaxRelocations = CodeGenOpts.RelaxELFRelocations;
Options.MCOptions.CompressDebugSections =
CodeGenOpts.getCompressDebugSections();
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index cf1767a1f644f..aa285c39f14b4 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -2500,6 +2500,8 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
// arg after parsing the '-I' arg.
bool TakeNextArg = false;
+ const llvm::Triple &Triple = C.getDefaultToolChain().getTriple();
+ bool Crel = false, ExperimentalCrel = false;
bool UseRelaxRelocations = C.getDefaultToolChain().useRelaxRelocations();
bool UseNoExecStack = false;
const char *MipsTargetFeature = nullptr;
@@ -2623,6 +2625,12 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
Value == "-nocompress-debug-sections" ||
Value == "--nocompress-debug-sections") {
CmdArgs.push_back(Value.data());
+ } else if (Value == "--crel") {
+ Crel = true;
+ } else if (Value == "--no-crel") {
+ Crel = false;
+ } else if (Value == "--allow-experimental-crel") {
+ ExperimentalCrel = true;
} else if (Value == "-mrelax-relocations=yes" ||
Value == "--mrelax-relocations=yes") {
UseRelaxRelocations = true;
@@ -2688,6 +2696,16 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
}
if (ImplicitIt.size())
AddARMImplicitITArgs(Args, CmdArgs, ImplicitIt);
+ if (Crel) {
+ if (!ExperimentalCrel)
+ D.Diag(diag::err_drv_experimental_crel);
+ if (Triple.isOSBinFormatELF() && !Triple.isMIPS()) {
+ CmdArgs.push_back("--crel");
+ } else {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-Wa,--crel" << D.getTargetTriple();
+ }
+ }
if (!UseRelaxRelocations)
CmdArgs.push_back("-mrelax-relocations=no");
if (UseNoExecStack)
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 2cb152f77e501..c56a0c2c46c47 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -1133,6 +1133,27 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
addMachineOutlinerArgs(D, Args, CmdArgs, ToolChain.getEffectiveTriple(),
/*IsLTO=*/true, PluginOptPrefix);
+
+ for (const Arg *A : Args.filtered(options::OPT_Wa_COMMA)) {
+ bool Crel = false;
+ for (StringRef V : A->getValues()) {
+ if (V == "--crel")
+ Crel = true;
+ else if (V == "--no-crel")
+ Crel = false;
+ else
+ continue;
+ A->claim();
+ }
+ if (Crel) {
+ if (Triple.isOSBinFormatELF() && !Triple.isMIPS()) {
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + "-crel"));
+ } else {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-Wa,--crel" << D.getTargetTriple();
+ }
+ }
+ }
}
/// Adds the '-lcgpu' and '-lmgpu' libraries to the compilation to include the
diff --git a/clang/test/Driver/crel.c b/clang/test/Driver/crel.c
new file mode 100644
index 0000000000000..a47e7ebfeedd8
--- /dev/null
+++ b/clang/test/Driver/crel.c
@@ -0,0 +1,31 @@
+// RUN: not %clang -### -c --target=x86_64 -Wa,--crel %s 2>&1 | FileCheck %s --check-prefix=NOEXP
+
+// NOEXP: error: -Wa,--allow-experimental-crel must be specified to use -Wa,--crel. CREL is experimental and uses a non-standard section type code
+
+// RUN: %clang -### -c --target=x86_64 -Wa,--crel,--allow-experimental-crel %s -Werror 2>&1 | FileCheck %s
+// RUN: %clang -### -c --target=x86_64 -Wa,--crel,--no-crel,--allow-experimental-crel %s -Werror 2>&1 | FileCheck %s --check-prefix=NO
+// RUN: %clang -### -c --target=x86_64 -Wa,--allow-experimental-crel %s -Werror 2>&1 | FileCheck %s --check-prefix=NO
+// RUN: not %clang -### -c --target=arm64-apple-darwin -Wa,--crel,--allow-experimental-crel %s 2>&1 | FileCheck %s --check-prefix=ERR
+// RUN: not %clang -### -c --target=mips64 -Wa,--crel,--allow-experimental-crel %s 2>&1 | FileCheck %s --check-prefix=ERR
+
+// RUN: %clang -### -c --target=aarch64 -Werror -Wa,--crel,--allow-experimental-crel -x assembler %s -Werror 2>&1 | FileCheck %s --check-prefix=ASM
+// RUN: not %clang -### -c --target=mips64 -Wa,--crel,--allow-experimental-crel -x assembler %s 2>&1 | FileCheck %s --check-prefix=ERR
+
+// CHECK: "-cc1" {{.*}}"--crel"
+// NO: "-cc1"
+// NO-NOT: "--crel"
+// ASM: "-cc1as" {{.*}}"--crel"
+// ERR: error: unsupported option '-Wa,--crel' for target '{{.*}}'
+
+/// The --allow-experimental-crel error check is exempted for -fno-integrated-as.
+// RUN: %clang -### -c --target=aarch64 -fno-integrated-as -Wa,--crel %s -Werror 2>&1 | FileCheck %s --check-prefix=GAS
+
+// GAS: "--crel"
+
+/// The --allow-experimental-crel error check doesn't apply to LTO.
+// RUN: %clang -### --target=x86_64-linux -Werror -flto -Wa,--crel %s 2>&1 | FileCheck %s --check-prefix=LTO
+
+// LTO: "-plugin-opt=-crel"
+
+// RUN: touch %t.o
+// RUN: not %clang -### --target=mips64-linux-gnu -flto -Wa,--crel %t.o 2>&1 | FileCheck %s --check-prefix=ERR
diff --git a/clang/test/Misc/cc1as-crel.s b/clang/test/Misc/cc1as-crel.s
new file mode 100644
index 0000000000000..78e78b09bf4c3
--- /dev/null
+++ b/clang/test/Misc/cc1as-crel.s
@@ -0,0 +1,6 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang -cc1as -triple x86_64 %s -filetype obj --crel -o %t.o
+// RUN: llvm-readelf -S %t.o | FileCheck %s
+
+// CHECK: .crel.text CREL
+call foo
diff --git a/clang/tools/driver/cc1as_main.cpp b/clang/tools/driver/cc1as_main.cpp
index ce1e181042609..4e0aa1450563e 100644
--- a/clang/tools/driver/cc1as_main.cpp
+++ b/clang/tools/driver/cc1as_main.cpp
@@ -164,6 +164,9 @@ struct AssemblerInvocation {
LLVM_PREFERRED_TYPE(bool)
unsigned EmitCompactUnwindNonCanonical : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned Crel : 1;
+
/// The name of the relocation model to use.
std::string RelocationModel;
@@ -204,6 +207,7 @@ struct AssemblerInvocation {
EmbedBitcode = 0;
EmitDwarfUnwind = EmitDwarfUnwindType::Default;
EmitCompactUnwindNonCanonical = false;
+ Crel = false;
}
static bool CreateFromArgs(AssemblerInvocation &Res,
@@ -373,6 +377,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
Opts.EmitCompactUnwindNonCanonical =
Args.hasArg(OPT_femit_compact_unwind_non_canonical);
+ Opts.Crel = Args.hasArg(OPT_crel);
Opts.AsSecureLogFile = Args.getLastArgValue(OPT_as_secure_log_file);
@@ -430,6 +435,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
MCOptions.EmitDwarfUnwind = Opts.EmitDwarfUnwind;
MCOptions.EmitCompactUnwindNonCanonical = Opts.EmitCompactUnwindNonCanonical;
MCOptions.MCSaveTempLabels = Opts.SaveTemporaryLabels;
+ MCOptions.Crel = Opts.Crel;
MCOptions.X86RelaxRelocations = Opts.RelaxELFRelocations;
MCOptions.CompressDebugSections = Opts.CompressDebugSections;
MCOptions.AsSecureLogFile = Opts.AsSecureLogFile;
>From 4c63672ca706c708de1e49bb29d026a705daa0d2 Mon Sep 17 00:00:00 2001
From: Hansang Bae <hansang.bae at intel.com>
Date: Wed, 3 Jul 2024 15:50:59 -0500
Subject: [PATCH 182/246] [OpenMP] Fix use of ompt_start_tool in ompd test.
(#97616)
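The fix drops `static` from the definition of ompt_start_tool: the OpenMP
runtime activates a tool by looking the symbol up at startup (see OpenMP spec
section 4.2.1, cited in the header), so the function needs external linkage;
a static definition has internal linkage and is invisible to that lookup.
A sketch of the required shape (assuming the usual OMPT types from
omp-tools.h):

#include <omp-tools.h>

// Must have external linkage (and C linkage when built as C++) so the
// runtime's symbol lookup can find it; `static` would hide it.
extern "C" ompt_start_tool_result_t *
ompt_start_tool(unsigned int omp_version, const char *runtime_version);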
---
openmp/libompd/test/ompt_plugin.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/openmp/libompd/test/ompt_plugin.h b/openmp/libompd/test/ompt_plugin.h
index 4488e6884a06c..74cb625ecb375 100644
--- a/openmp/libompd/test/ompt_plugin.h
+++ b/openmp/libompd/test/ompt_plugin.h
@@ -127,8 +127,8 @@ static void ompt_finalize(ompt_data_t *tool_data) {}
// "This func will be invoked by OpenMP implementation, refer spec: 4.2.1"
// NOLINTNEXTLINE
-static ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
- const char *runtime_version) {
+ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
+ const char *runtime_version) {
static ompt_start_tool_result_t ompt_start_tool_result = {
&ompt_initialize, &ompt_finalize, {0}};
return &ompt_start_tool_result;
>From 6aed0d5afb53434068f37f1b2909590e9dda3d8f Mon Sep 17 00:00:00 2001
From: aaryanshukla <53713108+aaryanshukla at users.noreply.github.com>
Date: Wed, 3 Jul 2024 14:29:51 -0700
Subject: [PATCH 183/246] [libc] created integration tests for newhdrgen
(#97361)
- Created integration tests for libc hdrgen.
- Implemented sorting of function names in YAML files through the script.
---
libc/CMakeLists.txt | 4 +
libc/newhdrgen/CMakeLists.txt | 17 +++++
.../classes/enumeration.py | 2 +-
.../class_implementation/classes/function.py | 6 +-
.../class_implementation/classes/object.py | 2 +-
libc/newhdrgen/header.py | 24 +++---
.../tests/expected_output/test_header.h | 42 +++++++++++
libc/newhdrgen/tests/input/test_small.h.def | 17 +++++
libc/newhdrgen/tests/input/test_small.yaml | 36 +++++++++
libc/newhdrgen/tests/output/test_small.h | 42 +++++++++++
libc/newhdrgen/tests/test_integration.py | 74 +++++++++++++++++++
libc/newhdrgen/yaml_to_classes.py | 7 +-
12 files changed, 254 insertions(+), 19 deletions(-)
create mode 100644 libc/newhdrgen/CMakeLists.txt
create mode 100644 libc/newhdrgen/tests/expected_output/test_header.h
create mode 100644 libc/newhdrgen/tests/input/test_small.h.def
create mode 100644 libc/newhdrgen/tests/input/test_small.yaml
create mode 100644 libc/newhdrgen/tests/output/test_small.h
create mode 100644 libc/newhdrgen/tests/test_integration.py
diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt
index 013b17b03f570..6ba54475d0fd1 100644
--- a/libc/CMakeLists.txt
+++ b/libc/CMakeLists.txt
@@ -50,6 +50,10 @@ set(LIBC_NAMESPACE ${default_namespace}
CACHE STRING "The namespace to use to enclose internal implementations. Must start with '__llvm_libc'."
)
+
+add_subdirectory(newhdrgen)
+
+
if(LLVM_LIBC_FULL_BUILD OR LLVM_LIBC_GPU_BUILD)
if(NOT LIBC_HDRGEN_EXE)
# We need to set up hdrgen first since other targets depend on it.
diff --git a/libc/newhdrgen/CMakeLists.txt b/libc/newhdrgen/CMakeLists.txt
new file mode 100644
index 0000000000000..33750d181c4b3
--- /dev/null
+++ b/libc/newhdrgen/CMakeLists.txt
@@ -0,0 +1,17 @@
+if(LLVM_LIBC_FULL_BUILD)
+
+ enable_testing()
+
+ set(NEWHDGEN_TESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/tests)
+
+ add_test(
+ NAME newhdrgen_integration_test
+ COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_SOURCE_DIR} python3 ${NEWHDGEN_TESTS_DIR}/test_integration.py
+ )
+
+ add_custom_target(check-newhdrgen
+ COMMAND ${CMAKE_CTEST_COMMAND} -R newhdrgen_integration_test
+ )
+
+ message(STATUS "Integration test for newhdrgen added.")
+endif()
diff --git a/libc/newhdrgen/class_implementation/classes/enumeration.py b/libc/newhdrgen/class_implementation/classes/enumeration.py
index be03dbf603c2b..a01fa74210101 100644
--- a/libc/newhdrgen/class_implementation/classes/enumeration.py
+++ b/libc/newhdrgen/class_implementation/classes/enumeration.py
@@ -10,7 +10,7 @@
class Enumeration:
- def __init__(self, name, value=None):
+ def __init__(self, name, value):
self.name = name
self.value = value
diff --git a/libc/newhdrgen/class_implementation/classes/function.py b/libc/newhdrgen/class_implementation/classes/function.py
index 3c464e48b6e3b..f79b53de9ade4 100644
--- a/libc/newhdrgen/class_implementation/classes/function.py
+++ b/libc/newhdrgen/class_implementation/classes/function.py
@@ -11,19 +11,19 @@
class Function:
def __init__(
- self, standards, return_type, name, arguments, guard=None, attributes=[]
+ self, return_type, name, arguments, standards, guard=None, attributes=[]
):
- self.standards = standards
self.return_type = return_type
self.name = name
self.arguments = [
arg if isinstance(arg, str) else arg["type"] for arg in arguments
]
+ self.standards = standards
self.guard = guard
self.attributes = attributes or []
def __str__(self):
- attributes_str = " ".join(self.attributes)
+ attributes_str = self.attributes
arguments_str = ", ".join(self.arguments)
result = f"{self.return_type} {self.name}({arguments_str}){attributes_str};"
if self.guard:
diff --git a/libc/newhdrgen/class_implementation/classes/object.py b/libc/newhdrgen/class_implementation/classes/object.py
index c65a82e1a660d..02f30cba1c48c 100644
--- a/libc/newhdrgen/class_implementation/classes/object.py
+++ b/libc/newhdrgen/class_implementation/classes/object.py
@@ -15,4 +15,4 @@ def __init__(self, name, type):
self.type = type
def __str__(self):
- return f"extern {self.type} {self.name}"
+ return f"extern {self.type} {self.name};"
diff --git a/libc/newhdrgen/header.py b/libc/newhdrgen/header.py
index 7ce356831677e..4eaf7dccca9e8 100644
--- a/libc/newhdrgen/header.py
+++ b/libc/newhdrgen/header.py
@@ -44,24 +44,24 @@ def __str__(self):
content.append(str(include))
for macro in self.macros:
- content.append(str(macro))
-
- for object in self.objects:
- content.append(str(object))
+ content.append(f"{macro}\n")
for type_ in self.types:
- content.append(str(type_))
+ content.append(f"{type_}")
if self.enumerations:
- content.append("enum {")
- for enum in self.enumerations:
- content.append(f"\t{str(enum)},")
- content.append("};")
+ combined_enum_content = ",\n ".join(
+ str(enum) for enum in self.enumerations
+ )
+ content.append(f"\nenum {{\n {combined_enum_content},\n}};")
+
+ content.append("\n__BEGIN_C_DECLS\n")
- # TODO: replace line below with common.h functionality
- content.append("__BEGIN_C_DECLS\n")
for function in self.functions:
content.append(str(function))
content.append("")
- content.append("__END_C_DECLS\n")
+ for object in self.objects:
+ content.append(str(object))
+ content.append("\n__END_C_DECLS")
+
return "\n".join(content)
diff --git a/libc/newhdrgen/tests/expected_output/test_header.h b/libc/newhdrgen/tests/expected_output/test_header.h
new file mode 100644
index 0000000000000..d6ae0d0e282e7
--- /dev/null
+++ b/libc/newhdrgen/tests/expected_output/test_header.h
@@ -0,0 +1,42 @@
+//===-- C standard library header test_small-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SMALL_H
+#define LLVM_LIBC_TEST_SMALL_H
+
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/test_small-macros.h"
+
+#define MACRO_A 1
+
+#define MACRO_B 2
+
+#include <llvm-libc-types/type_a.h>
+#include <llvm-libc-types/type_b.h>
+
+enum {
+ enum_a = value_1,
+ enum_b = value_2,
+};
+
+__BEGIN_C_DECLS
+
+#ifdef FUNC_A_16
+void func_a()CONST_FUNC_A;
+#endif // FUNC_A_16
+
+#ifdef FUNC_B_16
+int func_b(int, float)CONST_FUNC_B;
+#endif // FUNC_B_16
+
+extern obj object_1;
+extern obj object_2;
+
+__END_C_DECLS
+
+#endif // LLVM_LIBC_TEST_SMALL_H
diff --git a/libc/newhdrgen/tests/input/test_small.h.def b/libc/newhdrgen/tests/input/test_small.h.def
new file mode 100644
index 0000000000000..de39a8b7e254d
--- /dev/null
+++ b/libc/newhdrgen/tests/input/test_small.h.def
@@ -0,0 +1,17 @@
+//===-- C standard library header test_small-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SMALL_H
+#define LLVM_LIBC_TEST_SMALL_H
+
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/test_small-macros.h"
+
+%%public_api()
+
+#endif // LLVM_LIBC_TEST_SMALL_H
diff --git a/libc/newhdrgen/tests/input/test_small.yaml b/libc/newhdrgen/tests/input/test_small.yaml
new file mode 100644
index 0000000000000..0bc292db72396
--- /dev/null
+++ b/libc/newhdrgen/tests/input/test_small.yaml
@@ -0,0 +1,36 @@
+header: test_header.h
+macros:
+ - macro_name: MACRO_A
+ macro_value: 1
+ - macro_name: MACRO_B
+ macro_value: 2
+types:
+ - type_name: type_a
+ - type_name: type_b
+enums:
+ - name: enum_a
+ value: value_1
+ - name: enum_b
+ value: value_2
+objects:
+ - object_name: object_1
+ object_type: obj
+ - object_name: object_2
+ object_type: obj
+functions:
+ - name: func_a
+ return_type: void
+ arguments: []
+ standards:
+ - stdc
+ guard: FUNC_A_16
+ attributes: CONST_FUNC_A
+ - name: func_b
+ return_type: int
+ arguments:
+ - type: int
+ - type: float
+ standards:
+ - stdc
+ guard: FUNC_B_16
+ attributes: CONST_FUNC_B
diff --git a/libc/newhdrgen/tests/output/test_small.h b/libc/newhdrgen/tests/output/test_small.h
new file mode 100644
index 0000000000000..d6ae0d0e282e7
--- /dev/null
+++ b/libc/newhdrgen/tests/output/test_small.h
@@ -0,0 +1,42 @@
+//===-- C standard library header test_small-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SMALL_H
+#define LLVM_LIBC_TEST_SMALL_H
+
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/test_small-macros.h"
+
+#define MACRO_A 1
+
+#define MACRO_B 2
+
+#include <llvm-libc-types/type_a.h>
+#include <llvm-libc-types/type_b.h>
+
+enum {
+ enum_a = value_1,
+ enum_b = value_2,
+};
+
+__BEGIN_C_DECLS
+
+#ifdef FUNC_A_16
+void func_a()CONST_FUNC_A;
+#endif // FUNC_A_16
+
+#ifdef FUNC_B_16
+int func_b(int, float)CONST_FUNC_B;
+#endif // FUNC_B_16
+
+extern obj object_1;
+extern obj object_2;
+
+__END_C_DECLS
+
+#endif // LLVM_LIBC_TEST_SMALL_H
diff --git a/libc/newhdrgen/tests/test_integration.py b/libc/newhdrgen/tests/test_integration.py
new file mode 100644
index 0000000000000..228fecc7f41c0
--- /dev/null
+++ b/libc/newhdrgen/tests/test_integration.py
@@ -0,0 +1,74 @@
+import subprocess
+import unittest
+from pathlib import Path
+import os
+import argparse
+
+
+class TestHeaderGenIntegration(unittest.TestCase):
+ def setUp(self):
+ parser = argparse.ArgumentParser(
+ description="TestHeaderGenIntegration arguments"
+ )
+ parser.add_argument(
+ "--output_dir", type=str, help="Output directory for generated headers"
+ )
+ args, _ = parser.parse_known_args()
+ output_dir_env = os.getenv("TEST_OUTPUT_DIR")
+
+ self.output_dir = Path(
+ args.output_dir
+ if args.output_dir
+ else output_dir_env if output_dir_env else "libc/newhdrgen/tests/output"
+ )
+
+ self.maxDiff = None
+ # Adjust based on your directory structure such as being in build etc.
+ self.source_dir = Path(__file__).resolve().parent.parent.parent.parent
+
+ def run_script(self, yaml_file, h_def_file, output_dir):
+ yaml_file = self.source_dir / yaml_file
+ h_def_file = self.source_dir / h_def_file
+ result = subprocess.run(
+ [
+ "python3",
+ str(self.source_dir / "libc/newhdrgen/yaml_to_classes.py"),
+ str(yaml_file),
+ str(h_def_file),
+ "--output_dir",
+ str(output_dir),
+ ],
+ capture_output=True,
+ text=True,
+ )
+
+ print("STDOUT:", result.stdout)
+ print("STDERR:", result.stderr)
+ result.check_returncode()
+
+ def compare_files(self, generated_file, expected_file):
+ with generated_file.open("r") as gen_file:
+ gen_content = gen_file.read()
+ with expected_file.open("r") as exp_file:
+ exp_content = exp_file.read()
+
+ self.assertEqual(gen_content, exp_content)
+
+ def test_generate_header(self):
+ yaml_file = "libc/newhdrgen/tests/input/test_small.yaml"
+ h_def_file = "libc/newhdrgen/tests/input/test_small.h.def"
+ expected_output_file = (
+ self.source_dir / "libc/newhdrgen/tests/expected_output/test_header.h"
+ )
+ output_file = self.output_dir / "test_small.h"
+
+ if not self.output_dir.exists():
+ self.output_dir.mkdir(parents=True)
+
+ self.run_script(yaml_file, h_def_file, self.output_dir)
+
+ self.compare_files(output_file, expected_output_file)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/libc/newhdrgen/yaml_to_classes.py b/libc/newhdrgen/yaml_to_classes.py
index 9b52c9cf9bb7c..9e0337f4a308e 100644
--- a/libc/newhdrgen/yaml_to_classes.py
+++ b/libc/newhdrgen/yaml_to_classes.py
@@ -46,11 +46,13 @@ def yaml_to_classes(yaml_data):
Enumeration(enum_data["name"], enum_data.get("value", None))
)
- for function_data in yaml_data.get("functions", []):
+ functions = yaml_data.get("functions", [])
+ sorted_functions = sorted(functions, key=lambda x: x["name"])
+ for function_data in sorted_functions:
arguments = [arg["type"] for arg in function_data["arguments"]]
guard = function_data.get("guard", None)
attributes = function_data.get("attributes", None)
- standards = (function_data.get("standards", None),)
+ standards = function_data.get("standards", None)
header.add_function(
Function(
function_data["return_type"],
@@ -99,6 +101,7 @@ def fill_public_api(header_str, h_def_content):
Returns:
The final header content with the public API filled in.
"""
+ header_str = header_str.strip()
return h_def_content.replace("%%public_api()", header_str, 1)
>From 611212fc9a4174723540542e9dbafb2a60275341 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 3 Jul 2024 23:30:05 +0200
Subject: [PATCH 184/246] AMDGPU/GlobalISel: Legalize atomicrmw fmin/fmax
(#97048)
We only handled the easy LDS case before. Handle the other address
spaces with the more complicated legality logic.
---
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 16 +-
.../AMDGPU/GlobalISel/atomicrmw_fmax.ll | 2965 +++++++++++++++++
.../AMDGPU/GlobalISel/atomicrmw_fmin.ll | 2965 +++++++++++++++++
3 files changed, 5944 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index a219d01518458..88e40da110555 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1670,10 +1670,22 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
if (ST.hasAtomicFlatPkAdd16Insts())
Atomic.legalFor({{V2F16, FlatPtr}, {V2BF16, FlatPtr}});
- // FIXME: Handle flat, global and buffer cases.
- getActionDefinitionsBuilder({G_ATOMICRMW_FMIN, G_ATOMICRMW_FMAX})
+
+ // Most of the legalization work here is done by AtomicExpand. We could
+ // probably use a simpler legality rule that just assumes anything is OK.
+ auto &AtomicFMinFMax =
+ getActionDefinitionsBuilder({G_ATOMICRMW_FMIN, G_ATOMICRMW_FMAX})
.legalFor({{F32, LocalPtr}, {F64, LocalPtr}});
+ if (ST.hasAtomicFMinFMaxF32GlobalInsts())
+ AtomicFMinFMax.legalFor({{F32, GlobalPtr},{F32, BufferFatPtr}});
+ if (ST.hasAtomicFMinFMaxF64GlobalInsts())
+ AtomicFMinFMax.legalFor({{F64, GlobalPtr}, {F64, BufferFatPtr}});
+ if (ST.hasAtomicFMinFMaxF32FlatInsts())
+ AtomicFMinFMax.legalFor({F32, FlatPtr});
+ if (ST.hasAtomicFMinFMaxF64FlatInsts())
+ AtomicFMinFMax.legalFor({F64, FlatPtr});
+
// BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, and output
// demarshalling
getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
new file mode 100644
index 0000000000000..9be8620b024eb
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
@@ -0,0 +1,2965 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx940 < %s | FileCheck -check-prefix=GFX940 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s
+
+; TODO: Delete this and add run lines to use *-atomicrmw-fmax.ll tests
+
+define float @local_atomic_fmax_ret_f32(ptr addrspace(3) %ptr, float %val) {
+; GFX12-LABEL: local_atomic_fmax_ret_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: ds_max_num_rtn_f32 v0, v0, v1
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SE
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: local_atomic_fmax_ret_f32:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: ds_max_rtn_f32 v0, v0, v1
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: local_atomic_fmax_ret_f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: ds_max_rtn_f32 v0, v0, v1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: local_atomic_fmax_ret_f32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ds_max_rtn_f32 v0, v0, v1
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: local_atomic_fmax_ret_f32:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: ds_max_rtn_f32 v0, v0, v1
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: local_atomic_fmax_ret_f32:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: ds_max_rtn_f32 v0, v0, v1
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: local_atomic_fmax_ret_f32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 m0, -1
+; GFX8-NEXT: ds_max_rtn_f32 v0, v0, v1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: local_atomic_fmax_ret_f32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 m0, -1
+; GFX7-NEXT: ds_max_rtn_f32 v0, v0, v1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(3) %ptr, float %val seq_cst
+ ret float %result
+}
+
+define void @local_atomic_fmax_noret_f32(ptr addrspace(3) %ptr, float %val) {
+; GFX12-LABEL: local_atomic_fmax_noret_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: ds_max_num_f32 v0, v1
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SE
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: local_atomic_fmax_noret_f32:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: ds_max_f32 v0, v1
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: local_atomic_fmax_noret_f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: ds_max_f32 v0, v1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: local_atomic_fmax_noret_f32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ds_max_f32 v0, v1
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: local_atomic_fmax_noret_f32:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: ds_max_f32 v0, v1
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: local_atomic_fmax_noret_f32:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: ds_max_f32 v0, v1
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: local_atomic_fmax_noret_f32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 m0, -1
+; GFX8-NEXT: ds_max_f32 v0, v1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: local_atomic_fmax_noret_f32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 m0, -1
+; GFX7-NEXT: ds_max_f32 v0, v1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmax ptr addrspace(3) %ptr, float %val seq_cst
+ ret void
+}
+
+define double @local_atomic_fmax_ret_f64(ptr addrspace(3) %ptr, double %val) {
+; GFX12-LABEL: local_atomic_fmax_ret_f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: ds_max_num_rtn_f64 v[0:1], v0, v[1:2]
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SE
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: local_atomic_fmax_ret_f64:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v4, v1
+; GFX940-NEXT: v_mov_b32_e32 v5, v2
+; GFX940-NEXT: ds_max_rtn_f64 v[0:1], v0, v[4:5]
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: local_atomic_fmax_ret_f64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: ds_max_rtn_f64 v[0:1], v0, v[1:2]
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: local_atomic_fmax_ret_f64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ds_max_rtn_f64 v[0:1], v0, v[1:2]
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: local_atomic_fmax_ret_f64:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v4, v1
+; GFX90A-NEXT: v_mov_b32_e32 v5, v2
+; GFX90A-NEXT: ds_max_rtn_f64 v[0:1], v0, v[4:5]
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: local_atomic_fmax_ret_f64:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: ds_max_rtn_f64 v[0:1], v0, v[1:2]
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: local_atomic_fmax_ret_f64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 m0, -1
+; GFX8-NEXT: ds_max_rtn_f64 v[0:1], v0, v[1:2]
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: local_atomic_fmax_ret_f64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 m0, -1
+; GFX7-NEXT: ds_max_rtn_f64 v[0:1], v0, v[1:2]
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(3) %ptr, double %val seq_cst
+ ret double %result
+}
+
+define void @local_atomic_fmax_noret_f64(ptr addrspace(3) %ptr, double %val) {
+; GFX12-LABEL: local_atomic_fmax_noret_f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: ds_max_num_f64 v0, v[1:2]
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SE
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: local_atomic_fmax_noret_f64:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v4, v1
+; GFX940-NEXT: v_mov_b32_e32 v5, v2
+; GFX940-NEXT: ds_max_f64 v0, v[4:5]
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: local_atomic_fmax_noret_f64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: ds_max_f64 v0, v[1:2]
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: local_atomic_fmax_noret_f64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ds_max_f64 v0, v[1:2]
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: local_atomic_fmax_noret_f64:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v4, v1
+; GFX90A-NEXT: v_mov_b32_e32 v5, v2
+; GFX90A-NEXT: ds_max_f64 v0, v[4:5]
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: local_atomic_fmax_noret_f64:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: ds_max_f64 v0, v[1:2]
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: local_atomic_fmax_noret_f64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 m0, -1
+; GFX8-NEXT: ds_max_f64 v0, v[1:2]
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: local_atomic_fmax_noret_f64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 m0, -1
+; GFX7-NEXT: ds_max_f64 v0, v[1:2]
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmax ptr addrspace(3) %ptr, double %val seq_cst
+ ret void
+}
+
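[Editor's note, not part of the diff] The LDS (addrspace(3)) f64 cases above lower to a single DS atomic on every target; the differences are per-generation details only: m0 initialization on GFX7/GFX8, the ds_max_num_f64 spelling on GFX12, and the _rtn suffix when the old value is returned. A minimal sketch of the mapping, with operands taken from the GFX90A checks above:

  ; IR:
  ;   %r = atomicrmw fmax ptr addrspace(3) %ptr, double %val seq_cst
  ; selects to (GFX90A, return variant):
  ;   ds_max_rtn_f64 v[0:1], v0, v[4:5]
  ; no-return variant drops the result operand:
  ;   ds_max_f64 v0, v[4:5]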
+define float @global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, float %val) {
+; GFX12-LABEL: global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v2
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v3, v4, v4
+; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: global_load_dword v3, v[0:1], off
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX940-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v5, v3
+; GFX940-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX940-NEXT: v_max_f32_e32 v4, v3, v2
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: global_atomic_cmpswap v3, v[0:1], v[4:5], off sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB4_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v0, v3
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX11-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB4_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_load_dword v3, v[0:1], off
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v5, v3
+; GFX90A-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX90A-NEXT: v_max_f32_e32 v4, v3, v2
+; GFX90A-NEXT: global_atomic_cmpswap v3, v[0:1], v[4:5], off glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB4_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v0, v3
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: global_load_dword v3, v[0:1], off
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX908-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX908-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB4_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v3
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v4
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB4_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: global_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX7-NEXT: v_max_f32_e32 v4, v3, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v5
+; GFX7-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB4_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v0, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret float %result
+}
+
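[Editor's note, not part of the diff] Where no native global fmax atomic is used, the checks above show the standard compare-and-swap expansion: load the old value, canonicalize the operands (v_max_f32 v, v, v quiets signaling NaNs; GFX7/GFX8 use a multiply by 1.0 to the same effect), compute the max, then attempt a cmpswap and retry on failure. An illustrative LLVM IR sketch of that loop (names are mine, not from the patch; the actual expansion is done late by the AtomicExpand pass):

  define float @global_fmax_cas_sketch(ptr addrspace(1) %ptr, float %val) {
  entry:
    %init = load float, ptr addrspace(1) %ptr, align 4
    br label %loop

  loop:                                        ; the .LBB*_1 %atomicrmw.start blocks above
    %old = phi float [ %init, %entry ], [ %seen.f, %loop ]
    %max = call float @llvm.maxnum.f32(float %old, float %val)
    %old.i = bitcast float %old to i32         ; cmpxchg operates on integer bits
    %max.i = bitcast float %max to i32
    %pair = cmpxchg ptr addrspace(1) %ptr, i32 %old.i, i32 %max.i syncscope("agent") seq_cst seq_cst
    %seen = extractvalue { i32, i1 } %pair, 0  ; value actually found in memory
    %ok = extractvalue { i32, i1 } %pair, 1
    %seen.f = bitcast i32 %seen to float
    br i1 %ok, label %done, label %loop

  done:                                        ; %atomicrmw.end
    ret float %seen.f                          ; old value, as atomicrmw requires
  }

  declare float @llvm.maxnum.f32(float, float)

In the emitted code the backend additionally tracks per-lane completion with an exec-mask loop (s_or_b64 / s_andn2_b64 / s_cbranch_execnz, or the _b32 forms in wave32), since different lanes of a wave can succeed on different iterations.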
+define void @global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, float %val) {
+; GFX12-LABEL: global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-NEXT: v_max_num_f32_e32 v4, v2, v2
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f32_e32 v2, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: global_load_dword v3, v[0:1], off
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX940-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX940-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v3, v2
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB5_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB5_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_load_dword v3, v[0:1], off
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX90A-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX90A-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB5_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: global_load_dword v3, v[0:1], off
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX908-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX908-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB5_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v2
+; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX8-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB5_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: global_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v2
+; GFX7-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v3
+; GFX7-NEXT: v_mov_b32_e32 v5, v2
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v3
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v3, v5
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB5_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, double %val) {
+; GFX12-LABEL: global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b64 v[4:5], v[0:1], off
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[6:7], v[6:7]
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[4:5], v[2:3]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cmpswap_b64 v[4:5], v[0:1], v[4:7], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_mov_b64_e32 v[6:7], v[4:5]
+; GFX940-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX940-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB6_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v0, v4
+; GFX940-NEXT: v_mov_b32_e32 v1, v5
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b64 v[4:5], v[0:1], off
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX11-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[4:5], v[0:1], v[4:7], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX10-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB6_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v1, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX90A-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB6_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v1, v5
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX908-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
+; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX908-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX908-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB6_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v1, v5
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX8-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB6_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v1, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
+; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
+; GFX7-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, v8
+; GFX7-NEXT: v_mov_b32_e32 v1, v9
+; GFX7-NEXT: v_mov_b32_e32 v2, v10
+; GFX7-NEXT: v_mov_b32_e32 v3, v11
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB6_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret double %result
+}
+
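[Editor's note, not part of the diff] The f64 CAS loops are structurally the same as the f32 ones but work on register pairs, and each generation uses its cheapest 64-bit copy to snapshot the previous value. Quoting the checks above:

  v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]   ; GFX90A
  v_mov_b64_e32 v[6:7], v[4:5]                       ; GFX940
  v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4     ; GFX11/GFX12

Targets without these forms (GFX7/GFX8/GFX908/GFX10) fall back to two scalar v_mov_b32 copies.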
+define void @global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, double %val) {
+; GFX12-LABEL: global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b64 v[4:5], v[0:1], off
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[2:3], v[2:3]
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX940-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB7_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b64 v[4:5], v[0:1], off
+; GFX11-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX10-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB7_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX90A-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB7_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX908-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX908-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v3
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB7_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX8-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB7_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: global_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v11, v5
+; GFX7-NEXT: v_mov_b32_e32 v10, v4
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v8
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v5, v9
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB7_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
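[Editor's note, not part of the diff] Each of these global/flat tests attaches !amdgpu.no.fine.grained.memory to the atomicrmw, asserting that the access never targets fine-grained (host-coherent) allocations, which permits lowerings that would not be safe on such memory. The definition of !0 is outside this hunk; by convention such marker metadata is an empty node, presumably declared at the end of the test file as:

  !0 = !{}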
+define float @flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory(ptr %ptr, float %val) {
+; GFX12-LABEL: flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v2
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v3, v4, v4
+; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: flat_load_dword v3, v[0:1]
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX940-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v5, v3
+; GFX940-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX940-NEXT: v_max_f32_e32 v4, v3, v2
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: flat_atomic_cmpswap v3, v[0:1], v[4:5] sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB8_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v0, v3
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX11-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_load_dword v3, v[0:1]
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB8_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: flat_load_dword v3, v[0:1]
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v5, v3
+; GFX90A-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX90A-NEXT: v_max_f32_e32 v4, v3, v2
+; GFX90A-NEXT: flat_atomic_cmpswap v3, v[0:1], v[4:5] glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB8_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v0, v3
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: flat_load_dword v3, v[0:1]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX908-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX908-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB8_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v3
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v4
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB8_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: flat_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: flat_load_dword v3, v[0:1]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v4
+; GFX7-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX7-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB8_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret float %result
+}
+
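[Editor's note, not part of the diff] The flat variants that follow differ from the global ones above only in the pointer's address space: a generic ptr may alias LDS, so the waits also cover the LDS counter (lgkmcnt(0) alongside vmcnt(0) pre-GFX12, s_wait_loadcnt_dscnt 0x0 on GFX12) and the loop uses the flat_load/flat_atomic_cmpswap forms. Side by side at the IR level:

  %r = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0   ; global
  %r = atomicrmw fmax ptr %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0                ; flat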
+define void @flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory(ptr %ptr, float %val) {
+; GFX12-LABEL: flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-NEXT: v_max_num_f32_e32 v4, v2, v2
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_max_num_f32_e32 v2, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: flat_load_dword v3, v[0:1]
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX940-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX940-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v3, v2
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB9_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_load_dword v3, v[0:1]
+; GFX10-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB9_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: flat_load_dword v3, v[0:1]
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX90A-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX90A-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX90A-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB9_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: flat_load_dword v3, v[0:1]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX908-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX908-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB9_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v2
+; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX8-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB9_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: flat_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: flat_load_dword v3, v[0:1]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v2
+; GFX7-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX7-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v3, v2
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmax ptr %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define double @flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory(ptr %ptr, double %val) {
+; GFX12-LABEL: flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: flat_load_b64 v[4:5], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[6:7], v[6:7]
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[4:5], v[2:3]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:1], v[4:7] th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b64_e32 v[6:7], v[4:5]
+; GFX940-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX940-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB10_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v0, v4
+; GFX940-NEXT: v_mov_b32_e32 v1, v5
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_load_b64 v[4:5], v[0:1]
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX11-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:1], v[4:7] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX10-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB10_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v1, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX90A-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v1, v5
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX908-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
+; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX908-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX908-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB10_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v1, v5
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 4, v0
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_load_dword v4, v[0:1]
+; GFX8-NEXT: flat_load_dword v5, v[5:6]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX8-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB10_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v1, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v5, vcc, 4, v0
+; GFX7-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GFX7-NEXT: flat_load_dword v4, v[0:1]
+; GFX7-NEXT: flat_load_dword v5, v[5:6]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX7-NEXT: v_max_f64 v[4:5], v[4:5], v[2:3]
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v1, v5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret double %result
+}
+
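+; Same expansion as the returning form above, except the cmpswap result is only
+; fed back into the next iteration rather than copied out to v[0:1].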
+define void @flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory(ptr %ptr, double %val) {
+; GFX12-LABEL: flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: flat_load_b64 v[4:5], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[2:3], v[2:3]
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX940-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB11_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_load_b64 v[4:5], v[0:1]
+; GFX11-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX10-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB11_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX90A-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX908-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX908-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v3
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB11_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 4, v0
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_load_dword v4, v[0:1]
+; GFX8-NEXT: flat_load_dword v5, v[5:6]
+; GFX8-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB11_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v5, vcc, 4, v0
+; GFX7-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GFX7-NEXT: flat_load_dword v4, v[0:1]
+; GFX7-NEXT: flat_load_dword v5, v[5:6]
+; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[6:7]
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
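+; Buffer fat pointer (addrspace(7)) tests: the resource descriptor is passed
+; inreg in s[0:3]/s[4:7], and the offset word is moved into a VGPR so the loop
+; can use buffer_atomic_cmpswap with offen addressing.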
+define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, float %val) {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v2, s4
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v3, v1, v1
+; GFX12-NEXT: buffer_load_b32 v0, v2, s[0:3], null offen
+; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v5, v0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v0, v5, v5
+; GFX12-NEXT: v_max_num_f32_e32 v4, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v2, s4
+; GFX940-NEXT: v_mov_b32_e32 v1, v0
+; GFX940-NEXT: buffer_load_dword v0, v2, s[0:3], 0 offen
+; GFX940-NEXT: s_mov_b64 s[4:5], 0
+; GFX940-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX940-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v5, v0
+; GFX940-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX940-NEXT: v_max_f32_e32 v4, v0, v3
+; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[0:3], 0 offen sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX940-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execnz .LBB12_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v2, s4
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX11-NEXT: buffer_load_b32 v0, v2, s[0:3], 0 offen
+; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX11-NEXT: v_max_f32_e32 v4, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v2, s[0:3], 0 offen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v2, s8
+; GFX10-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX10-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX10-NEXT: v_max_f32_e32 v4, v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v1, v5
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_cbranch_execnz .LBB12_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v2, s8
+; GFX90A-NEXT: v_mov_b32_e32 v1, v0
+; GFX90A-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX90A-NEXT: s_mov_b64 s[8:9], 0
+; GFX90A-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX90A-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v5, v0
+; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX90A-NEXT: v_max_f32_e32 v4, v0, v3
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX90A-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_cbranch_execnz .LBB12_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v2, s8
+; GFX908-NEXT: v_mov_b32_e32 v1, v0
+; GFX908-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX908-NEXT: s_mov_b64 s[8:9], 0
+; GFX908-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX908-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v5, v0
+; GFX908-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX908-NEXT: v_max_f32_e32 v4, v0, v3
+; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v1, v5
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX908-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_cbranch_execnz .LBB12_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, s8
+; GFX8-NEXT: v_mov_b32_e32 v1, v0
+; GFX8-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v1
+; GFX8-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v0
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v5
+; GFX8-NEXT: v_max_f32_e32 v4, v0, v3
+; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v1, v5
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB12_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v2, s8
+; GFX7-NEXT: v_mov_b32_e32 v1, v0
+; GFX7-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v1
+; GFX7-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v5
+; GFX7-NEXT: v_max_f32_e32 v4, v0, v3
+; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v1, v5
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(7) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret float %result
+}
+
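+; For f32 on GFX7/GFX8 the quieting step is v_mul_f32 1.0, x rather than a
+; self-max, as seen in the loops below.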
+define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, float %val) {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_max_num_f32 v3, v0, v0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f32_e32 v0, v1, v1
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v3
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v2, s4
+; GFX940-NEXT: buffer_load_dword v1, v2, s[0:3], 0 offen
+; GFX940-NEXT: s_mov_b64 s[4:5], 0
+; GFX940-NEXT: v_max_f32_e32 v3, v0, v0
+; GFX940-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX940-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX940-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[0:3], 0 offen sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX940-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX940-NEXT: v_mov_b32_e32 v1, v4
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execnz .LBB13_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_max_f32 v3, v0, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v2, s8
+; GFX10-NEXT: v_max_f32_e32 v3, v0, v0
+; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
+; GFX10-NEXT: v_mov_b32_e32 v4, v0
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_cbranch_execnz .LBB13_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v2, s8
+; GFX90A-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX90A-NEXT: s_mov_b64 s[8:9], 0
+; GFX90A-NEXT: v_max_f32_e32 v3, v0, v0
+; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX90A-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX90A-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX90A-NEXT: v_mov_b32_e32 v1, v4
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v2, s8
+; GFX908-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX908-NEXT: s_mov_b64 s[8:9], 0
+; GFX908-NEXT: v_max_f32_e32 v3, v0, v0
+; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX908-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX908-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v1, v4
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_cbranch_execnz .LBB13_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, s8
+; GFX8-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v0
+; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v1
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v1, v4
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB13_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v2, s8
+; GFX7-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0
+; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
+; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v1, v4
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB13_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmax ptr addrspace(7) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
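+; The f64 buffer variants feed a quad register sequence to the cmpswap: the new
+; value in the low pair and the expected (previous) value in the high pair.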
+define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, double %val) {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
+; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
+; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
+; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v6, s4
+; GFX940-NEXT: v_mov_b32_e32 v2, v0
+; GFX940-NEXT: v_mov_b32_e32 v3, v1
+; GFX940-NEXT: buffer_load_dwordx2 v[0:1], v6, s[0:3], 0 offen
+; GFX940-NEXT: s_mov_b64 s[4:5], 0
+; GFX940-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_mov_b64_e32 v[10:11], v[0:1]
+; GFX940-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
+; GFX940-NEXT: v_max_f64 v[8:9], v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[8:9]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[10:11]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[0:3], 0 offen sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX940-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execnz .LBB14_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen
+; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
+; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v6, s8
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX10-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v1
+; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX10-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v0, v7
+; GFX10-NEXT: v_mov_b32_e32 v1, v8
+; GFX10-NEXT: v_mov_b32_e32 v2, v9
+; GFX10-NEXT: v_mov_b32_e32 v3, v10
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_cbranch_execnz .LBB14_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v6, s8
+; GFX90A-NEXT: v_mov_b32_e32 v2, v0
+; GFX90A-NEXT: v_mov_b32_e32 v3, v1
+; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX90A-NEXT: s_mov_b64 s[8:9], 0
+; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
+; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
+; GFX90A-NEXT: v_max_f64 v[8:9], v[0:1], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_cbranch_execnz .LBB14_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v6, s8
+; GFX908-NEXT: v_mov_b32_e32 v2, v0
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[8:9], 0
+; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v10, v1
+; GFX908-NEXT: v_mov_b32_e32 v9, v0
+; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v7
+; GFX908-NEXT: v_mov_b32_e32 v1, v8
+; GFX908-NEXT: v_mov_b32_e32 v2, v9
+; GFX908-NEXT: v_mov_b32_e32 v3, v10
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_cbranch_execnz .LBB14_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v6, s8
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v10, v1
+; GFX8-NEXT: v_mov_b32_e32 v9, v0
+; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v7
+; GFX8-NEXT: v_mov_b32_e32 v1, v8
+; GFX8-NEXT: v_mov_b32_e32 v2, v9
+; GFX8-NEXT: v_mov_b32_e32 v3, v10
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB14_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v6, s8
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v10, v1
+; GFX7-NEXT: v_mov_b32_e32 v9, v0
+; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX7-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v7
+; GFX7-NEXT: v_mov_b32_e32 v1, v8
+; GFX7-NEXT: v_mov_b32_e32 v2, v9
+; GFX7-NEXT: v_mov_b32_e32 v3, v10
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(7) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret double %result
+}
+
+define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, double %val) {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[2:3], v6, s[0:3], null offen
+; GFX12-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[2:3], v[2:3]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
+; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
+; GFX12-NEXT: v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v6, s4
+; GFX940-NEXT: buffer_load_dwordx2 v[2:3], v6, s[0:3], 0 offen
+; GFX940-NEXT: s_mov_b64 s[4:5], 0
+; GFX940-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX940-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX940-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[10:11], v[2:3]
+; GFX940-NEXT: v_mov_b64_e32 v[8:9], v[0:1]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v6, s[0:3], 0 offen sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[2:3]
+; GFX940-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[8:9]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execnz .LBB15_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[2:3], v6, s[0:3], 0 offen
+; GFX11-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
+; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
+; GFX11-NEXT: v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v6, s8
+; GFX10-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX10-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v10, v3
+; GFX10-NEXT: v_mov_b32_e32 v9, v2
+; GFX10-NEXT: v_mov_b32_e32 v8, v1
+; GFX10-NEXT: v_mov_b32_e32 v7, v0
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[4:7], 0 offen glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v2, v7
+; GFX10-NEXT: v_mov_b32_e32 v3, v8
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_cbranch_execnz .LBB15_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v6, s8
+; GFX90A-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX90A-NEXT: s_mov_b64 s[8:9], 0
+; GFX90A-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX90A-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX90A-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[0:1], v[0:1] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v6, s[4:7], 0 offen glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[2:3]
+; GFX90A-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[8:9], v[8:9] op_sel:[0,1]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_cbranch_execnz .LBB15_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v6, s8
+; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX908-NEXT: s_mov_b64 s[8:9], 0
+; GFX908-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX908-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v10, v3
+; GFX908-NEXT: v_mov_b32_e32 v9, v2
+; GFX908-NEXT: v_mov_b32_e32 v8, v1
+; GFX908-NEXT: v_mov_b32_e32 v7, v0
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[4:7], 0 offen glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v2, v7
+; GFX908-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v3, v8
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_cbranch_execnz .LBB15_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v6, s8
+; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v10, v3
+; GFX8-NEXT: v_mov_b32_e32 v9, v2
+; GFX8-NEXT: v_mov_b32_e32 v8, v1
+; GFX8-NEXT: v_mov_b32_e32 v7, v0
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[4:7], 0 offen glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v2, v7
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v3, v8
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB15_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v6, s8
+; GFX7-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX7-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v10, v3
+; GFX7-NEXT: v_mov_b32_e32 v9, v2
+; GFX7-NEXT: v_mov_b32_e32 v8, v1
+; GFX7-NEXT: v_mov_b32_e32 v7, v0
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[4:7], 0 offen glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v2, v7
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v3, v8
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmax ptr addrspace(7) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
new file mode 100644
index 0000000000000..97d68d9c2e621
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
@@ -0,0 +1,2965 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx940 < %s | FileCheck -check-prefix=GFX940 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s
+
+; TODO: Delete this and add run lines to use *-atomicrmw-fmin.ll tests
+
+define float @local_atomic_fmin_ret_f32(ptr addrspace(3) %ptr, float %val) {
+; GFX12-LABEL: local_atomic_fmin_ret_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: ds_min_num_rtn_f32 v0, v0, v1
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SE
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: local_atomic_fmin_ret_f32:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: ds_min_rtn_f32 v0, v0, v1
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: local_atomic_fmin_ret_f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: ds_min_rtn_f32 v0, v0, v1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: local_atomic_fmin_ret_f32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ds_min_rtn_f32 v0, v0, v1
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: local_atomic_fmin_ret_f32:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: ds_min_rtn_f32 v0, v0, v1
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: local_atomic_fmin_ret_f32:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: ds_min_rtn_f32 v0, v0, v1
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: local_atomic_fmin_ret_f32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 m0, -1
+; GFX8-NEXT: ds_min_rtn_f32 v0, v0, v1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: local_atomic_fmin_ret_f32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 m0, -1
+; GFX7-NEXT: ds_min_rtn_f32 v0, v0, v1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(3) %ptr, float %val seq_cst
+ ret float %result
+}
+
+define void @local_atomic_fmin_noret_f32(ptr addrspace(3) %ptr, float %val) {
+; GFX12-LABEL: local_atomic_fmin_noret_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: ds_min_num_f32 v0, v1
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SE
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: local_atomic_fmin_noret_f32:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: ds_min_f32 v0, v1
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: local_atomic_fmin_noret_f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: ds_min_f32 v0, v1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: local_atomic_fmin_noret_f32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ds_min_f32 v0, v1
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: local_atomic_fmin_noret_f32:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: ds_min_f32 v0, v1
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: local_atomic_fmin_noret_f32:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: ds_min_f32 v0, v1
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: local_atomic_fmin_noret_f32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 m0, -1
+; GFX8-NEXT: ds_min_f32 v0, v1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: local_atomic_fmin_noret_f32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 m0, -1
+; GFX7-NEXT: ds_min_f32 v0, v1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmin ptr addrspace(3) %ptr, float %val seq_cst
+ ret void
+}
+
+define double @local_atomic_fmin_ret_f64(ptr addrspace(3) %ptr, double %val) {
+; GFX12-LABEL: local_atomic_fmin_ret_f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: ds_min_num_rtn_f64 v[0:1], v0, v[1:2]
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SE
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: local_atomic_fmin_ret_f64:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v4, v1
+; GFX940-NEXT: v_mov_b32_e32 v5, v2
+; GFX940-NEXT: ds_min_rtn_f64 v[0:1], v0, v[4:5]
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: local_atomic_fmin_ret_f64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: ds_min_rtn_f64 v[0:1], v0, v[1:2]
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: local_atomic_fmin_ret_f64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ds_min_rtn_f64 v[0:1], v0, v[1:2]
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: local_atomic_fmin_ret_f64:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v4, v1
+; GFX90A-NEXT: v_mov_b32_e32 v5, v2
+; GFX90A-NEXT: ds_min_rtn_f64 v[0:1], v0, v[4:5]
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: local_atomic_fmin_ret_f64:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: ds_min_rtn_f64 v[0:1], v0, v[1:2]
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: local_atomic_fmin_ret_f64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 m0, -1
+; GFX8-NEXT: ds_min_rtn_f64 v[0:1], v0, v[1:2]
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: local_atomic_fmin_ret_f64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 m0, -1
+; GFX7-NEXT: ds_min_rtn_f64 v[0:1], v0, v[1:2]
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(3) %ptr, double %val seq_cst
+ ret double %result
+}
+
+define void @local_atomic_fmin_noret_f64(ptr addrspace(3) %ptr, double %val) {
+; GFX12-LABEL: local_atomic_fmin_noret_f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: ds_min_num_f64 v0, v[1:2]
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SE
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: local_atomic_fmin_noret_f64:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v4, v1
+; GFX940-NEXT: v_mov_b32_e32 v5, v2
+; GFX940-NEXT: ds_min_f64 v0, v[4:5]
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: local_atomic_fmin_noret_f64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: ds_min_f64 v0, v[1:2]
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: local_atomic_fmin_noret_f64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ds_min_f64 v0, v[1:2]
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: local_atomic_fmin_noret_f64:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v4, v1
+; GFX90A-NEXT: v_mov_b32_e32 v5, v2
+; GFX90A-NEXT: ds_min_f64 v0, v[4:5]
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: local_atomic_fmin_noret_f64:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: ds_min_f64 v0, v[1:2]
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: local_atomic_fmin_noret_f64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 m0, -1
+; GFX8-NEXT: ds_min_f64 v0, v[1:2]
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: local_atomic_fmin_noret_f64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 m0, -1
+; GFX7-NEXT: ds_min_f64 v0, v[1:2]
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmin ptr addrspace(3) %ptr, double %val seq_cst
+ ret void
+}
+
+define float @global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, float %val) {
+; GFX12-LABEL: global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v2
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v3, v4, v4
+; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: global_load_dword v3, v[0:1], off
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX940-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v5, v3
+; GFX940-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX940-NEXT: v_min_f32_e32 v4, v3, v2
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: global_atomic_cmpswap v3, v[0:1], v[4:5], off sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB4_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v0, v3
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX11-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX10-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB4_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_load_dword v3, v[0:1], off
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v5, v3
+; GFX90A-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX90A-NEXT: v_min_f32_e32 v4, v3, v2
+; GFX90A-NEXT: global_atomic_cmpswap v3, v[0:1], v[4:5], off glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB4_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v0, v3
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: global_load_dword v3, v[0:1], off
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX908-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX908-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB4_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v3
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v4
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB4_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: global_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX7-NEXT: v_min_f32_e32 v4, v3, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v5
+; GFX7-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB4_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v0, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret float %result
+}
+
+define void @global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, float %val) {
+; GFX12-LABEL: global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-NEXT: v_max_num_f32_e32 v4, v2, v2
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f32_e32 v2, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: global_load_dword v3, v[0:1], off
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX940-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX940-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v3, v2
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB5_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX10-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB5_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_load_dword v3, v[0:1], off
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX90A-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX90A-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB5_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: global_load_dword v3, v[0:1], off
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX908-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX908-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB5_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v2
+; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX8-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB5_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: global_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v2
+; GFX7-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v3
+; GFX7-NEXT: v_mov_b32_e32 v5, v2
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v3
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v3, v5
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB5_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, double %val) {
+; GFX12-LABEL: global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b64 v[4:5], v[0:1], off
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[6:7], v[6:7]
+; GFX12-NEXT: v_min_num_f64_e32 v[4:5], v[4:5], v[2:3]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cmpswap_b64 v[4:5], v[0:1], v[4:7], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_mov_b64_e32 v[6:7], v[4:5]
+; GFX940-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX940-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB6_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v0, v4
+; GFX940-NEXT: v_mov_b32_e32 v1, v5
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b64 v[4:5], v[0:1], off
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX11-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[4:5], v[0:1], v[4:7], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX10-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB6_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v1, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX90A-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB6_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v1, v5
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX908-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
+; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX908-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX908-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB6_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v1, v5
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX8-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB6_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v1, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
+; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
+; GFX7-NEXT: v_min_f64 v[8:9], v[0:1], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, v8
+; GFX7-NEXT: v_mov_b32_e32 v1, v9
+; GFX7-NEXT: v_mov_b32_e32 v2, v10
+; GFX7-NEXT: v_mov_b32_e32 v3, v11
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB6_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret double %result
+}
+
+define void @global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, double %val) {
+; GFX12-LABEL: global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b64 v[4:5], v[0:1], off
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[2:3], v[2:3]
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX940-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB7_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b64 v[4:5], v[0:1], off
+; GFX11-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX10-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX10-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB7_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX90A-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB7_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
+; GFX908-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX908-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v3
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB7_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX8-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB7_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: global_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX7-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v11, v5
+; GFX7-NEXT: v_mov_b32_e32 v10, v4
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v8
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v5, v9
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB7_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define float @flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory(ptr %ptr, float %val) {
+; GFX12-LABEL: flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v2
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v3, v4, v4
+; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: flat_load_dword v3, v[0:1]
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX940-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v5, v3
+; GFX940-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX940-NEXT: v_min_f32_e32 v4, v3, v2
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: flat_atomic_cmpswap v3, v[0:1], v[4:5] sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB8_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v0, v3
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX11-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_load_dword v3, v[0:1]
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX10-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB8_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: flat_load_dword v3, v[0:1]
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v5, v3
+; GFX90A-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX90A-NEXT: v_min_f32_e32 v4, v3, v2
+; GFX90A-NEXT: flat_atomic_cmpswap v3, v[0:1], v[4:5] glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB8_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v0, v3
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: flat_load_dword v3, v[0:1]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_max_f32_e32 v3, v4, v4
+; GFX908-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX908-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB8_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v3
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v4
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB8_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: flat_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: flat_load_dword v3, v[0:1]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v4
+; GFX7-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX7-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB8_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret float %result
+}
+
+define void @flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory(ptr %ptr, float %val) {
+; GFX12-LABEL: flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-NEXT: v_max_num_f32_e32 v4, v2, v2
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_max_num_f32_e32 v2, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: flat_load_dword v3, v[0:1]
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX940-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX940-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v3, v2
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB9_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_load_dword v3, v[0:1]
+; GFX10-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX10-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB9_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: flat_load_dword v3, v[0:1]
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX90A-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX90A-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX90A-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB9_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: flat_load_dword v3, v[0:1]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: v_max_f32_e32 v4, v2, v2
+; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX908-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX908-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB9_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v2
+; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX8-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB9_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: flat_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: flat_load_dword v3, v[0:1]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v2
+; GFX7-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX7-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v3, v2
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmin ptr %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define double @flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory(ptr %ptr, double %val) {
+; GFX12-LABEL: flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: flat_load_b64 v[4:5], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[6:7], v[6:7]
+; GFX12-NEXT: v_min_num_f64_e32 v[4:5], v[4:5], v[2:3]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:1], v[4:7] th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b64_e32 v[6:7], v[4:5]
+; GFX940-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX940-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB10_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: v_mov_b32_e32 v0, v4
+; GFX940-NEXT: v_mov_b32_e32 v1, v5
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_load_b64 v[4:5], v[0:1]
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX11-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:1], v[4:7] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX10-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB10_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v1, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX90A-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v1, v5
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX908-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
+; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX908-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX908-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB10_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v1, v5
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 4, v0
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_load_dword v4, v[0:1]
+; GFX8-NEXT: flat_load_dword v5, v[5:6]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX8-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB10_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v1, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v5, vcc, 4, v0
+; GFX7-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GFX7-NEXT: flat_load_dword v4, v[0:1]
+; GFX7-NEXT: flat_load_dword v5, v[5:6]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX7-NEXT: v_min_f64 v[4:5], v[4:5], v[2:3]
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v1, v5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret double %result
+}
+
+define void @flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory(ptr %ptr, double %val) {
+; GFX12-LABEL: flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: flat_load_b64 v[4:5], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[2:3], v[2:3]
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX940-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX940-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_cbranch_execnz .LBB11_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_load_b64 v[4:5], v[0:1]
+; GFX11-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX10-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX10-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execnz .LBB11_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX90A-NEXT: s_mov_b64 s[4:5], 0
+; GFX90A-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX90A-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; GFX908-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[4:5], 0
+; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX908-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX908-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v3
+; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_execnz .LBB11_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 4, v0
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_load_dword v4, v[0:1]
+; GFX8-NEXT: flat_load_dword v5, v[5:6]
+; GFX8-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB11_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v5, vcc, 4, v0
+; GFX7-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GFX7-NEXT: flat_load_dword v4, v[0:1]
+; GFX7-NEXT: flat_load_dword v5, v[5:6]
+; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX7-NEXT: v_min_f64 v[2:3], v[2:3], v[6:7]
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, float %val) {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v2, s4
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v3, v1, v1
+; GFX12-NEXT: buffer_load_b32 v0, v2, s[0:3], null offen
+; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v5, v0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v0, v5, v5
+; GFX12-NEXT: v_min_num_f32_e32 v4, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v2, s4
+; GFX940-NEXT: v_mov_b32_e32 v1, v0
+; GFX940-NEXT: buffer_load_dword v0, v2, s[0:3], 0 offen
+; GFX940-NEXT: s_mov_b64 s[4:5], 0
+; GFX940-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX940-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v5, v0
+; GFX940-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX940-NEXT: v_min_f32_e32 v4, v0, v3
+; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[0:3], 0 offen sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX940-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execnz .LBB12_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v2, s4
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX11-NEXT: buffer_load_b32 v0, v2, s[0:3], 0 offen
+; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX11-NEXT: v_min_f32_e32 v4, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v2, s[0:3], 0 offen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v2, s8
+; GFX10-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX10-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX10-NEXT: v_min_f32_e32 v4, v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v1, v5
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_cbranch_execnz .LBB12_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v2, s8
+; GFX90A-NEXT: v_mov_b32_e32 v1, v0
+; GFX90A-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX90A-NEXT: s_mov_b64 s[8:9], 0
+; GFX90A-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX90A-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v5, v0
+; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX90A-NEXT: v_min_f32_e32 v4, v0, v3
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX90A-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_cbranch_execnz .LBB12_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v2, s8
+; GFX908-NEXT: v_mov_b32_e32 v1, v0
+; GFX908-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX908-NEXT: s_mov_b64 s[8:9], 0
+; GFX908-NEXT: v_max_f32_e32 v3, v1, v1
+; GFX908-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v5, v0
+; GFX908-NEXT: v_max_f32_e32 v0, v5, v5
+; GFX908-NEXT: v_min_f32_e32 v4, v0, v3
+; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v1, v5
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX908-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_cbranch_execnz .LBB12_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, s8
+; GFX8-NEXT: v_mov_b32_e32 v1, v0
+; GFX8-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v1
+; GFX8-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v0
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v5
+; GFX8-NEXT: v_min_f32_e32 v4, v0, v3
+; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v1, v5
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB12_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v2, s8
+; GFX7-NEXT: v_mov_b32_e32 v1, v0
+; GFX7-NEXT: buffer_load_dword v0, v2, s[4:7], 0 offen
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v1
+; GFX7-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v5
+; GFX7-NEXT: v_min_f32_e32 v4, v0, v3
+; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v1, v5
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v2, s[4:7], 0 offen glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(7) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret float %result
+}
+
+define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, float %val) {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_max_num_f32 v3, v0, v0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f32_e32 v0, v1, v1
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v3
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v2, s4
+; GFX940-NEXT: buffer_load_dword v1, v2, s[0:3], 0 offen
+; GFX940-NEXT: s_mov_b64 s[4:5], 0
+; GFX940-NEXT: v_max_f32_e32 v3, v0, v0
+; GFX940-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX940-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX940-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[0:3], 0 offen sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX940-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX940-NEXT: v_mov_b32_e32 v1, v4
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execnz .LBB13_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_max_f32 v3, v0, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v2, s8
+; GFX10-NEXT: v_max_f32_e32 v3, v0, v0
+; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
+; GFX10-NEXT: v_mov_b32_e32 v4, v0
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_cbranch_execnz .LBB13_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v2, s8
+; GFX90A-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX90A-NEXT: s_mov_b64 s[8:9], 0
+; GFX90A-NEXT: v_max_f32_e32 v3, v0, v0
+; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX90A-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX90A-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX90A-NEXT: v_mov_b32_e32 v1, v4
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v2, s8
+; GFX908-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX908-NEXT: s_mov_b64 s[8:9], 0
+; GFX908-NEXT: v_max_f32_e32 v3, v0, v0
+; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_max_f32_e32 v0, v1, v1
+; GFX908-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX908-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v1, v4
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_cbranch_execnz .LBB13_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, s8
+; GFX8-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v0
+; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v1
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v1, v4
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB13_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f32__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v2, s8
+; GFX7-NEXT: buffer_load_dword v1, v2, s[4:7], 0 offen
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0
+; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
+; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v2, s[4:7], 0 offen glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v1, v4
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB13_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmin ptr addrspace(7) %ptr, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, double %val) {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
+; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
+; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
+; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v6, s4
+; GFX940-NEXT: v_mov_b32_e32 v2, v0
+; GFX940-NEXT: v_mov_b32_e32 v3, v1
+; GFX940-NEXT: buffer_load_dwordx2 v[0:1], v6, s[0:3], 0 offen
+; GFX940-NEXT: s_mov_b64 s[4:5], 0
+; GFX940-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX940-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_mov_b64_e32 v[10:11], v[0:1]
+; GFX940-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
+; GFX940-NEXT: v_min_f64 v[8:9], v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[8:9]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[10:11]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[0:3], 0 offen sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX940-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execnz .LBB14_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen
+; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
+; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v6, s8
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX10-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v1
+; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX10-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v0, v7
+; GFX10-NEXT: v_mov_b32_e32 v1, v8
+; GFX10-NEXT: v_mov_b32_e32 v2, v9
+; GFX10-NEXT: v_mov_b32_e32 v3, v10
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_cbranch_execnz .LBB14_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v6, s8
+; GFX90A-NEXT: v_mov_b32_e32 v2, v0
+; GFX90A-NEXT: v_mov_b32_e32 v3, v1
+; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX90A-NEXT: s_mov_b64 s[8:9], 0
+; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX90A-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
+; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
+; GFX90A-NEXT: v_min_f64 v[8:9], v[0:1], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_cbranch_execnz .LBB14_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v6, s8
+; GFX908-NEXT: v_mov_b32_e32 v2, v0
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: s_mov_b64 s[8:9], 0
+; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v10, v1
+; GFX908-NEXT: v_mov_b32_e32 v9, v0
+; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v7
+; GFX908-NEXT: v_mov_b32_e32 v1, v8
+; GFX908-NEXT: v_mov_b32_e32 v2, v9
+; GFX908-NEXT: v_mov_b32_e32 v3, v10
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_cbranch_execnz .LBB14_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v6, s8
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v10, v1
+; GFX8-NEXT: v_mov_b32_e32 v9, v0
+; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v7
+; GFX8-NEXT: v_mov_b32_e32 v1, v8
+; GFX8-NEXT: v_mov_b32_e32 v2, v9
+; GFX8-NEXT: v_mov_b32_e32 v3, v10
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB14_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v6, s8
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v6, s[4:7], 0 offen
+; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v10, v1
+; GFX7-NEXT: v_mov_b32_e32 v9, v0
+; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
+; GFX7-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v7
+; GFX7-NEXT: v_mov_b32_e32 v1, v8
+; GFX7-NEXT: v_mov_b32_e32 v2, v9
+; GFX7-NEXT: v_mov_b32_e32 v3, v10
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[4:7], 0 offen glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(7) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret double %result
+}
+
+define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, double %val) {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[2:3], v6, s[0:3], null offen
+; GFX12-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[2:3], v[2:3]
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
+; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
+; GFX12-NEXT: v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX940-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX940: ; %bb.0:
+; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX940-NEXT: v_mov_b32_e32 v6, s4
+; GFX940-NEXT: buffer_load_dwordx2 v[2:3], v6, s[0:3], 0 offen
+; GFX940-NEXT: s_mov_b64 s[4:5], 0
+; GFX940-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX940-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX940-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[10:11], v[2:3]
+; GFX940-NEXT: v_mov_b64_e32 v[8:9], v[0:1]
+; GFX940-NEXT: buffer_wbl2 sc1
+; GFX940-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v6, s[0:3], 0 offen sc0
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[2:3]
+; GFX940-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[8:9]
+; GFX940-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execnz .LBB15_1
+; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX940-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[2:3], v6, s[0:3], 0 offen
+; GFX11-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
+; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
+; GFX11-NEXT: v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v6, s8
+; GFX10-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX10-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v10, v3
+; GFX10-NEXT: v_mov_b32_e32 v9, v2
+; GFX10-NEXT: v_mov_b32_e32 v8, v1
+; GFX10-NEXT: v_mov_b32_e32 v7, v0
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[4:7], 0 offen glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v2, v7
+; GFX10-NEXT: v_mov_b32_e32 v3, v8
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_cbranch_execnz .LBB15_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v6, s8
+; GFX90A-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX90A-NEXT: s_mov_b64 s[8:9], 0
+; GFX90A-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX90A-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX90A-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[0:1], v[0:1] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v6, s[4:7], 0 offen glc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: buffer_wbinvl1
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[2:3]
+; GFX90A-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[8:9], v[8:9] op_sel:[0,1]
+; GFX90A-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_cbranch_execnz .LBB15_1
+; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX90A-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX908: ; %bb.0:
+; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX908-NEXT: v_mov_b32_e32 v6, s8
+; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX908-NEXT: s_mov_b64 s[8:9], 0
+; GFX908-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX908-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v10, v3
+; GFX908-NEXT: v_mov_b32_e32 v9, v2
+; GFX908-NEXT: v_mov_b32_e32 v8, v1
+; GFX908-NEXT: v_mov_b32_e32 v7, v0
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[4:7], 0 offen glc
+; GFX908-NEXT: s_waitcnt vmcnt(0)
+; GFX908-NEXT: buffer_wbinvl1
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v2, v7
+; GFX908-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v3, v8
+; GFX908-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_cbranch_execnz .LBB15_1
+; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX908-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX908-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v6, s8
+; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX8-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v10, v3
+; GFX8-NEXT: v_mov_b32_e32 v9, v2
+; GFX8-NEXT: v_mov_b32_e32 v8, v1
+; GFX8-NEXT: v_mov_b32_e32 v7, v0
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[4:7], 0 offen glc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: buffer_wbinvl1
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v2, v7
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v3, v8
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB15_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v6, s8
+; GFX7-NEXT: buffer_load_dwordx2 v[2:3], v6, s[4:7], 0 offen
+; GFX7-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
+; GFX7-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v10, v3
+; GFX7-NEXT: v_mov_b32_e32 v9, v2
+; GFX7-NEXT: v_mov_b32_e32 v8, v1
+; GFX7-NEXT: v_mov_b32_e32 v7, v0
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[4:7], 0 offen glc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v2, v7
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v3, v8
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+ %unused = atomicrmw fmin ptr addrspace(7) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+!0 = !{}
>From 4a1fdeb04d10f5562687568ea8c494b3ef46c587 Mon Sep 17 00:00:00 2001
From: RoseZhang03 <rosezhang at google.com>
Date: Wed, 3 Jul 2024 21:40:20 +0000
Subject: [PATCH 185/246] [libc] reordered Function class parameters and moved
yaml files (#97329)
Reordered the Function class parameter "standards" so that it reads more
logically and matches the parameter order of the add_function function.
Deleted the yaml_combined folder and moved the files it contained into the
yaml folder.
Updated the math.yaml file with the math functions recently added to spec.td
---
.../{yaml_combined => yaml}/ctype.yaml | 0
.../{yaml_combined => yaml}/fenv.yaml | 0
.../{yaml_combined => yaml}/math.yaml | 192 +++++++++++++++++-
.../{yaml_combined => yaml}/pthread.yaml | 48 +++++
.../{yaml_combined => yaml}/sched.yaml | 0
.../{yaml_combined => yaml}/signal.yaml | 0
.../{yaml_combined => yaml}/stdfix.yaml | 0
.../{yaml_combined => yaml}/stdio.yaml | 7 +
.../{yaml_combined => yaml}/stdlib.yaml | 0
.../{yaml_combined => yaml}/string.yaml | 0
.../{yaml_combined => yaml}/strings.yaml | 0
libc/newhdrgen/yaml/sys_auxv.yaml | 8 +-
.../{yaml_combined => yaml}/sys_mman.yaml | 2 -
libc/newhdrgen/yaml/sys_types.yaml | 2 +-
.../{yaml_combined => yaml}/sys_wait.yaml | 2 +-
.../{yaml_combined => yaml}/time.yaml | 0
16 files changed, 255 insertions(+), 6 deletions(-)
rename libc/newhdrgen/{yaml_combined => yaml}/ctype.yaml (100%)
rename libc/newhdrgen/{yaml_combined => yaml}/fenv.yaml (100%)
rename libc/newhdrgen/{yaml_combined => yaml}/math.yaml (89%)
rename libc/newhdrgen/{yaml_combined => yaml}/pthread.yaml (87%)
rename libc/newhdrgen/{yaml_combined => yaml}/sched.yaml (100%)
rename libc/newhdrgen/{yaml_combined => yaml}/signal.yaml (100%)
rename libc/newhdrgen/{yaml_combined => yaml}/stdfix.yaml (100%)
rename libc/newhdrgen/{yaml_combined => yaml}/stdio.yaml (98%)
rename libc/newhdrgen/{yaml_combined => yaml}/stdlib.yaml (100%)
rename libc/newhdrgen/{yaml_combined => yaml}/string.yaml (100%)
rename libc/newhdrgen/{yaml_combined => yaml}/strings.yaml (100%)
rename libc/newhdrgen/{yaml_combined => yaml}/sys_mman.yaml (97%)
rename libc/newhdrgen/{yaml_combined => yaml}/sys_wait.yaml (94%)
rename libc/newhdrgen/{yaml_combined => yaml}/time.yaml (100%)
diff --git a/libc/newhdrgen/yaml_combined/ctype.yaml b/libc/newhdrgen/yaml/ctype.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/ctype.yaml
rename to libc/newhdrgen/yaml/ctype.yaml
diff --git a/libc/newhdrgen/yaml_combined/fenv.yaml b/libc/newhdrgen/yaml/fenv.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/fenv.yaml
rename to libc/newhdrgen/yaml/fenv.yaml
diff --git a/libc/newhdrgen/yaml_combined/math.yaml b/libc/newhdrgen/yaml/math.yaml
similarity index 89%
rename from libc/newhdrgen/yaml_combined/math.yaml
rename to libc/newhdrgen/yaml/math.yaml
index dbb1e6ec63030..18a49ad1c62c4 100644
--- a/libc/newhdrgen/yaml_combined/math.yaml
+++ b/libc/newhdrgen/yaml/math.yaml
@@ -563,6 +563,15 @@ functions:
- type: float
- type: float
- type: float
+ - name: f16fma
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: double
+ - type: double
+ - type: double
+ guard: LIBC_TYPES_HAS_FLOAT16
- name: f16fmaf
standards:
- stdc
@@ -572,6 +581,24 @@ functions:
- type: float
- type: float
guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16fmal
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: long double
+ - type: long double
+ - type: long double
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16fmaf128
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: float128
+ - type: float128
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128
- name: fmod
standards:
- stdc
@@ -913,6 +940,14 @@ functions:
arguments:
- type: long double
- type: int
+ - name: ldexpf16
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: _Float16
+ - type: int
+ guard: LIBC_TYPES_HAS_FLOAT16
- name: ldexpf128
standards:
- stdc
@@ -1761,6 +1796,14 @@ functions:
return_type: float
arguments:
- type: float
+ - name: scalblnf16
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: _Float16
+ - type: long
+ guard: LIBC_TYPES_HAS_FLOAT16
- name: scalbn
standards:
- stdc
@@ -1782,6 +1825,13 @@ functions:
arguments:
- type: long double
- type: int
+ - name: scalbnf16
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: _Float16
+ - type: int
- name: scalbnf128
standards:
- stdc
@@ -1875,10 +1925,150 @@ functions:
- type: _Float16 *
- type: _Float16 *
guard: LIBC_TYPES_HAS_FLOAT16
- - name: f16sqrtf
+ - name: getpayloadf16
standards:
- stdc
return_type: _Float16
+ arguments:
+ - type: _Float16 *
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: setpayloadf16
+ standards:
+ - stdc
+ return_type: int
+ arguments:
+ - type: _Float16 *
+ - type: _Float16
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: setpayloadsigf16
+ standards:
+ - stdc
+ return_type: int
+ arguments:
+ - type: _Float16 *
+ - type: _Float16
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16addf128
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: float128
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128
+ - name: f16subf128
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: float128
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128
+ - name: f16sqrtf
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
arguments:
- type: float
guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16sqrtf128
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128
+ - name: f16add
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: double
+ - type: double
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16addf
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: float
+ - type: float
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16addl
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: long double
+ - type: long double
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16sub
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: double
+ - type: double
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16subf
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: float
+ - type: float
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16subl
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: long double
+ - type: long double
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16div
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: double
+ - type: double
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16divf
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: float
+ - type: float
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16divf128
+ standards:
+ - stdc
+ return_type: _Float16
+ arguments:
+ - type: float128
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT16_AND_FLOAT128
+ - name: f16divl
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: long double
+ - type: long double
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16sqrt
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: double
+ guard: LIBC_TYPES_HAS_FLOAT16
+ - name: f16sqrtl
+ standards:
+ - llvm_libc_ext
+ return_type: _Float16
+ arguments:
+ - type: long double
+ guard: LIBC_TYPES_HAS_FLOAT16
diff --git a/libc/newhdrgen/yaml_combined/pthread.yaml b/libc/newhdrgen/yaml/pthread.yaml
similarity index 87%
rename from libc/newhdrgen/yaml_combined/pthread.yaml
rename to libc/newhdrgen/yaml/pthread.yaml
index 5a1ede32e53e3..9ffcf454035ff 100644
--- a/libc/newhdrgen/yaml_combined/pthread.yaml
+++ b/libc/newhdrgen/yaml/pthread.yaml
@@ -366,3 +366,51 @@ functions:
arguments:
- type: pthread_key_t
- type: const void *
+ - name: pthread_rwlock_init
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *
+ - type: const pthread_rwlockattr_t *__restrict
+ - name: pthread_rwlock_tryrdlock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *
+ - name: pthread_rwlock_trywrlock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *
+ - name: pthread_rwlock_timedrdlock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *__restrict
+ - type: const struct timespec *__restrict
+ - name: pthread_rwlock_timedwrlock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *__restrict
+ - type: const struct timespec *__restrict
+ - name: pthread_rwlock_rdlock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *
+ - name: pthread_rwlock_wrlock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *
+ - name: pthread_rwlock_unlock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *
+ - name: pthread_rwlock_destroy
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_rwlock_t *
diff --git a/libc/newhdrgen/yaml_combined/sched.yaml b/libc/newhdrgen/yaml/sched.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/sched.yaml
rename to libc/newhdrgen/yaml/sched.yaml
diff --git a/libc/newhdrgen/yaml_combined/signal.yaml b/libc/newhdrgen/yaml/signal.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/signal.yaml
rename to libc/newhdrgen/yaml/signal.yaml
diff --git a/libc/newhdrgen/yaml_combined/stdfix.yaml b/libc/newhdrgen/yaml/stdfix.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/stdfix.yaml
rename to libc/newhdrgen/yaml/stdfix.yaml
diff --git a/libc/newhdrgen/yaml_combined/stdio.yaml b/libc/newhdrgen/yaml/stdio.yaml
similarity index 98%
rename from libc/newhdrgen/yaml_combined/stdio.yaml
rename to libc/newhdrgen/yaml/stdio.yaml
index 928a8d5228c21..2fdc772da122c 100644
--- a/libc/newhdrgen/yaml_combined/stdio.yaml
+++ b/libc/newhdrgen/yaml/stdio.yaml
@@ -130,6 +130,13 @@ functions:
return_type: int
arguments:
- type: FILE *
+ - name: fdopen
+ standards:
+ - POSIX
+ return_type: FILE *
+ arguments:
+ - type: int
+ - type: const char *
- name: clearerr
standards:
- stdc
diff --git a/libc/newhdrgen/yaml_combined/stdlib.yaml b/libc/newhdrgen/yaml/stdlib.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/stdlib.yaml
rename to libc/newhdrgen/yaml/stdlib.yaml
diff --git a/libc/newhdrgen/yaml_combined/string.yaml b/libc/newhdrgen/yaml/string.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/string.yaml
rename to libc/newhdrgen/yaml/string.yaml
diff --git a/libc/newhdrgen/yaml_combined/strings.yaml b/libc/newhdrgen/yaml/strings.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/strings.yaml
rename to libc/newhdrgen/yaml/strings.yaml
diff --git a/libc/newhdrgen/yaml/sys_auxv.yaml b/libc/newhdrgen/yaml/sys_auxv.yaml
index beea1d8b5f09f..bbf756a1ffe6a 100644
--- a/libc/newhdrgen/yaml/sys_auxv.yaml
+++ b/libc/newhdrgen/yaml/sys_auxv.yaml
@@ -5,4 +5,10 @@ macros: []
types: []
enums: []
objects: []
-functions: []
+functions:
+ - name: getauxval
+ standards:
+ - GNUExtensions
+ return_type: unsigned long
+ arguments:
+ - type: unsigned long
diff --git a/libc/newhdrgen/yaml_combined/sys_mman.yaml b/libc/newhdrgen/yaml/sys_mman.yaml
similarity index 97%
rename from libc/newhdrgen/yaml_combined/sys_mman.yaml
rename to libc/newhdrgen/yaml/sys_mman.yaml
index 7b858db2de11a..16a6463b0917d 100644
--- a/libc/newhdrgen/yaml_combined/sys_mman.yaml
+++ b/libc/newhdrgen/yaml/sys_mman.yaml
@@ -1,8 +1,6 @@
-#known as sys/mman in POSIX
header: sys-mman.h
standards: POSIX
macros: []
-macros: []
types:
- type_name: mode_t
- type_name: size_t
diff --git a/libc/newhdrgen/yaml/sys_types.yaml b/libc/newhdrgen/yaml/sys_types.yaml
index 6b08254a7fab1..20cf9bcd39da3 100644
--- a/libc/newhdrgen/yaml/sys_types.yaml
+++ b/libc/newhdrgen/yaml/sys_types.yaml
@@ -26,5 +26,5 @@ types:
- type_name: pthread_key_t
- type_name: pthread_condattr_t
enums: []
-functions: []
objects: []
+functions: []
diff --git a/libc/newhdrgen/yaml_combined/sys_wait.yaml b/libc/newhdrgen/yaml/sys_wait.yaml
similarity index 94%
rename from libc/newhdrgen/yaml_combined/sys_wait.yaml
rename to libc/newhdrgen/yaml/sys_wait.yaml
index 56a6066e925d6..2f2f70d0e9745 100644
--- a/libc/newhdrgen/yaml_combined/sys_wait.yaml
+++ b/libc/newhdrgen/yaml/sys_wait.yaml
@@ -1,4 +1,4 @@
-header: sys-wait.h #sys/wait.h
+header: sys-wait.h
macros: []
types:
- type_name: siginfo_t
diff --git a/libc/newhdrgen/yaml_combined/time.yaml b/libc/newhdrgen/yaml/time.yaml
similarity index 100%
rename from libc/newhdrgen/yaml_combined/time.yaml
rename to libc/newhdrgen/yaml/time.yaml
>From 5828b04b0373f10fade7eefbeb181dc8ceb98422 Mon Sep 17 00:00:00 2001
From: Sayhaan Siddiqui <49014204+sayhaan at users.noreply.github.com>
Date: Wed, 3 Jul 2024 14:50:40 -0700
Subject: [PATCH 186/246] [BOLT][DWARF] Refactor legacy ranges writers (#96006)
Refactors the legacy ranges writers so that a writer is created for each
instance of a DWO file.
We now write everything out into .debug_ranges after all the DWO files are
processed. This also changes the order in which ranges are written out:
previously we wrote them out inside the main CU processing loop, whereas now
we iterate through the CU buckets created by partitionCUs after the main
processing loop. A minimal sketch of the new flow is shown below.
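
A rough sketch of the per-DWO writer lifecycle this patch introduces
(hypothetical standalone functions; in the patch itself this logic lives in
DWARFRewriter::updateDebugInfo() and finalizeCompileUnits(), and the types
come from the hunks below):

```cpp
// Sketch only, not part of the patch. Assumes BOLT's DebugData.h types.
#include "bolt/Core/DebugData.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include <memory>
#include <unordered_map>

using namespace llvm::bolt;

// One legacy ranges writer per DWO CU, keyed by DWO id.
std::unordered_map<uint64_t, std::unique_ptr<DebugRangesSectionWriter>>
    LegacyRangesWritersByCU;

// During the main CU loop: create and initialize a writer per DWO unit.
void registerDWOWriter(uint64_t DWOId, llvm::DWARFUnit &CU) {
  auto Writer = std::make_unique<DebugRangesSectionWriter>();
  Writer->initSection(CU);
  LegacyRangesWritersByCU[DWOId] = std::move(Writer);
}

// After partitionCUs: drain every per-DWO buffer into the single
// .debug_ranges writer, so the section is emitted bucket by bucket.
void finalizeRanges(DebugRangesSectionWriter &MainRangesWriter) {
  for (auto &[DWOId, Writer] : LegacyRangesWritersByCU)
    MainRangesWriter.appendToRangeBuffer(*Writer->releaseBuffer());
}
```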
---
bolt/include/bolt/Core/DebugData.h | 12 ++
bolt/include/bolt/Rewrite/DWARFRewriter.h | 4 +
bolt/lib/Core/DebugData.cpp | 6 +
bolt/lib/Rewrite/DWARFRewriter.cpp | 132 +++++++++++-------
bolt/test/X86/debug-fission-single-convert.s | 6 +-
bolt/test/X86/dwarf4-df-dualcu.test | 40 +++---
.../X86/dwarf4-df-input-lowpc-ranges-cus.test | 80 ++++++-----
.../X86/dwarf4-df-input-lowpc-ranges.test | 37 ++---
8 files changed, 190 insertions(+), 127 deletions(-)
diff --git a/bolt/include/bolt/Core/DebugData.h b/bolt/include/bolt/Core/DebugData.h
index 585bafa088849..144433ac78a37 100644
--- a/bolt/include/bolt/Core/DebugData.h
+++ b/bolt/include/bolt/Core/DebugData.h
@@ -210,6 +210,15 @@ class DebugRangesSectionWriter {
static bool classof(const DebugRangesSectionWriter *Writer) {
return Writer->getKind() == RangesWriterKind::DebugRangesWriter;
}
+
+ /// Append a range to the main buffer.
+ void appendToRangeBuffer(const DebugBufferVector &CUBuffer);
+
+ /// Sets Unit DIE to be updated for CU.
+ void setDie(DIE *Die) { this->Die = Die; }
+
+ /// Returns Unit DIE to be updated for CU.
+ DIE *getDie() const { return Die; }
/// Writes out range lists for a current CU being processed.
void virtual finalizeSection(){};
@@ -232,6 +241,9 @@ class DebugRangesSectionWriter {
static constexpr uint64_t EmptyRangesOffset{0};
private:
+ /// Stores Unit DIE to be updated for CU.
+ DIE *Die{0};
+
RangesWriterKind Kind;
};
diff --git a/bolt/include/bolt/Rewrite/DWARFRewriter.h b/bolt/include/bolt/Rewrite/DWARFRewriter.h
index c34fbd5088a41..4f576eaa95576 100644
--- a/bolt/include/bolt/Rewrite/DWARFRewriter.h
+++ b/bolt/include/bolt/Rewrite/DWARFRewriter.h
@@ -89,6 +89,10 @@ class DWARFRewriter {
/// Store Rangelists writer for each DWO CU.
RangeListsDWOWriers RangeListsWritersByCU;
+ /// Stores ranges writer for each DWO CU.
+ std::unordered_map<uint64_t, std::unique_ptr<DebugRangesSectionWriter>>
+ LegacyRangesWritersByCU;
+
std::mutex LocListDebugInfoPatchesMutex;
/// Dwo id specific its RangesBase.
diff --git a/bolt/lib/Core/DebugData.cpp b/bolt/lib/Core/DebugData.cpp
index f502a50312470..08d4c45aac791 100644
--- a/bolt/lib/Core/DebugData.cpp
+++ b/bolt/lib/Core/DebugData.cpp
@@ -177,6 +177,12 @@ uint64_t DebugRangesSectionWriter::getSectionOffset() {
return SectionOffset;
}
+void DebugRangesSectionWriter::appendToRangeBuffer(
+ const DebugBufferVector &CUBuffer) {
+ *RangesStream << CUBuffer;
+ SectionOffset = RangesBuffer->size();
+}
+
DebugAddrWriter *DebugRangeListsSectionWriter::AddrWriter = nullptr;
uint64_t DebugRangeListsSectionWriter::addRanges(
diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp
index 1f5c3a488b914..1f426d0adfc61 100644
--- a/bolt/lib/Rewrite/DWARFRewriter.cpp
+++ b/bolt/lib/Rewrite/DWARFRewriter.cpp
@@ -646,6 +646,15 @@ void DWARFRewriter::updateDebugInfo() {
} else {
LocListWritersByCU[CUIndex] = std::make_unique<DebugLocWriter>();
+ if (std::optional<uint64_t> DWOId = CU.getDWOId()) {
+ assert(LegacyRangesWritersByCU.count(*DWOId) == 0 &&
+ "LegacyRangeLists writer for DWO unit already exists.");
+ auto LegacyRangesSectionWriterByCU =
+ std::make_unique<DebugRangesSectionWriter>();
+ LegacyRangesSectionWriterByCU->initSection(CU);
+ LegacyRangesWritersByCU[*DWOId] =
+ std::move(LegacyRangesSectionWriterByCU);
+ }
}
return LocListWritersByCU[CUIndex++].get();
};
@@ -693,6 +702,7 @@ void DWARFRewriter::updateDebugInfo() {
if (Unit->getVersion() >= 5) {
TempRangesSectionWriter = RangeListsWritersByCU[*DWOId].get();
} else {
+ TempRangesSectionWriter = LegacyRangesWritersByCU[*DWOId].get();
RangesBase = RangesSectionWriter->getSectionOffset();
setDwoRangesBase(*DWOId, *RangesBase);
}
@@ -1274,9 +1284,17 @@ void DWARFRewriter::updateDWARFObjectAddressRanges(
}
if (RangesBaseInfo) {
- DIEBldr.replaceValue(&Die, RangesBaseInfo.getAttribute(),
- RangesBaseInfo.getForm(),
- DIEInteger(static_cast<uint32_t>(*RangesBase)));
+ if (RangesBaseInfo.getAttribute() == dwarf::DW_AT_GNU_ranges_base) {
+ auto RangesWriterIterator =
+ LegacyRangesWritersByCU.find(*Unit.getDWOId());
+ assert(RangesWriterIterator != LegacyRangesWritersByCU.end() &&
+ "RangesWriter does not exist for DWOId");
+ RangesWriterIterator->second->setDie(&Die);
+ } else {
+ DIEBldr.replaceValue(&Die, RangesBaseInfo.getAttribute(),
+ RangesBaseInfo.getForm(),
+ DIEInteger(static_cast<uint32_t>(*RangesBase)));
+ }
RangesBase = std::nullopt;
}
}
@@ -1294,20 +1312,12 @@ void DWARFRewriter::updateDWARFObjectAddressRanges(
RangesAttrInfo.getForm() == dwarf::DW_FORM_sec_offset)
NeedConverted = true;
- uint64_t CurRangeBase = 0;
- if (Unit.isDWOUnit()) {
- if (std::optional<uint64_t> DWOId = Unit.getDWOId())
- CurRangeBase = getDwoRangesBase(*DWOId);
- else
- errs() << "BOLT-WARNING: [internal-dwarf-error]: DWOId is not found "
- "for DWO Unit.";
- }
if (NeedConverted || RangesAttrInfo.getForm() == dwarf::DW_FORM_rnglistx)
DIEBldr.replaceValue(&Die, dwarf::DW_AT_ranges, dwarf::DW_FORM_rnglistx,
DIEInteger(DebugRangesOffset));
else
DIEBldr.replaceValue(&Die, dwarf::DW_AT_ranges, RangesAttrInfo.getForm(),
- DIEInteger(DebugRangesOffset - CurRangeBase));
+ DIEInteger(DebugRangesOffset));
if (!RangesBase) {
if (LowPCAttrInfo &&
@@ -1324,15 +1334,21 @@ void DWARFRewriter::updateDWARFObjectAddressRanges(
// If we are at this point we are in the CU/Skeleton CU, and
// DW_AT_GNU_ranges_base or DW_AT_rnglists_base doesn't exist.
- if (Unit.getVersion() <= 4)
+ if (Unit.getVersion() <= 4) {
DIEBldr.addValue(&Die, dwarf::DW_AT_GNU_ranges_base, dwarf::DW_FORM_data4,
- DIEInteger(*RangesBase));
- else if (Unit.getVersion() == 5)
+ DIEInteger(INT_MAX));
+ auto RangesWriterIterator =
+ LegacyRangesWritersByCU.find(*Unit.getDWOId());
+ assert(RangesWriterIterator != LegacyRangesWritersByCU.end() &&
+ "RangesWriter does not exist for DWOId");
+ RangesWriterIterator->second->setDie(&Die);
+ } else if (Unit.getVersion() == 5) {
DIEBldr.addValue(&Die, dwarf::DW_AT_rnglists_base,
dwarf::DW_FORM_sec_offset, DIEInteger(*RangesBase));
- else
+ } else {
DIEBldr.addValue(&Die, dwarf::DW_AT_rnglists_base,
dwarf::DW_FORM_sec_offset, DIEInteger(*RangesBase));
+ }
return;
}
@@ -1611,6 +1627,30 @@ void DWARFRewriter::finalizeCompileUnits(DIEBuilder &DIEBlder,
DIEStreamer &Streamer,
CUOffsetMap &CUMap,
const std::list<DWARFUnit *> &CUs) {
+ for (DWARFUnit *CU : CUs) {
+ if (CU->getVersion() != 4)
+ continue;
+ std::optional<uint64_t> DWOId = CU->getDWOId();
+ if (!DWOId)
+ continue;
+ auto RangesWriterIterator = LegacyRangesWritersByCU.find(*DWOId);
+ assert(RangesWriterIterator != LegacyRangesWritersByCU.end() &&
+ "RangesWriter does not exist for DWOId");
+ std::unique_ptr<DebugRangesSectionWriter> &LegacyRangesWriter =
+ RangesWriterIterator->second;
+ std::optional<DIE *> Die = LegacyRangesWriter->getDie();
+ if (!Die || !Die.value())
+ continue;
+ DIEValue DvalGNUBase =
+ Die.value()->findAttribute(dwarf::DW_AT_GNU_ranges_base);
+ assert(DvalGNUBase && "GNU_ranges_base attribute does not exist for DWOId");
+ DIEBlder.replaceValue(
+ Die.value(), dwarf::DW_AT_GNU_ranges_base, DvalGNUBase.getForm(),
+ DIEInteger(LegacyRangesSectionWriter->getSectionOffset()));
+ std::unique_ptr<DebugBufferVector> RangesWritersContents =
+ LegacyRangesWriter->releaseBuffer();
+ LegacyRangesSectionWriter->appendToRangeBuffer(*RangesWritersContents);
+ }
DIEBlder.generateAbbrevs();
DIEBlder.finish();
// generate debug_info and CUMap
@@ -2098,7 +2138,6 @@ void DWARFRewriter::convertToRangesPatchDebugInfo(
DWARFUnit &Unit, DIEBuilder &DIEBldr, DIE &Die,
uint64_t RangesSectionOffset, DIEValue &LowPCAttrInfo,
DIEValue &HighPCAttrInfo, std::optional<uint64_t> RangesBase) {
- uint32_t BaseOffset = 0;
dwarf::Form LowForm = LowPCAttrInfo.getForm();
dwarf::Attribute RangeBaseAttribute = dwarf::DW_AT_GNU_ranges_base;
dwarf::Form RangesForm = dwarf::DW_FORM_sec_offset;
@@ -2113,45 +2152,40 @@ void DWARFRewriter::convertToRangesPatchDebugInfo(
Die.getTag() == dwarf::DW_TAG_skeleton_unit;
if (!IsUnitDie)
DIEBldr.deleteValue(&Die, LowPCAttrInfo.getAttribute());
- // In DWARF4 for DW_AT_low_pc in binary DW_FORM_addr is used. In the DWO
- // section DW_FORM_GNU_addr_index is used. So for if we are converting
- // DW_AT_low_pc/DW_AT_high_pc and see DW_FORM_GNU_addr_index. We are
- // converting in DWO section, and DW_AT_ranges [DW_FORM_sec_offset] is
- // relative to DW_AT_GNU_ranges_base.
- if (LowForm == dwarf::DW_FORM_GNU_addr_index) {
- // Ranges are relative to DW_AT_GNU_ranges_base.
- uint64_t CurRangeBase = 0;
- if (std::optional<uint64_t> DWOId = Unit.getDWOId()) {
- CurRangeBase = getDwoRangesBase(*DWOId);
- }
- BaseOffset = CurRangeBase;
- } else {
- // In DWARF 5 we can have DW_AT_low_pc either as DW_FORM_addr, or
- // DW_FORM_addrx. Former is when DW_AT_rnglists_base is present. Latter is
- // when it's absent.
- if (IsUnitDie) {
- if (LowForm == dwarf::DW_FORM_addrx) {
- const uint32_t Index = AddrWriter->getIndexFromAddress(0, Unit);
- DIEBldr.replaceValue(&Die, LowPCAttrInfo.getAttribute(),
- LowPCAttrInfo.getForm(), DIEInteger(Index));
- } else {
- DIEBldr.replaceValue(&Die, LowPCAttrInfo.getAttribute(),
- LowPCAttrInfo.getForm(), DIEInteger(0));
- }
+
+ // In DWARF 5 we can have DW_AT_low_pc either as DW_FORM_addr, or
+ // DW_FORM_addrx. Former is when DW_AT_rnglists_base is present. Latter is
+ // when it's absent.
+ if (IsUnitDie) {
+ if (LowForm == dwarf::DW_FORM_addrx) {
+ const uint32_t Index = AddrWriter->getIndexFromAddress(0, Unit);
+ DIEBldr.replaceValue(&Die, LowPCAttrInfo.getAttribute(),
+ LowPCAttrInfo.getForm(), DIEInteger(Index));
+ } else {
+ DIEBldr.replaceValue(&Die, LowPCAttrInfo.getAttribute(),
+ LowPCAttrInfo.getForm(), DIEInteger(0));
}
- // Original CU didn't have DW_AT_*_base. We converted it's children (or
- // dwo), so need to insert it into CU.
- if (RangesBase)
+ }
+ // Original CU didn't have DW_AT_*_base. We converted its children (or
+ // dwo), so we need to insert it into the CU.
+ if (RangesBase) {
+ if (Unit.getVersion() >= 5) {
DIEBldr.addValue(&Die, RangeBaseAttribute, dwarf::DW_FORM_sec_offset,
DIEInteger(*RangesBase));
+ } else {
+ DIEBldr.addValue(&Die, RangeBaseAttribute, dwarf::DW_FORM_sec_offset,
+ DIEInteger(INT_MAX));
+ auto RangesWriterIterator =
+ LegacyRangesWritersByCU.find(*Unit.getDWOId());
+ assert(RangesWriterIterator != LegacyRangesWritersByCU.end() &&
+ "RangesWriter does not exist for DWOId");
+ RangesWriterIterator->second->setDie(&Die);
+ }
}
- uint64_t RangeAttrVal = RangesSectionOffset - BaseOffset;
- if (Unit.getVersion() >= 5)
- RangeAttrVal = RangesSectionOffset;
// HighPC was converted into DW_AT_ranges.
// For DWARF5 we only access ranges through index.
DIEBldr.replaceValue(&Die, HighPCAttrInfo.getAttribute(), dwarf::DW_AT_ranges,
- RangesForm, DIEInteger(RangeAttrVal));
+ RangesForm, DIEInteger(RangesSectionOffset));
}
diff --git a/bolt/test/X86/debug-fission-single-convert.s b/bolt/test/X86/debug-fission-single-convert.s
index 28fcb6686e0a2..4cd881740b2f8 100644
--- a/bolt/test/X86/debug-fission-single-convert.s
+++ b/bolt/test/X86/debug-fission-single-convert.s
@@ -31,11 +31,11 @@
# CHECK-DWO-DWO: 00000010
# CHECK-DWO-DWO: 00000050
# CHECK-DWO-DWO: DW_TAG_subprogram
-# CHECK-DWO-DWO-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
+# CHECK-DWO-DWO-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
# CHECK-DWO-DWO: DW_TAG_subprogram
-# CHECK-DWO-DWO-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000020
+# CHECK-DWO-DWO-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000030
# CHECK-DWO-DWO: DW_TAG_subprogram
-# CHECK-DWO-DWO-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000040
+# CHECK-DWO-DWO-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000050
# CHECK-ADDR-SEC: .debug_addr contents:
# CHECK-ADDR-SEC: 0x00000000: Addrs: [
diff --git a/bolt/test/X86/dwarf4-df-dualcu.test b/bolt/test/X86/dwarf4-df-dualcu.test
index b690623b70d83..fb328eb1872e0 100644
--- a/bolt/test/X86/dwarf4-df-dualcu.test
+++ b/bolt/test/X86/dwarf4-df-dualcu.test
@@ -37,36 +37,38 @@
; BOLT: .debug_ranges
; BOLT-NEXT: 00000000 <End of list>
-; BOLT-NEXT: 00000010 [[#%.16x,ADDR:]] [[#%.16x,ADDRB:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR1:]] [[#%.16x,ADDRB1:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR2:]] [[#%.16x,ADDRB2:]]
; BOLT-NEXT: 00000010 <End of list>
-; BOLT-NEXT: 00000030 [[#%.16x,ADDR1:]] [[#%.16x,ADDR1B:]]
-; BOLT-NEXT: 00000030 <End of list>
-; BOLT-NEXT: 00000050 [[#%.16x,ADDR2:]] [[#%.16x,ADDR2B:]]
-; BOLT-NEXT: 00000050 [[#%.16x,ADDR3:]] [[#%.16x,ADDR3B:]]
+; BOLT-NEXT: 00000040 <End of list>
+; BOLT-NEXT: 00000050 [[#%.16x,ADDR1:]] [[#%.16x,ADDRB1:]]
; BOLT-NEXT: 00000050 <End of list>
-; BOLT-NEXT: 00000080 [[#%.16x,ADDR4:]] [[#%.16x,ADDR4B:]]
-; BOLT-NEXT: 00000080 <End of list>
-; BOLT-NEXT: 000000a0 [[#%.16x,ADDR5:]] [[#%.16x,ADDR5B:]]
-; BOLT-NEXT: 000000a0 <End of list>
+; BOLT-NEXT: 00000070 [[#%.16x,ADDR2:]] [[#%.16x,ADDRB2:]]
+; BOLT-NEXT: 00000070 <End of list>
+; BOLT-NEXT: 00000090 [[#%.16x,ADDR3:]] [[#%.16x,ADDRB3:]]
+; BOLT-NEXT: 00000090 <End of list>
+; BOLT-NEXT: 000000b0 <End of list>
+; BOLT-NEXT: 000000c0 [[#%.16x,ADDR3:]] [[#%.16x,ADDRB3:]]
+; BOLT-NEXT: 000000c0 <End of list>
; BOLT: DW_TAG_compile_unit
; BOLT: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x00000016] = "main.dwo.dwo")
; BOLT-NEXT: DW_AT_GNU_dwo_id
; BOLT-NEXT: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000050
-; BOLT-NEXT: [0x[[#ADDR2]], 0x[[#ADDR2B]])
-; BOLT-NEXT: [0x[[#ADDR3]], 0x[[#ADDR3B]]))
+; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
+; BOLT-NEXT: [0x[[#ADDR1]], 0x[[#ADDRB1]])
+; BOLT-NEXT: [0x[[#ADDR2]], 0x[[#ADDRB2]]))
; BOLT-NEXT: DW_AT_GNU_addr_base [DW_FORM_sec_offset] (0x00000000)
-; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x00000010)
+; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x00000040)
; BOLT-NEXT: Compile
; BOLT: DW_TAG_compile_unit
; BOLT: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x00000023] = "helper.dwo.dwo")
; BOLT-NEXT: DW_AT_GNU_dwo_id
; BOLT-NEXT: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x000000a0
-; BOLT-NEXT: [0x[[#ADDR5]], 0x[[#ADDR5B]])
+; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000090
+; BOLT-NEXT: [0x[[#ADDR3]], 0x[[#ADDRB3]])
; BOLT-NEXT: DW_AT_GNU_addr_base [DW_FORM_sec_offset] (0x00000010)
-; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x00000080)
+; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x000000b0)
; PRE-BOLT-DWO-MAIN: version = 0x0004
; PRE-BOLT-DWO-MAIN: DW_TAG_compile_unit
@@ -113,13 +115,13 @@
; BOLT-DWO-MAIN-NEXT: DW_AT_decl_line
; BOLT-DWO-MAIN-NEXT: DW_AT_location [DW_FORM_exprloc] (DW_OP_GNU_addr_index 0x1)
; BOLT-DWO-MAIN: DW_TAG_subprogram [4]
-; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
+; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
; BOLT-DWO-MAIN-NEXT: )
; BOLT-DWO-MAIN-NEXT: DW_AT_frame_base
; BOLT-DWO-MAIN-NEXT: DW_AT_linkage_name [DW_FORM_GNU_str_index] (indexed (00000003) string = "_Z3usePiS_")
; BOLT-DWO-MAIN-NEXT: DW_AT_name [DW_FORM_GNU_str_index] (indexed (00000004) string = "use")
; BOLT-DWO-MAIN: DW_TAG_subprogram [6]
-; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000020
+; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000030
; BOLT-DWO-MAIN-NEXT: )
; BOLT-DWO-MAIN-NEXT: DW_AT_frame_base [DW_FORM_exprloc] (DW_OP_reg6 RBP)
; BOLT-DWO-MAIN-NEXT: DW_AT_name [DW_FORM_GNU_str_index] (indexed (00000005) string = "main")
@@ -160,4 +162,4 @@
; BOLT-DWO-HELPER-NEXT: DW_AT_decl_line
; BOLT-DWO-HELPER-NEXT: DW_AT_location [DW_FORM_exprloc] (DW_OP_GNU_addr_index 0x1)
; BOLT-DWO-HELPER: DW_TAG_subprogram [4]
-; BOLT-DWO-HELPER-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
+; BOLT-DWO-HELPER-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
diff --git a/bolt/test/X86/dwarf4-df-input-lowpc-ranges-cus.test b/bolt/test/X86/dwarf4-df-input-lowpc-ranges-cus.test
index c9abd02bbb7d9..cf9357d5f3c59 100644
--- a/bolt/test/X86/dwarf4-df-input-lowpc-ranges-cus.test
+++ b/bolt/test/X86/dwarf4-df-input-lowpc-ranges-cus.test
@@ -17,45 +17,47 @@
; BOLT: .debug_ranges
; BOLT-NEXT: 00000000 <End of list>
-; BOLT-NEXT: 00000010
-; BOLT-NEXT: 00000010
-; BOLT-NEXT: 00000010
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR1:]] [[#%.16x,ADDRB1:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR2:]] [[#%.16x,ADDRB2:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR3:]] [[#%.16x,ADDRB3:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR4:]] [[#%.16x,ADDRB4:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR5:]] [[#%.16x,ADDRB5:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR6:]] [[#%.16x,ADDRB6:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR7:]] [[#%.16x,ADDRB7:]]
; BOLT-NEXT: 00000010 <End of list>
-; BOLT-NEXT: 00000050
-; BOLT-NEXT: 00000050
-; BOLT-NEXT: 00000050
-; BOLT-NEXT: 00000050 <End of list>
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR1:]] [[#%.16x,ADDRB1:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR2:]] [[#%.16x,ADDRB2:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR3:]] [[#%.16x,ADDRB3:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR4:]] [[#%.16x,ADDRB4:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR5:]] [[#%.16x,ADDRB5:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR6:]] [[#%.16x,ADDRB6:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR7:]] [[#%.16x,ADDRB7:]]
; BOLT-NEXT: 00000090 <End of list>
-; BOLT-NEXT: 00000110
-; BOLT-NEXT: 00000110
-; BOLT-NEXT: 00000110
-; BOLT-NEXT: 00000110 <End of list>
-; BOLT-NEXT: 00000150
-; BOLT-NEXT: 00000150
-; BOLT-NEXT: 00000150
-; BOLT-NEXT: 00000150 <End of list>
-; BOLT-NEXT: 00000190 [[#%.16x,ADDR8:]] [[#%.16x,ADDRB8:]]
-; BOLT-NEXT: 00000190 [[#%.16x,ADDR9:]] [[#%.16x,ADDRB9:]]
-; BOLT-NEXT: 00000190 [[#%.16x,ADDR10:]] [[#%.16x,ADDRB10:]]
-; BOLT-NEXT: 00000190 [[#%.16x,ADDR11:]] [[#%.16x,ADDRB11:]]
-; BOLT-NEXT: 00000190 [[#%.16x,ADDR12:]] [[#%.16x,ADDRB12:]]
-; BOLT-NEXT: 00000190 [[#%.16x,ADDR13:]] [[#%.16x,ADDRB13:]]
-; BOLT-NEXT: 00000190 [[#%.16x,ADDR14:]] [[#%.16x,ADDRB14:]]
-; BOLT-NEXT: 00000190 <End of list>
+; BOLT-NEXT: 000000a0 [[#%.16x,ADDR1:]] [[#%.16x,ADDRB1:]]
+; BOLT-NEXT: 000000a0 [[#%.16x,ADDR2:]] [[#%.16x,ADDRB2:]]
+; BOLT-NEXT: 000000a0 [[#%.16x,ADDR3:]] [[#%.16x,ADDRB3:]]
+; BOLT-NEXT: 000000a0 <End of list>
+; BOLT-NEXT: 000000e0 [[#%.16x,ADDR5:]] [[#%.16x,ADDRB5:]]
+; BOLT-NEXT: 000000e0 [[#%.16x,ADDR6:]] [[#%.16x,ADDRB6:]]
+; BOLT-NEXT: 000000e0 [[#%.16x,ADDR7:]] [[#%.16x,ADDRB7:]]
+; BOLT-NEXT: 000000e0 <End of list>
+; BOLT-NEXT: 00000120 [[#%.16x,ADDR8:]] [[#%.16x,ADDRB8:]]
+; BOLT-NEXT: 00000120 [[#%.16x,ADDR9:]] [[#%.16x,ADDRB9:]]
+; BOLT-NEXT: 00000120 [[#%.16x,ADDR10:]] [[#%.16x,ADDRB10:]]
+; BOLT-NEXT: 00000120 [[#%.16x,ADDR11:]] [[#%.16x,ADDRB11:]]
+; BOLT-NEXT: 00000120 [[#%.16x,ADDR12:]] [[#%.16x,ADDRB12:]]
+; BOLT-NEXT: 00000120 [[#%.16x,ADDR13:]] [[#%.16x,ADDRB13:]]
+; BOLT-NEXT: 00000120 [[#%.16x,ADDR14:]] [[#%.16x,ADDRB14:]]
+; BOLT-NEXT: 00000120 <End of list>
+; BOLT-NEXT: 000001a0 <End of list>
+; BOLT-NEXT: 000001b0 [[#%.16x,ADDR8:]] [[#%.16x,ADDRB8:]]
+; BOLT-NEXT: 000001b0 [[#%.16x,ADDR9:]] [[#%.16x,ADDRB9:]]
+; BOLT-NEXT: 000001b0 [[#%.16x,ADDR10:]] [[#%.16x,ADDRB10:]]
+; BOLT-NEXT: 000001b0 <End of list>
+; BOLT-NEXT: 000001f0 [[#%.16x,ADDR12:]] [[#%.16x,ADDRB12:]]
+; BOLT-NEXT: 000001f0 [[#%.16x,ADDR13:]] [[#%.16x,ADDRB13:]]
+; BOLT-NEXT: 000001f0 [[#%.16x,ADDR14:]] [[#%.16x,ADDRB14:]]
+; BOLT-NEXT: 000001f0 <End of list>
; BOLT: DW_TAG_compile_unit
; BOLT: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-fA-F]+}}] = "main.dwo.dwo")
; BOLT-NEXT: DW_AT_GNU_dwo_id
-; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x00000010)
+; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x00000090)
; BOLT-NEXT: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000090
+; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
; BOLT-NEXT: [0x[[#ADDR1]], 0x[[#ADDRB1]])
; BOLT-NEXT: [0x[[#ADDR2]], 0x[[#ADDRB2]])
; BOLT-NEXT: [0x[[#ADDR3]], 0x[[#ADDRB3]])
@@ -64,13 +66,14 @@
; BOLT-NEXT: [0x[[#ADDR6]], 0x[[#ADDRB6]])
; BOLT-NEXT: [0x[[#ADDR7]], 0x[[#ADDRB7]])
; BOLT-NEXT: DW_AT_GNU_addr_base [DW_FORM_sec_offset] (0x00000000)
+; BOLT-NEXT: Compile Unit
; BOLT: DW_TAG_compile_unit
; BOLT: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-fA-F]+}}] = "mainOther.dwo.dwo")
; BOLT-NEXT: DW_AT_GNU_dwo_id
-; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x00000110)
+; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x000001a0)
; BOLT-NEXT: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000190
+; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000120
; BOLT-NEXT: [0x[[#ADDR8]], 0x[[#ADDRB8]])
; BOLT-NEXT: [0x[[#ADDR9]], 0x[[#ADDRB9]])
; BOLT-NEXT: [0x[[#ADDR10]], 0x[[#ADDRB10]])
@@ -79,19 +82,20 @@
; BOLT-NEXT: [0x[[#ADDR13]], 0x[[#ADDRB13]])
; BOLT-NEXT: [0x[[#ADDR14]], 0x[[#ADDRB14]])
; BOLT-NEXT: DW_AT_GNU_addr_base [DW_FORM_sec_offset] (0x00000018)
+; BOLT: {{^$}}
; BOLT-DWO-MAIN: DW_TAG_subprogram
-; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
+; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
-; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000040
+; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000050
; BOLT-DWO-MAIN: DW_TAG_subprogram
-; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
+; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
-; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000040
+; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000050
diff --git a/bolt/test/X86/dwarf4-df-input-lowpc-ranges.test b/bolt/test/X86/dwarf4-df-input-lowpc-ranges.test
index 276bea4ba0c1c..ab4353a282475 100644
--- a/bolt/test/X86/dwarf4-df-input-lowpc-ranges.test
+++ b/bolt/test/X86/dwarf4-df-input-lowpc-ranges.test
@@ -15,29 +15,30 @@
; BOLT: .debug_ranges
; BOLT-NEXT: 00000000 <End of list>
-; BOLT-NEXT: 00000010
-; BOLT-NEXT: 00000010
-; BOLT-NEXT: 00000010
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR1:]] [[#%.16x,ADDRB1:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR2:]] [[#%.16x,ADDRB2:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR3:]] [[#%.16x,ADDRB3:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR4:]] [[#%.16x,ADDRB4:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR5:]] [[#%.16x,ADDRB5:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR6:]] [[#%.16x,ADDRB6:]]
+; BOLT-NEXT: 00000010 [[#%.16x,ADDR7:]] [[#%.16x,ADDRB7:]]
; BOLT-NEXT: 00000010 <End of list>
-; BOLT-NEXT: 00000050
-; BOLT-NEXT: 00000050
-; BOLT-NEXT: 00000050
-; BOLT-NEXT: 00000050 <End of list>
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR1:]] [[#%.16x,ADDRB1:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR2:]] [[#%.16x,ADDRB2:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR3:]] [[#%.16x,ADDRB3:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR4:]] [[#%.16x,ADDRB4:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR5:]] [[#%.16x,ADDRB5:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR6:]] [[#%.16x,ADDRB6:]]
-; BOLT-NEXT: 00000090 [[#%.16x,ADDR7:]] [[#%.16x,ADDRB7:]]
; BOLT-NEXT: 00000090 <End of list>
+; BOLT-NEXT: 000000a0 [[#%.16x,ADDR1:]] [[#%.16x,ADDRB1:]]
+; BOLT-NEXT: 000000a0 [[#%.16x,ADDR2:]] [[#%.16x,ADDRB2:]]
+; BOLT-NEXT: 000000a0 [[#%.16x,ADDR3:]] [[#%.16x,ADDRB3:]]
+; BOLT-NEXT: 000000a0 <End of list>
+; BOLT-NEXT: 000000e0 [[#%.16x,ADDR5:]] [[#%.16x,ADDRB5:]]
+; BOLT-NEXT: 000000e0 [[#%.16x,ADDR6:]] [[#%.16x,ADDRB6:]]
+; BOLT-NEXT: 000000e0 [[#%.16x,ADDR7:]] [[#%.16x,ADDRB7:]]
+; BOLT-NEXT: 000000e0 <End of list>
; BOLT: DW_TAG_compile_unit
; BOLT: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-fA-F]+}}] = "main.dwo.dwo")
; BOLT-NEXT: DW_AT_GNU_dwo_id
-; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x00000010)
+; BOLT-NEXT: DW_AT_GNU_ranges_base [DW_FORM_sec_offset] (0x00000090)
; BOLT-NEXT: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000090
+; BOLT-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
; BOLT-NEXT: [0x[[#ADDR1]], 0x[[#ADDRB1]])
; BOLT-NEXT: [0x[[#ADDR2]], 0x[[#ADDRB2]])
; BOLT-NEXT: [0x[[#ADDR3]], 0x[[#ADDRB3]])
@@ -48,9 +49,9 @@
; BOLT-NEXT: DW_AT_GNU_addr_base [DW_FORM_sec_offset] (0x00000000)
; BOLT-DWO-MAIN: DW_TAG_subprogram
-; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
+; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000010
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
; BOLT-DWO-MAIN: DW_TAG_subprogram
-; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000040
+; BOLT-DWO-MAIN-NEXT: DW_AT_ranges [DW_FORM_sec_offset] (0x00000050
>From 7d68d9d2f27535e03934383220282e18edd0c1e8 Mon Sep 17 00:00:00 2001
From: lntue <35648136+lntue at users.noreply.github.com>
Date: Wed, 3 Jul 2024 18:05:24 -0400
Subject: [PATCH 187/246] [libc][math] Implement correctly rounded double
precision tan (#97489)
Using the same range reduction as `sin`, `cos`, and `sincos`:
1) Reduce `x = k*pi/128 + u`, with `|u| <= pi/256`, where `u` is kept in
double-double precision.
2) Approximate `tan(u)` with a degree-9 Taylor polynomial.
3) Compute
```
tan(x) ~ (sin(k*pi/128) + tan(u) * cos(k*pi/128)) / (cos(k*pi/128) - tan(u) * sin(k*pi/128))
```
using the fast double-double division algorithm in [the CORE-MATH
project](https://gitlab.inria.fr/core-math/core-math/-/blob/master/src/binary64/tan/tan.c#L1855).
4) Perform Ziv's relative-error accuracy test.
5) If the accuracy test fails, redo the computation using 128-bit
precision `DyadicFloat`.
(A standalone numeric check of the step-3 identity is sketched below.)
Fixes https://github.com/llvm/llvm-project/issues/96930
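
As a sanity check, the step-3 formula is just the tangent angle-addition
identity evaluated at `a = k*pi/128`. A hypothetical standalone snippet
(plain double precision here; the patch keeps `u` and the intermediate
products in double-double):

```cpp
// Sketch only: verifies tan(a + u) == (sin(a) + tan(u)*cos(a)) /
//                                     (cos(a) - tan(u)*sin(a))
// numerically for one arbitrary (k, u) pair.
#include <cmath>
#include <cstdio>

int main() {
  const double pi = 3.141592653589793;
  int k = 37;                 // arbitrary index; a = k*pi/128
  double a = k * pi / 128.0;
  double u = 0.007;           // |u| <= pi/256 after range reduction
  double lhs = std::tan(a + u);
  double rhs = (std::sin(a) + std::tan(u) * std::cos(a)) /
               (std::cos(a) - std::tan(u) * std::sin(a));
  std::printf("tan(a+u) = %.17g\nidentity = %.17g\n", lhs, rhs);
  return 0;
}
```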
---
libc/config/darwin/arm/entrypoints.txt | 1 +
libc/config/linux/aarch64/entrypoints.txt | 1 +
libc/config/linux/arm/entrypoints.txt | 1 +
libc/config/linux/riscv/entrypoints.txt | 1 +
libc/docs/math/index.rst | 2 +-
libc/src/__support/FPUtil/double_double.h | 36 +++
libc/src/math/generic/CMakeLists.txt | 21 ++
libc/src/math/generic/tan.cpp | 318 ++++++++++++++++++++++
libc/src/math/x86_64/CMakeLists.txt | 9 -
libc/src/math/x86_64/tan.cpp | 23 --
libc/test/src/math/smoke/CMakeLists.txt | 10 +
libc/test/src/math/smoke/tan_test.cpp | 26 ++
libc/test/src/math/tan_test.cpp | 115 +++++++-
13 files changed, 518 insertions(+), 46 deletions(-)
create mode 100644 libc/src/math/generic/tan.cpp
delete mode 100644 libc/src/math/x86_64/CMakeLists.txt
delete mode 100644 libc/src/math/x86_64/tan.cpp
create mode 100644 libc/test/src/math/smoke/tan_test.cpp
diff --git a/libc/config/darwin/arm/entrypoints.txt b/libc/config/darwin/arm/entrypoints.txt
index cb4603c79c79c..feb106cc2cb63 100644
--- a/libc/config/darwin/arm/entrypoints.txt
+++ b/libc/config/darwin/arm/entrypoints.txt
@@ -234,6 +234,7 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.sqrt
libc.src.math.sqrtf
libc.src.math.sqrtl
+ libc.src.math.tan
libc.src.math.tanf
libc.src.math.tanhf
libc.src.math.trunc
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index 940df63e3912b..f7e08ec151d07 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -489,6 +489,7 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.sqrt
libc.src.math.sqrtf
libc.src.math.sqrtl
+ libc.src.math.tan
libc.src.math.tanf
libc.src.math.tanhf
libc.src.math.trunc
diff --git a/libc/config/linux/arm/entrypoints.txt b/libc/config/linux/arm/entrypoints.txt
index a27a494153480..a24514e29334d 100644
--- a/libc/config/linux/arm/entrypoints.txt
+++ b/libc/config/linux/arm/entrypoints.txt
@@ -366,6 +366,7 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.sqrt
libc.src.math.sqrtf
libc.src.math.sqrtl
+ libc.src.math.tan
libc.src.math.tanf
libc.src.math.tanhf
libc.src.math.trunc
diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt
index 51d85eed9ff16..5b0d591557944 100644
--- a/libc/config/linux/riscv/entrypoints.txt
+++ b/libc/config/linux/riscv/entrypoints.txt
@@ -497,6 +497,7 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.sqrt
libc.src.math.sqrtf
libc.src.math.sqrtl
+ libc.src.math.tan
libc.src.math.tanf
libc.src.math.tanhf
libc.src.math.trunc
diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst
index 04f63d03778a8..b70f29a986e14 100644
--- a/libc/docs/math/index.rst
+++ b/libc/docs/math/index.rst
@@ -338,7 +338,7 @@ Higher Math Functions
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| sqrt | |check| | |check| | |check| | | |check| | 7.12.7.10 | F.10.4.10 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| tan | |check| | | | | | 7.12.4.7 | F.10.1.7 |
+| tan | |check| | |check| | | | | 7.12.4.7 | F.10.1.7 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| tanh | |check| | | | | | 7.12.5.6 | F.10.2.6 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
diff --git a/libc/src/__support/FPUtil/double_double.h b/libc/src/__support/FPUtil/double_double.h
index 3d16a3cce3a99..6867026953f25 100644
--- a/libc/src/__support/FPUtil/double_double.h
+++ b/libc/src/__support/FPUtil/double_double.h
@@ -129,6 +129,42 @@ LIBC_INLINE DoubleDouble multiply_add<DoubleDouble>(const DoubleDouble &a,
return add(c, quick_mult(a, b));
}
+// Accurate double-double division, following Karp-Markstein's trick for
+// division, implemented in the CORE-MATH project at:
+// https://gitlab.inria.fr/core-math/core-math/-/blob/master/src/binary64/tan/tan.c#L1855
+//
+// Error bounds:
+// Let a = ah + al, b = bh + bl.
+// Let r = rh + rl be the approximation of (ah + al) / (bh + bl).
+// Then:
+// (ah + al) / (bh + bl) - rh =
+// = ((ah - bh * rh) + (al - bl * rh)) / (bh + bl)
+// = (1 + O(bl/bh)) * ((ah - bh * rh) + (al - bl * rh)) / bh
+// Let q = round(1/bh); then the above expression is approximately:
+// = (1 + O(bl / bh)) * (1 + O(2^-52)) * q * ((ah - bh * rh) + (al - bl * rh))
+// So we can compute:
+// rl = q * (ah - bh * rh) + q * (al - bl * rh)
+// as accurately as possible; then the error is bounded by:
+// |(ah + al) / (bh + bl) - (rh + rl)| < O(bl/bh) * (2^-52 + al/ah + bl/bh)
+LIBC_INLINE DoubleDouble div(const DoubleDouble &a, const DoubleDouble &b) {
+ DoubleDouble r;
+ double q = 1.0 / b.hi;
+ r.hi = a.hi * q;
+
+#ifdef LIBC_TARGET_CPU_HAS_FMA
+ double e_hi = fputil::multiply_add(b.hi, -r.hi, a.hi);
+ double e_lo = fputil::multiply_add(b.lo, -r.hi, a.lo);
+#else
+ DoubleDouble b_hi_r_hi = fputil::exact_mult</*NO_FMA=*/true>(b.hi, -r.hi);
+ DoubleDouble b_lo_r_hi = fputil::exact_mult</*NO_FMA=*/true>(b.lo, -r.hi);
+ double e_hi = (a.hi + b_hi_r_hi.hi) + b_hi_r_hi.lo;
+ double e_lo = (a.lo + b_lo_r_hi.hi) + b_lo_r_hi.lo;
+#endif // LIBC_TARGET_CPU_HAS_FMA
+
+ r.lo = q * (e_hi + e_lo);
+ return r;
+}
+
} // namespace LIBC_NAMESPACE::fputil
#endif // LLVM_LIBC_SRC___SUPPORT_FPUTIL_DOUBLE_DOUBLE_H
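The error bounds above are easy to probe outside the patch. Below is a
minimal stand-alone sketch of the same Karp-Markstein-style division on
plain doubles, with std::fma standing in for fputil::multiply_add and a
hypothetical DD struct standing in for fputil::DoubleDouble; it is an
illustration of the scheme, not the patch's implementation.
```cpp
#include <cmath>
#include <cstdio>

// Hypothetical stand-in for fputil::DoubleDouble: value ~ hi + lo.
struct DD {
  double hi;
  double lo;
};

// Karp-Markstein-style division mirroring fputil::div above:
// q ~ 1/b.hi, r.hi = a.hi * q, then one cheap residual correction.
DD dd_div(DD a, DD b) {
  DD r;
  double q = 1.0 / b.hi;
  r.hi = a.hi * q;
  double e_hi = std::fma(b.hi, -r.hi, a.hi); // ah - bh * rh, single rounding
  double e_lo = std::fma(b.lo, -r.hi, a.lo); // al - bl * rh, single rounding
  r.lo = q * (e_hi + e_lo);
  return r;
}

int main() {
  // Build 1/3 and 1/7 as double-doubles: the lo parts are the FMA residuals
  // of the hi parts, i.e. the rounding errors of 1.0/3.0 and 1.0/7.0.
  DD a{1.0 / 3.0, 0.0}, b{1.0 / 7.0, 0.0};
  a.lo = std::fma(-3.0, a.hi, 1.0) / 3.0;
  b.lo = std::fma(-7.0, b.hi, 1.0) / 7.0;
  DD r = dd_div(a, b);
  // r.hi + r.lo should approximate 7/3 noticeably better than the plain
  // double quotient a.hi / b.hi would.
  long double err = ((long double)r.hi + (long double)r.lo) - 7.0L / 3.0L;
  std::printf("hi = %.17g, lo = %.3g, err vs 7/3 = %.3Lg\n", r.hi, r.lo, err);
}
```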
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 2e4ed8f2961da..ff84a434cc202 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -323,6 +323,27 @@ add_entrypoint_object(
-O3
)
+add_entrypoint_object(
+ tan
+ SRCS
+ tan.cpp
+ HDRS
+ ../tan.h
+ DEPENDS
+ .range_reduction_double
+ libc.hdr.errno_macros
+ libc.src.errno.errno
+ libc.src.__support.FPUtil.double_double
+ libc.src.__support.FPUtil.dyadic_float
+ libc.src.__support.FPUtil.except_value_utils
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.macros.optimization
+ COMPILE_OPTIONS
+ -O3
+)
+
add_entrypoint_object(
tanf
SRCS
diff --git a/libc/src/math/generic/tan.cpp b/libc/src/math/generic/tan.cpp
new file mode 100644
index 0000000000000..e6230e9c1cd69
--- /dev/null
+++ b/libc/src/math/generic/tan.cpp
@@ -0,0 +1,318 @@
+//===-- Double-precision tan function -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/tan.h"
+#include "hdr/errno_macros.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/double_double.h"
+#include "src/__support/FPUtil/dyadic_float.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/rounding_mode.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
+#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA
+
+#ifdef LIBC_TARGET_CPU_HAS_FMA
+#include "range_reduction_double_fma.h"
+
+// With FMA, we limit the maximum exponent to 2^16, so that the error bound
+// from the fma::range_reduction_small is bounded by 2^-88 instead of 2^-72.
+#define FAST_PASS_EXPONENT 16
+using LIBC_NAMESPACE::fma::ONE_TWENTY_EIGHT_OVER_PI;
+using LIBC_NAMESPACE::fma::range_reduction_small;
+using LIBC_NAMESPACE::fma::SIN_K_PI_OVER_128;
+
+LIBC_INLINE constexpr bool NO_FMA = false;
+#else
+#include "range_reduction_double_nofma.h"
+
+using LIBC_NAMESPACE::nofma::FAST_PASS_EXPONENT;
+using LIBC_NAMESPACE::nofma::ONE_TWENTY_EIGHT_OVER_PI;
+using LIBC_NAMESPACE::nofma::range_reduction_small;
+using LIBC_NAMESPACE::nofma::SIN_K_PI_OVER_128;
+
+LIBC_INLINE constexpr bool NO_FMA = true;
+#endif // LIBC_TARGET_CPU_HAS_FMA
+
+// TODO: We might be able to improve the performance of large range reduction
+// on non-FMA targets further by operating directly on 25-bit chunks of 128/pi
+// and pre-split SIN_K_PI_OVER_128, but that might double the memory footprint
+// of those lookup tables.
+#include "range_reduction_double_common.h"
+
+#if ((LIBC_MATH & LIBC_MATH_SKIP_ACCURATE_PASS) != 0)
+#define LIBC_MATH_TAN_SKIP_ACCURATE_PASS
+#endif
+
+namespace LIBC_NAMESPACE {
+
+using DoubleDouble = fputil::DoubleDouble;
+using Float128 = typename fputil::DyadicFloat<128>;
+
+namespace {
+
+LIBC_INLINE DoubleDouble tan_eval(const DoubleDouble &u) {
+ // Evaluate tan(y) = tan(x - k * (pi/128))
+ // We use the degree-9 Taylor approximation:
+ // tan(y) ~ P(y) = y + y^3/3 + 2*y^5/15 + 17*y^7/315 + 62*y^9/2835
+ // Then the error is bounded by:
+ // |tan(y) - P(y)| < 2^-6 * |y|^11 < 2^-6 * 2^-66 = 2^-72.
+ // For y ~ u_hi + u_lo, fully expanding the polynomial and dropping any terms
+ // < ulp(u_hi^3) gives us:
+ // P(y) = y + y^3/3 + 2*y^5/15 + 17*y^7/315 + 62*y^9/2835 = ...
+ // ~ u_hi + u_hi^3 * (1/3 + u_hi^2 * (2/15 + u_hi^2 * (17/315 +
+ // + u_hi^2 * 62/2835))) +
+ // + u_lo (1 + u_hi^2 * (1 + u_hi^2 * 2/3))
+ double u_hi_sq = u.hi * u.hi; // Error < ulp(u_hi^2) < 2^(-6 - 52) = 2^-58.
+ // p1 ~ 17/315 + u_hi^2 62 / 2835.
+ double p1 =
+ fputil::multiply_add(u_hi_sq, 0x1.664f4882c10fap-6, 0x1.ba1ba1ba1ba1cp-5);
+ // p2 ~ 1/3 + u_hi^2 2 / 15.
+ double p2 =
+ fputil::multiply_add(u_hi_sq, 0x1.1111111111111p-3, 0x1.5555555555555p-2);
+ // q1 ~ 1 + u_hi^2 * 2/3.
+ double q1 = fputil::multiply_add(u_hi_sq, 0x1.5555555555555p-1, 1.0);
+ double u_hi_3 = u_hi_sq * u.hi;
+ double u_hi_4 = u_hi_sq * u_hi_sq;
+ // p3 ~ 1/3 + u_hi^2 * (2/15 + u_hi^2 * (17/315 + u_hi^2 * 62/2835))
+ double p3 = fputil::multiply_add(u_hi_4, p1, p2);
+ // q2 ~ 1 + u_hi^2 * (1 + u_hi^2 * 2/3)
+ double q2 = fputil::multiply_add(u_hi_sq, q1, 1.0);
+ double tan_lo = fputil::multiply_add(u_hi_3, p3, u.lo * q2);
+ // Overall, |tan(y) - (u_hi + tan_lo)| < ulp(u_hi^3) <= 2^-71.
+ // And the relative error is:
+ // |(tan(y) - (u_hi + tan_lo)) / tan(y) | <= 2*ulp(u_hi^2) < 2^-64
+
+ return fputil::exact_add(u.hi, tan_lo);
+}
+
+// Accurate evaluation of tan for small u.
+Float128 tan_eval(const Float128 &u) {
+ Float128 u_sq = fputil::quick_mul(u, u);
+
+ // tan(x) ~ x + x^3/3 + x^5 * 2/15 + x^7 * 17/315 + x^9 * 62/2835 +
+ // + x^11 * 1382/155925 + x^13 * 21844/6081075 +
+ // + x^15 * 929569/638512875 + x^17 * 6404582/10854718875
+ // Relative errors < 2^-127 for |u| < pi/256.
+ constexpr Float128 TAN_COEFFS[] = {
+ {Sign::POS, -127, 0x80000000'00000000'00000000'00000000_u128}, // 1
+ {Sign::POS, -129, 0xaaaaaaaa'aaaaaaaa'aaaaaaaa'aaaaaaab_u128}, // 1/3
+ {Sign::POS, -130, 0x88888888'88888888'88888888'88888889_u128}, // 2/15
+ {Sign::POS, -132, 0xdd0dd0dd'0dd0dd0d'd0dd0dd0'dd0dd0dd_u128}, // 17/315
+ {Sign::POS, -133, 0xb327a441'6087cf99'6b5dd24e'ec0b327a_u128}, // 62/2835
+ {Sign::POS, -134,
+ 0x91371aaf'3611e47a'da8e1cba'7d900eca_u128}, // 1382/155925
+ {Sign::POS, -136,
+ 0xeb69e870'abeefdaf'e606d2e4'd1e65fbc_u128}, // 21844/6081075
+ {Sign::POS, -137,
+ 0xbed1b229'5baf15b5'0ec9af45'a2619971_u128}, // 929569/638512875
+ {Sign::POS, -138,
+ 0x9aac1240'1b3a2291'1b2ac7e3'e4627d0a_u128}, // 6404582/10854718875
+ };
+
+ return fputil::quick_mul(
+ u, fputil::polyeval(u_sq, TAN_COEFFS[0], TAN_COEFFS[1], TAN_COEFFS[2],
+ TAN_COEFFS[3], TAN_COEFFS[4], TAN_COEFFS[5],
+ TAN_COEFFS[6], TAN_COEFFS[7], TAN_COEFFS[8]));
+}
+
+// Calculate a / b = a * (1/b) for Float128.
+// Start from the initial approximation q ~ 1/b, apply 2 Newton-Raphson
+// iterations, then multiply by a.
+Float128 newton_raphson_div(const Float128 &a, Float128 b, double q) {
+ Float128 q0(q);
+ constexpr Float128 TWO(2.0);
+ b.sign = (b.sign == Sign::POS) ? Sign::NEG : Sign::POS;
+ Float128 q1 =
+ fputil::quick_mul(q0, fputil::quick_add(TWO, fputil::quick_mul(b, q0)));
+ Float128 q2 =
+ fputil::quick_mul(q1, fputil::quick_add(TWO, fputil::quick_mul(b, q1)));
+ return fputil::quick_mul(a, q2);
+}
+
+} // anonymous namespace
+
+LLVM_LIBC_FUNCTION(double, tan, (double x)) {
+ using FPBits = typename fputil::FPBits<double>;
+ FPBits xbits(x);
+
+ uint16_t x_e = xbits.get_biased_exponent();
+
+ DoubleDouble y;
+ unsigned k;
+ generic::LargeRangeReduction<NO_FMA> range_reduction_large;
+
+ // Fast pass: |x| < 2^FAST_PASS_EXPONENT (2^16 with FMA).
+ if (LIBC_LIKELY(x_e < FPBits::EXP_BIAS + FAST_PASS_EXPONENT)) {
+ // |x| < 2^-27
+ if (LIBC_UNLIKELY(x_e < FPBits::EXP_BIAS - 27)) {
+ // Signed zeros.
+ if (LIBC_UNLIKELY(x == 0.0))
+ return x;
+
+ // For |x| < 2^-27, |tan(x) - x| < ulp(x)/2.
+#ifdef LIBC_TARGET_CPU_HAS_FMA
+ return fputil::multiply_add(x, 0x1.0p-54, x);
+#else
+ if (LIBC_UNLIKELY(x_e < 4)) {
+ int rounding_mode = fputil::quick_get_round();
+ if (rounding_mode == FE_TOWARDZERO ||
+ (xbits.sign() == Sign::POS && rounding_mode == FE_DOWNWARD) ||
+ (xbits.sign() == Sign::NEG && rounding_mode == FE_UPWARD))
+ return FPBits(xbits.uintval() + 1).get_val();
+ }
+ return fputil::multiply_add(x, 0x1.0p-54, x);
+#endif // LIBC_TARGET_CPU_HAS_FMA
+ }
+
+ // Small range reduction.
+ k = range_reduction_small(x, y);
+ } else {
+ // Inf or NaN
+ if (LIBC_UNLIKELY(x_e > 2 * FPBits::EXP_BIAS)) {
+ // tan(+-Inf) = NaN
+ if (xbits.get_mantissa() == 0) {
+ fputil::set_errno_if_required(EDOM);
+ fputil::raise_except_if_required(FE_INVALID);
+ }
+ return x + FPBits::quiet_nan().get_val();
+ }
+
+ // Large range reduction.
+ k = range_reduction_large.compute_high_part(x);
+ y = range_reduction_large.fast();
+ }
+
+ DoubleDouble tan_y = tan_eval(y);
+
+ // Look up sin(k * pi/128) and cos(k * pi/128).
+ // Memory-saving versions:
+
+ // Use 128-entry table instead:
+ // DoubleDouble sin_k = SIN_K_PI_OVER_128[k & 127];
+ // uint64_t sin_s = static_cast<uint64_t>(k & 128) << (63 - 7);
+ // sin_k.hi = FPBits(FPBits(sin_k.hi).uintval() ^ sin_s).get_val();
+ // sin_k.lo = FPBits(FPBits(sin_k.lo).uintval() ^ sin_s).get_val();
+ // DoubleDouble cos_k = SIN_K_PI_OVER_128[(k + 64) & 127];
+ // uint64_t cos_s = static_cast<uint64_t>((k + 64) & 128) << (63 - 7);
+ // cos_k.hi = FPBits(FPBits(cos_k.hi).uintval() ^ cos_s).get_val();
+ // cos_k.lo = FPBits(FPBits(cos_k.lo).uintval() ^ cos_s).get_val();
+
+ // Use 64-entry table instead:
+ // auto get_idx_dd = [](unsigned kk) -> DoubleDouble {
+ // unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
+ // DoubleDouble ans = SIN_K_PI_OVER_128[idx];
+ // if (kk & 128) {
+ // ans.hi = -ans.hi;
+ // ans.lo = -ans.lo;
+ // }
+ // return ans;
+ // };
+ // DoubleDouble msin_k = get_idx_dd(k + 128);
+ // DoubleDouble cos_k = get_idx_dd(k + 64);
+
+ // Fast look up version, but needs 256-entry table.
+ // cos(k * pi/128) = sin(k * pi/128 + pi/2) = sin((k + 64) * pi/128).
+ DoubleDouble msin_k = SIN_K_PI_OVER_128[(k + 128) & 255];
+ DoubleDouble cos_k = SIN_K_PI_OVER_128[(k + 64) & 255];
+
+ // After range reduction, k = round(x * 128 / pi) and y = x - k * (pi / 128).
+ // So k is an integer and -pi / 256 <= y <= pi / 256.
+ // Then tan(x) = sin(x) / cos(x)
+ // = sin(k * pi/128 + y) / cos(k * pi/128 + y)
+ // = (cos(y) * sin(k*pi/128) + sin(y) * cos(k*pi/128)) /
+ // / (cos(y) * cos(k*pi/128) - sin(y) * sin(k*pi/128))
+ // = (sin(k*pi/128) + tan(y) * cos(k*pi/128)) /
+ // / (cos(k*pi/128) - tan(y) * sin(k*pi/128))
+ DoubleDouble cos_k_tan_y = fputil::quick_mult<NO_FMA>(tan_y, cos_k);
+ DoubleDouble msin_k_tan_y = fputil::quick_mult<NO_FMA>(tan_y, msin_k);
+
+ // num_dd = sin(k*pi/128) + tan(y) * cos(k*pi/128)
+ DoubleDouble num_dd = fputil::exact_add<false>(cos_k_tan_y.hi, -msin_k.hi);
+ // den_dd = cos(k*pi/128) - tan(y) * sin(k*pi/128)
+ DoubleDouble den_dd = fputil::exact_add<false>(msin_k_tan_y.hi, cos_k.hi);
+ num_dd.lo += cos_k_tan_y.lo - msin_k.lo;
+ den_dd.lo += msin_k_tan_y.lo + cos_k.lo;
+
+#ifdef LIBC_MATH_TAN_SKIP_ACCURATE_PASS
+ double tan_x = (num_dd.hi + num_dd.lo) / (den_dd.hi + den_dd.lo);
+ return tan_x;
+#else
+ // Accuracy test and accurate pass for the correctly rounded implementation.
+
+ // Accurate double-double division
+ DoubleDouble tan_x = fputil::div(num_dd, den_dd);
+
+ // The relative error for k != 0 mod 64 is:
+ //   absolute error / min(sin(k*pi/128), cos(k*pi/128)) <= 2^-71 / 2^-7
+ //   = 2^-64.
+ // For k = 0 mod 64, the relative error is bounded by:
+ //   2^-71 / 2^(exponent of y).
+ constexpr int ERR = 64;
+
+ int y_exp = 7 + FPBits(y.hi).get_exponent();
+ int rel_err_exp = ERR + static_cast<int>((k & 63) == 0) * y_exp;
+ int64_t tan_x_err = static_cast<int64_t>(FPBits(tan_x.hi).uintval()) -
+ (static_cast<int64_t>(rel_err_exp) << 52);
+ double tan_err = FPBits(static_cast<uint64_t>(tan_x_err)).get_val();
+
+ double err_higher = tan_x.lo + tan_err;
+ double err_lower = tan_x.lo - tan_err;
+
+ double tan_upper = tan_x.hi + err_higher;
+ double tan_lower = tan_x.hi + err_lower;
+
+ // Ziv's rounding test.
+ if (LIBC_LIKELY(tan_upper == tan_lower))
+ return tan_upper;
+
+ Float128 u_f128;
+ if (LIBC_LIKELY(x_e < FPBits::EXP_BIAS + FAST_PASS_EXPONENT))
+ u_f128 = generic::range_reduction_small_f128(x);
+ else
+ u_f128 = range_reduction_large.accurate();
+
+ Float128 tan_u = tan_eval(u_f128);
+
+ auto get_sin_k = [](unsigned kk) -> Float128 {
+ unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
+ Float128 ans = generic::SIN_K_PI_OVER_128_F128[idx];
+ if (kk & 128)
+ ans.sign = Sign::NEG;
+ return ans;
+ };
+
+ // cos(k * pi/128) = sin(k * pi/128 + pi/2) = sin((k + 64) * pi/128).
+ Float128 sin_k_f128 = get_sin_k(k);
+ Float128 cos_k_f128 = get_sin_k(k + 64);
+ Float128 msin_k_f128 = get_sin_k(k + 128);
+
+ // num_f128 = sin(k*pi/128) + tan(y) * cos(k*pi/128)
+ Float128 num_f128 =
+ fputil::quick_add(sin_k_f128, fputil::quick_mul(cos_k_f128, tan_u));
+ // den_f128 = cos(k*pi/128) - tan(y) * sin(k*pi/128)
+ Float128 den_f128 =
+ fputil::quick_add(cos_k_f128, fputil::quick_mul(msin_k_f128, tan_u));
+
+ // tan(x) = (sin(k*pi/128) + tan(y) * cos(k*pi/128)) /
+ // / (cos(k*pi/128) - tan(y) * sin(k*pi/128))
+ // TODO: The initial seed 1.0/den_dd.hi for Newton-Raphson reciprocal can be
+ // reused from DoubleDouble fputil::div in the fast pass.
+ Float128 result = newton_raphson_div(num_f128, den_f128, 1.0 / den_dd.hi);
+
+ // TODO: Add assertion if Ziv's accuracy tests fail in debug mode.
+ // https://github.com/llvm/llvm-project/issues/96452.
+ return static_cast<double>(result);
+
+#endif // !LIBC_MATH_TAN_SKIP_ACCURATE_PASS
+}
+
+} // namespace LIBC_NAMESPACE
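A note on newton_raphson_div above: flipping b's sign turns
quick_add(TWO, quick_mul(b, q0)) into the classic Newton-Raphson
reciprocal step q1 = q0 * (2 - b * q0), which roughly doubles the number
of correct bits per iteration, so the double-precision seed
1.0 / den_dd.hi (~53 bits) reaches Float128 accuracy in two steps. The
quadratic convergence is visible in a throwaway double-precision sketch
(the float seed is deliberate, to make the error large enough to watch):
```cpp
#include <cmath>
#include <cstdio>

int main() {
  double b = 3.0;
  double q = 1.0f / 3.0f; // deliberately low-precision seed, ~24 correct bits
  for (int i = 0; i < 3; ++i) {
    std::printf("iter %d: |q - 1/3| = %.3g\n", i, std::fabs(q - 1.0 / 3.0));
    q = q * (2.0 - b * q); // Newton-Raphson step for f(q) = 1/q - b
  }
  // The error drops roughly quadratically:
  // ~1e-8 -> ~1e-16 -> the rounding floor of double.
}
```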
diff --git a/libc/src/math/x86_64/CMakeLists.txt b/libc/src/math/x86_64/CMakeLists.txt
deleted file mode 100644
index 3cfc422e56d49..0000000000000
--- a/libc/src/math/x86_64/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-add_entrypoint_object(
- tan
- SRCS
- tan.cpp
- HDRS
- ../tan.h
- COMPILE_OPTIONS
- -O2
-)
diff --git a/libc/src/math/x86_64/tan.cpp b/libc/src/math/x86_64/tan.cpp
deleted file mode 100644
index bc0e0fc7d1ffa..0000000000000
--- a/libc/src/math/x86_64/tan.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- Implementation of the tan function for x86_64 ---------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "src/math/tan.h"
-#include "src/__support/common.h"
-
-namespace LIBC_NAMESPACE {
-
-LLVM_LIBC_FUNCTION(double, tan, (double x)) {
- double result;
- // The fptan instruction pushes the number 1 on to the FP stack after
- // computing tan. So, we read out the one before popping the actual result.
- __asm__ __volatile__("fptan" : "+t"(x));
- __asm__ __volatile__("fstpl %0" : "=m"(result));
- return result;
-}
-
-} // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index f244e7addc151..123e8ffdb5be8 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -3940,3 +3940,13 @@ add_fp_unittest(
DEPENDS
libc.src.math.sincos
)
+
+add_fp_unittest(
+ tan_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ tan_test.cpp
+ DEPENDS
+ libc.src.math.tan
+)
diff --git a/libc/test/src/math/smoke/tan_test.cpp b/libc/test/src/math/smoke/tan_test.cpp
new file mode 100644
index 0000000000000..498dba76b6e71
--- /dev/null
+++ b/libc/test/src/math/smoke/tan_test.cpp
@@ -0,0 +1,26 @@
+//===-- Unittests for tan -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/tan.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+using LlvmLibcTanTest = LIBC_NAMESPACE::testing::FPTest<double>;
+
+using LIBC_NAMESPACE::testing::tlog;
+
+TEST_F(LlvmLibcTanTest, SpecialNumbers) {
+ EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::tan(aNaN));
+ EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::tan(inf));
+ EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::tan(neg_inf));
+ EXPECT_FP_EQ_ALL_ROUNDING(zero, LIBC_NAMESPACE::tan(zero));
+ EXPECT_FP_EQ_ALL_ROUNDING(neg_zero, LIBC_NAMESPACE::tan(neg_zero));
+ EXPECT_FP_EQ(0x1.0p-50, LIBC_NAMESPACE::tan(0x1.0p-50));
+ EXPECT_FP_EQ(min_normal, LIBC_NAMESPACE::tan(min_normal));
+ EXPECT_FP_EQ(min_denormal, LIBC_NAMESPACE::tan(min_denormal));
+}
diff --git a/libc/test/src/math/tan_test.cpp b/libc/test/src/math/tan_test.cpp
index d813dccc38369..e9e3e59f4d12d 100644
--- a/libc/test/src/math/tan_test.cpp
+++ b/libc/test/src/math/tan_test.cpp
@@ -6,27 +6,116 @@
//
//===----------------------------------------------------------------------===//
+#include "src/__support/FPUtil/FPBits.h"
#include "src/math/tan.h"
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
-#include "hdr/math_macros.h"
-
using LlvmLibcTanTest = LIBC_NAMESPACE::testing::FPTest<double>;
namespace mpfr = LIBC_NAMESPACE::testing::mpfr;
-TEST_F(LlvmLibcTanTest, Range) {
- static constexpr double _2pi = 6.283185307179586;
- constexpr StorageType COUNT = 100'000;
- constexpr StorageType STEP = STORAGE_MAX / COUNT;
- for (StorageType i = 0, v = 0; i <= COUNT; ++i, v += STEP) {
- double x = FPBits(v).get_val();
- // TODO: Expand the range of testing after range reduction is implemented.
- if (isnan(x) || isinf(x) || x > _2pi || x < -_2pi)
- continue;
-
- ASSERT_MPFR_MATCH(mpfr::Operation::Tan, x, LIBC_NAMESPACE::tan(x), 1.0);
+using LIBC_NAMESPACE::testing::tlog;
+
+TEST_F(LlvmLibcTanTest, TrickyInputs) {
+ constexpr double INPUTS[] = {
+ 0x1.d130383d17321p-27, 0x1.8000000000009p-23, 0x1.8000000000024p-22,
+ 0x1.800000000009p-21, 0x1.20000000000f3p-20, 0x1.800000000024p-20,
+ 0x1.e0000000001c2p-20, 0x1.0da8cc189b47dp-10, 0x1.00a33764a0a83p-7,
+ 0x1.911a18779813fp-7, 0x1.940c877fb7dacp-7, 0x1.f42fb19b5b9b2p-6,
+ 0x1.0285070f9f1bcp-5, 0x1.6ca9ef729af76p-1, 0x1.23f40dccdef72p+0,
+ 0x1.43cf16358c9d7p+0, 0x1.addf3b9722265p+0, 0x1.ae78d360afa15p+0,
+ 0x1.fe81868fc47fep+1, 0x1.e31b55306f22cp+2, 0x1.e639103a05997p+2,
+ 0x1.f7898d5a756ddp+2, 0x1.1685973506319p+3, 0x1.5f09cad750ab1p+3,
+ 0x1.aaf85537ea4c7p+3, 0x1.4f2b874135d27p+4, 0x1.13114266f9764p+4,
+ 0x1.a211877de55dbp+4, 0x1.a5eece87e8606p+4, 0x1.a65d441ea6dcep+4,
+ 0x1.1ffb509f3db15p+5, 0x1.2345d1e090529p+5, 0x1.c96e28eb679f8p+5,
+ 0x1.da1838053b866p+5, 0x1.be886d9c2324dp+6, 0x1.ab514bfc61c76p+7,
+ 0x1.14823229799c2p+7, 0x1.48ff1782ca91dp+8, 0x1.dcbfda0c7559ep+8,
+ 0x1.dcbfda0c7559ep+8, 0x1.2e566149bf5fdp+9, 0x1.cb996c60f437ep+9,
+ 0x1.119471e9216cdp+10, 0x1.ae945054939c2p+10, 0x1.fffffffffdb6p+24,
+ 0x1.fd4da4ef37075p+29, 0x1.55202aefde314p+31, 0x1.b951f1572eba5p+31,
+ 0x1.76e86a7485a46p59, 0x1.7776c2343ba4ep+101, 0x1.85fc0f04c0128p+101,
+ 0x1.678309fa50d58p+110, 0x1.524489232dc4ap+178, 0x1.fffffffffef4ep+199,
+ 0x1.6deb37da81129p+205, 0x1.3eec5912ea7cdp+331, 0x1.08087e9aad90bp+887,
+ 0x1.6ac5b262ca1ffp+843, 0x1.8bb5847d49973p+845, 0x1.6ac5b262ca1ffp+849,
+ 0x1.f08b14e1c4d0fp+890, 0x1.2b5fe88a9d8d5p+903, 0x1.a880417b7b119p+1023,
+ 0x1.f6d7518808571p+1023,
+ };
+ constexpr int N = sizeof(INPUTS) / sizeof(INPUTS[0]);
+
+ for (int i = 0; i < N; ++i) {
+ double x = INPUTS[i];
+ ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tan, x,
+ LIBC_NAMESPACE::tan(x), 0.5);
+ ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tan, -x,
+ LIBC_NAMESPACE::tan(-x), 0.5);
}
}
+
+TEST_F(LlvmLibcTanTest, InDoubleRange) {
+ constexpr uint64_t COUNT = 1'234'51;
+ uint64_t START = LIBC_NAMESPACE::fputil::FPBits<double>(0x1.0p-50).uintval();
+ uint64_t STOP = LIBC_NAMESPACE::fputil::FPBits<double>(0x1.0p200).uintval();
+ uint64_t STEP = (STOP - START) / COUNT;
+
+ auto test = [&](mpfr::RoundingMode rounding_mode) {
+ mpfr::ForceRoundingMode force_rounding(rounding_mode);
+ if (!force_rounding.success)
+ return;
+
+ uint64_t fails = 0;
+ uint64_t tested = 0;
+ uint64_t total = 0;
+ double worst_input = 0.0, worst_output = 0.0;
+ double ulp = 0.5;
+
+ for (uint64_t i = 0, v = START; i <= COUNT; ++i, v += STEP) {
+ double x = FPBits(v).get_val();
+ if (isnan(x) || isinf(x))
+ continue;
+
+ double result = LIBC_NAMESPACE::tan(x);
+ ++total;
+ if (isnan(result) || isinf(result))
+ continue;
+
+ ++tested;
+
+ if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Tan, x, result,
+ 0.5, rounding_mode)) {
+ ++fails;
+ while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Tan, x,
+ result, ulp, rounding_mode)) {
+ worst_input = x;
+ worst_output = result;
+
+ if (ulp > 1000.0)
+ break;
+
+ ulp *= 2.0;
+ }
+ }
+ }
+ if (fails) {
+ tlog << " Tan failed: " << fails << "/" << tested << "/" << total
+ << " tests.\n";
+ tlog << " Max ULPs is at most: " << static_cast<uint64_t>(ulp) << ".\n";
+ EXPECT_MPFR_MATCH(mpfr::Operation::Tan, worst_input, worst_output, 0.5,
+ rounding_mode);
+ }
+ };
+
+ tlog << " Test Rounding To Nearest...\n";
+ test(mpfr::RoundingMode::Nearest);
+
+ tlog << " Test Rounding Downward...\n";
+ test(mpfr::RoundingMode::Downward);
+
+ tlog << " Test Rounding Upward...\n";
+ test(mpfr::RoundingMode::Upward);
+
+ tlog << " Test Rounding Toward Zero...\n";
+ test(mpfr::RoundingMode::TowardZero);
+}
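As a final sanity check on this patch, the fast-pass identity
  tan(x) = (sin(k*pi/128) + tan(y) * cos(k*pi/128)) /
           (cos(k*pi/128) - tan(y) * sin(k*pi/128)),  x = k*pi/128 + y,
can be verified numerically in plain doubles against std::tan. This is
an illustration only; the patch evaluates the same identity in
DoubleDouble/Float128 arithmetic with a rigorous error budget.
```cpp
#include <cmath>
#include <cstdio>

int main() {
  const double PI_OVER_128 = 0x1.921fb54442d18p-6; // pi/128 rounded to double
  double x = 12.345;
  // Range reduction: k = round(x * 128/pi), y = x - k * pi/128, |y| <= pi/256.
  long k = std::lround(x / PI_OVER_128);
  double y = x - (double)k * PI_OVER_128;
  double t = std::tan(y);
  double s = std::sin((double)k * PI_OVER_128);
  double c = std::cos((double)k * PI_OVER_128);
  // tan(x) reconstructed from tan(y) and the table values sin/cos(k*pi/128).
  double via_identity = (s + t * c) / (c - t * s);
  std::printf("std::tan(x)  = %.17g\nvia identity = %.17g\n", std::tan(x),
              via_identity);
}
```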
>From 10b43f429a9f5cc7315c5b0490433647c9a93493 Mon Sep 17 00:00:00 2001
From: Krystian Stasiowski <sdkrystian at gmail.com>
Date: Wed, 3 Jul 2024 18:19:58 -0400
Subject: [PATCH 188/246] [Clang][Sema] Correctly transform dependent operands
of overloaded binary operator& (#97596)
Currently, `TreeTransform::TransformCXXOperatorCallExpr` calls
`TreeTransform::TransformAddressOfOperand` to transform the first
operand of a `CXXOperatorCallExpr` when its `OverloadOperatorKind` is
`OO_Amp` -- regardless of arity. This results in the first operand of
binary `operator&` being incorrectly transformed as if it were the
operand of the address-of operator in cases such as the following:
```
struct A {
int x;
};
void operator&(A, A);
template<typename T>
struct B {
int f() {
return T::x & 1; // invalid reference to 'A::x' is not diagnosed because 'T::x' is incorrectly transformed as if it was the operand of unary operator&
}
};
template struct B<A>;
```
Prior to #92318 we would build a `CXXDependentScopeMemberExpr` for
`T::x` (as with most dependent qualified names that were not member
qualified names). Since `TreeTransform::TransformAddressOfOperand` only
differs from `TransformExpr` for `DependentScopeDeclRefExpr` and
`UnresolvedLookupExpr` operands, `T::x` was transformed "correctly". Now
that we build a `DependentScopeDeclRefExpr` for `T::x`, it is
incorrectly transformed as if it were the operand of the address-of
operator, and we fail to diagnose the invalid reference to a non-static
data member. This patch fixes the issue by only calling
`TreeTransform::TransformAddressOfOperand` for `CXXOperatorCallExpr`s
with a single operand. This fixes #97483.
---
clang/docs/ReleaseNotes.rst | 2 ++
clang/lib/Sema/TreeTransform.h | 2 +-
.../expr.prim.id/expr.prim.id.general/p4.cpp | 16 ++++++++++++++++
3 files changed, 19 insertions(+), 1 deletion(-)
create mode 100644 clang/test/CXX/expr/expr.prim/expr.prim.id/expr.prim.id.general/p4.cpp
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index f40fd1cd145bb..f149684214567 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -954,6 +954,8 @@ Bug Fixes to C++ Support
forward-declared class. (#GH93512).
- Fixed a bug in access checking inside return-type-requirement of compound requirements. (#GH93788).
- Fixed an assertion failure about invalid conversion when calling lambda. (#GH96205).
+- Fixed a bug where the first operand of binary ``operator&`` would be transformed as if it were the operand
+ of the address-of operator. (#GH97483).
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 51ba22f99e3a3..4450ebaf615cd 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -12919,7 +12919,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
}
ExprResult First;
- if (E->getOperator() == OO_Amp)
+ if (E->getNumArgs() == 1 && E->getOperator() == OO_Amp)
First = getDerived().TransformAddressOfOperand(E->getArg(0));
else
First = getDerived().TransformExpr(E->getArg(0));
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.id/expr.prim.id.general/p4.cpp b/clang/test/CXX/expr/expr.prim/expr.prim.id/expr.prim.id.general/p4.cpp
new file mode 100644
index 0000000000000..e6d9c171e3893
--- /dev/null
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.id/expr.prim.id.general/p4.cpp
@@ -0,0 +1,16 @@
+// RUN: %clang_cc1 -verify %s
+
+struct A {
+ int x;
+};
+
+void operator&(A, A);
+
+template<typename T>
+struct B {
+ int f() {
+ return T::x & 1; // expected-error {{invalid use of non-static data member 'x'}}
+ }
+};
+
+template struct B<A>; // expected-note {{in instantiation of}}
>From af7ee51a90beeb8198ceac0100c52b102543cf4b Mon Sep 17 00:00:00 2001
From: Jeff Niu <jeff at modular.com>
Date: Wed, 3 Jul 2024 15:32:45 -0700
Subject: [PATCH 189/246] [mlir][bytecode] Fix external resource bytecode
parsing (#97650)
The key was being dropped for external resources because they aren't
present in the dialect resource name mapper.
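That is, the renaming map is only populated for dialect resources, so
the lookup misses for external resources and the old code substituted an
empty key. A minimal sketch of the lookup-with-fallback pattern the fix
adopts, with a plain std::map standing in for the reader's
dialectResourceHandleRenamingMap:
```cpp
#include <map>
#include <string>

// Resolve a resource key through a renaming map; fall back to the original
// key when no rename is recorded (the external-resource case).
std::string resolveKey(const std::map<std::string, std::string> &renames,
                       const std::string &key) {
  auto it = renames.find(key);
  return it == renames.end() ? key : it->second; // previously returned "" here
}
```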
---
mlir/lib/Bytecode/Reader/BytecodeReader.cpp | 2 +-
mlir/test/Bytecode/external_resources.mlir | 19 +++++++++++++++++++
2 files changed, 20 insertions(+), 1 deletion(-)
create mode 100644 mlir/test/Bytecode/external_resources.mlir
diff --git a/mlir/lib/Bytecode/Reader/BytecodeReader.cpp b/mlir/lib/Bytecode/Reader/BytecodeReader.cpp
index f767740a8d177..1064896a1f714 100644
--- a/mlir/lib/Bytecode/Reader/BytecodeReader.cpp
+++ b/mlir/lib/Bytecode/Reader/BytecodeReader.cpp
@@ -706,7 +706,7 @@ LogicalResult ResourceSectionReader::initialize(
auto resolveKey = [&](StringRef key) -> StringRef {
auto it = dialectResourceHandleRenamingMap.find(key);
if (it == dialectResourceHandleRenamingMap.end())
- return "";
+ return key;
return it->second;
};
diff --git a/mlir/test/Bytecode/external_resources.mlir b/mlir/test/Bytecode/external_resources.mlir
new file mode 100644
index 0000000000000..df064d7795338
--- /dev/null
+++ b/mlir/test/Bytecode/external_resources.mlir
@@ -0,0 +1,19 @@
+// RUN: mlir-opt %s -emit-bytecode | mlir-opt | FileCheck %s
+
+module {
+}
+
+{-#
+ // CHECK: external_resources
+ external_resources: {
+ // CHECK-NEXT: mlir_reproducer
+ mlir_reproducer: {
+ // CHECK-NEXT: pipeline: "builtin.module(func.func(canonicalize,cse))",
+ pipeline: "builtin.module(func.func(canonicalize,cse))",
+ // CHECK-NEXT: disable_threading: true
+ disable_threading: true,
+ // CHECK-NEXT: verify_each: true
+ verify_each: true
+ }
+ }
+#-}
>From eaabd762bd54dec75d8fee69f6aa1c26d1cbc68f Mon Sep 17 00:00:00 2001
From: Han-Chung Wang <hanhan0912 at gmail.com>
Date: Wed, 3 Jul 2024 16:03:41 -0700
Subject: [PATCH 190/246] Revert "[MLIR][Vector] Generalize
DropUnitDimFromElementwiseOps to non leading / trailing dimensions." (#97652)
Reverts llvm/llvm-project#92934 because it breaks some lowering. To
repro: `mlir-opt -test-vector-transfer-flatten-patterns ~/repro.mlir`
```mlir
func.func @unit_dim_folding(%arg0: vector<1x1xf32>) -> vector<1x1xf32> {
%cst = arith.constant dense<0.000000e+00> : vector<1x1xf32>
%0 = arith.mulf %arg0, %cst : vector<1x1xf32>
return %0 : vector<1x1xf32>
}
```
---
.../Vector/Transforms/VectorTransforms.cpp | 55 +++++++++----------
.../Vector/vector-transfer-flatten.mlir | 36 ------------
2 files changed, 26 insertions(+), 65 deletions(-)
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index c7d3022eff4d3..da5954b70a2ec 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -1622,27 +1622,7 @@ struct ChainedReduction final : OpRewritePattern<vector::ReductionOp> {
}
};
-// Scalable unit dimensions are not supported. Folding such dimensions would
-// require "shifting" the scalable flag onto some other fixed-width dim (e.g.
-// vector<[1]x4xf32> -> vector<[4]xf32>). This could be implemented in the
-// future.
-static VectorType dropNonScalableUnitDimFromType(VectorType inVecTy) {
- auto inVecShape = inVecTy.getShape();
- SmallVector<int64_t> newShape;
- SmallVector<bool> newScalableDims;
- for (auto [dim, isScalable] :
- llvm::zip_equal(inVecShape, inVecTy.getScalableDims())) {
- if (dim == 1 && !isScalable)
- continue;
-
- newShape.push_back(dim);
- newScalableDims.push_back(isScalable);
- }
-
- return VectorType::get(newShape, inVecTy.getElementType(), newScalableDims);
-}
-
-/// For vectors with at least an unit dim, replaces:
+/// For vectors with either leading or trailing unit dim, replaces:
/// elementwise(a, b)
/// with:
/// sc_a = shape_cast(a)
@@ -1654,16 +1634,20 @@ static VectorType dropNonScalableUnitDimFromType(VectorType inVecTy) {
/// required to be rank > 1.
///
/// Ex:
+/// ```
/// %mul = arith.mulf %B_row, %A_row : vector<1x[4]xf32>
/// %cast = vector.shape_cast %mul : vector<1x[4]xf32> to vector<[4]xf32>
+/// ```
///
/// gets converted to:
///
+/// ```
/// %B_row_sc = vector.shape_cast %B_row : vector<1x[4]xf32> to vector<[4]xf32>
/// %A_row_sc = vector.shape_cast %A_row : vector<1x[4]xf32> to vector<[4]xf32>
/// %mul = arith.mulf %B_row_sc, %A_row_sc : vector<[4]xf32>
/// %cast_new = vector.shape_cast %mul : vector<[4]xf32> to vector<1x[4]xf32>
/// %cast = vector.shape_cast %cast_new : vector<1x[4]xf32> to vector<[4]xf32>
+/// ```
///
/// Patterns for folding shape_casts should instantly eliminate `%cast_new` and
/// `%cast`.
@@ -1683,29 +1667,42 @@ struct DropUnitDimFromElementwiseOps final
// guaranteed to have identical shapes (with some exceptions such as
// `arith.select`) and it suffices to only check one of them.
auto sourceVectorType = dyn_cast<VectorType>(op->getOperand(0).getType());
- if (!sourceVectorType || sourceVectorType.getRank() < 2)
+ if (!sourceVectorType)
+ return failure();
+ if (sourceVectorType.getRank() < 2)
+ return failure();
+
+ bool hasTrailingDimUnitFixed =
+ ((sourceVectorType.getShape().back() == 1) &&
+ (!sourceVectorType.getScalableDims().back()));
+ bool hasLeadingDimUnitFixed =
+ ((sourceVectorType.getShape().front() == 1) &&
+ (!sourceVectorType.getScalableDims().front()));
+ if (!hasLeadingDimUnitFixed && !hasTrailingDimUnitFixed)
return failure();
+ // Drop leading/trailing unit dim by applying vector.shape_cast to all
+ // operands
+ int64_t dim = hasLeadingDimUnitFixed ? 0 : sourceVectorType.getRank() - 1;
+
SmallVector<Value> newOperands;
auto loc = op->getLoc();
for (auto operand : op->getOperands()) {
auto opVectorType = cast<VectorType>(operand.getType());
- auto newVType = dropNonScalableUnitDimFromType(opVectorType);
- if (newVType == opVectorType)
- return rewriter.notifyMatchFailure(op, "No unit dimension to remove.");
-
+ VectorType newVType = VectorType::Builder(opVectorType).dropDim(dim);
auto opSC = rewriter.create<vector::ShapeCastOp>(loc, newVType, operand);
newOperands.push_back(opSC);
}
VectorType newResultVectorType =
- dropNonScalableUnitDimFromType(resultVectorType);
- // Create an updated elementwise Op without unit dim.
+ VectorType::Builder(resultVectorType).dropDim(dim);
+ // Create an updated elementwise Op without leading/trailing unit dim
Operation *elementwiseOp =
rewriter.create(loc, op->getName().getIdentifier(), newOperands,
newResultVectorType, op->getAttrs());
- // Restore the unit dim by applying vector.shape_cast to the result.
+ // Restore the leading/trailing unit dim by applying vector.shape_cast
+ // to the result
rewriter.replaceOpWithNewOp<ShapeCastOp>(op, resultVectorType,
elementwiseOp->getResult(0));
diff --git a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
index 3a5041fca53fc..5fd3cbd54aa58 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
@@ -604,42 +604,6 @@ func.func @fold_unit_dims_entirely(%arg0 : vector<8xi32>,
// -----
-func.func @fold_inner_unit_dim(%arg0 : vector<8x1x3xf128>,
- %arg1 : vector<1x8x3xf128>) -> vector<8x3xf128> {
- %sc_arg1 = vector.shape_cast %arg1 : vector<1x8x3xf128> to vector<8x1x3xf128>
- %mul = arith.mulf %arg0, %sc_arg1 : vector<8x1x3xf128>
- %res = vector.shape_cast %mul : vector<8x1x3xf128> to vector<8x3xf128>
- return %res : vector<8x3xf128>
-}
-
-// CHECK-LABEL: func.func @fold_inner_unit_dim(
-// CHECK-SAME: %[[VAL_0:.*]]: vector<8x1x3xf128>,
-// CHECK-SAME: %[[VAL_1:.*]]: vector<1x8x3xf128>) -> vector<8x3xf128> {
-// CHECK: %[[VAL_2:.*]] = vector.shape_cast %[[VAL_0]] : vector<8x1x3xf128> to vector<8x3xf128>
-// CHECK: %[[VAL_3:.*]] = vector.shape_cast %[[VAL_1]] : vector<1x8x3xf128> to vector<8x3xf128>
-// CHECK: %[[VAL_4:.*]] = arith.mulf %[[VAL_2]], %[[VAL_3]] : vector<8x3xf128>
-// CHECK: return %[[VAL_4]] : vector<8x3xf128>
-
-// -----
-
-func.func @fold_inner_unit_dim_scalable(%arg0 : vector<8x1x[1]x3xf128>,
- %arg1 : vector<1x8x[1]x3xf128>) -> vector<8x[1]x3xf128> {
- %sc_arg1 = vector.shape_cast %arg1 : vector<1x8x[1]x3xf128> to vector<8x1x[1]x3xf128>
- %mul = arith.mulf %arg0, %sc_arg1 : vector<8x1x[1]x3xf128>
- %res = vector.shape_cast %mul : vector<8x1x[1]x3xf128> to vector<8x[1]x3xf128>
- return %res : vector<8x[1]x3xf128>
-}
-
-// CHECK-LABEL: func.func @fold_inner_unit_dim_scalable(
-// CHECK-SAME: %[[VAL_0:.*]]: vector<8x1x[1]x3xf128>,
-// CHECK-SAME: %[[VAL_1:.*]]: vector<1x8x[1]x3xf128>) -> vector<8x[1]x3xf128> {
-// CHECK: %[[VAL_2:.*]] = vector.shape_cast %[[VAL_0]] : vector<8x1x[1]x3xf128> to vector<8x[1]x3xf128>
-// CHECK: %[[VAL_3:.*]] = vector.shape_cast %[[VAL_1]] : vector<1x8x[1]x3xf128> to vector<8x[1]x3xf128>
-// CHECK: %[[VAL_4:.*]] = arith.mulf %[[VAL_2]], %[[VAL_3]] : vector<8x[1]x3xf128>
-// CHECK: return %[[VAL_4]] : vector<8x[1]x3xf128>
-
-// -----
-
func.func @negative_out_of_bound_transfer_read(
%arg : memref<?x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>>) -> vector<5x4x3x2xi8> {
%c0 = arith.constant 0 : index
>From d3a76b03d8f35903bc17cb85c14c9816c963cff6 Mon Sep 17 00:00:00 2001
From: Jon Roelofs <jonathan_roelofs at apple.com>
Date: Wed, 3 Jul 2024 16:25:32 -0700
Subject: [PATCH 191/246] [llvm][SLPVectorizer] Fix a bad cast assertion
(#97621)
Fixes: rdar://128092379
---
.../Transforms/Vectorize/SLPVectorizer.cpp | 83 ++++++++++++-------
.../Transforms/SLPVectorizer/rdar128092379.ll | 39 +++++++++
2 files changed, 90 insertions(+), 32 deletions(-)
create mode 100644 llvm/test/Transforms/SLPVectorizer/rdar128092379.ll
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 11f9ad70dc725..64c5fb49ec85c 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -361,12 +361,14 @@ static bool isCommutative(Instruction *I) {
return I->isCommutative();
}
-/// \returns inserting index of InsertElement or InsertValue instruction,
-/// using Offset as base offset for index.
-static std::optional<unsigned> getInsertIndex(const Value *InsertInst,
- unsigned Offset = 0) {
+template <typename T>
+static std::optional<unsigned> getInsertExtractIndex(const Value *Inst,
+ unsigned Offset) {
+ static_assert(std::is_same_v<T, InsertElementInst> ||
+ std::is_same_v<T, ExtractElementInst>,
+ "unsupported T");
int Index = Offset;
- if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
+ if (const auto *IE = dyn_cast<T>(Inst)) {
const auto *VT = dyn_cast<FixedVectorType>(IE->getType());
if (!VT)
return std::nullopt;
@@ -379,8 +381,25 @@ static std::optional<unsigned> getInsertIndex(const Value *InsertInst,
Index += CI->getZExtValue();
return Index;
}
+ return std::nullopt;
+}
+
+/// \returns inserting or extracting index of InsertElement, ExtractElement or
+/// InsertValue instruction, using Offset as base offset for index.
+/// \returns std::nullopt if the index is not an immediate.
+static std::optional<unsigned> getElementIndex(const Value *Inst,
+ unsigned Offset = 0) {
+ if (auto Index = getInsertExtractIndex<InsertElementInst>(Inst, Offset))
+ return Index;
+ if (auto Index = getInsertExtractIndex<ExtractElementInst>(Inst, Offset))
+ return Index;
+
+ int Index = Offset;
+
+ const auto *IV = dyn_cast<InsertValueInst>(Inst);
+ if (!IV)
+ return std::nullopt;
- const auto *IV = cast<InsertValueInst>(InsertInst);
Type *CurrentType = IV->getType();
for (unsigned I : IV->indices()) {
if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
@@ -454,7 +473,7 @@ static SmallBitVector isUndefVector(const Value *V,
Base = II->getOperand(0);
if (isa<T>(II->getOperand(1)))
continue;
- std::optional<unsigned> Idx = getInsertIndex(II);
+ std::optional<unsigned> Idx = getElementIndex(II);
if (!Idx) {
Res.reset();
return Res;
@@ -4718,8 +4737,8 @@ static bool areTwoInsertFromSameBuildVector(
return false;
auto *IE1 = VU;
auto *IE2 = V;
- std::optional<unsigned> Idx1 = getInsertIndex(IE1);
- std::optional<unsigned> Idx2 = getInsertIndex(IE2);
+ std::optional<unsigned> Idx1 = getElementIndex(IE1);
+ std::optional<unsigned> Idx2 = getElementIndex(IE2);
if (Idx1 == std::nullopt || Idx2 == std::nullopt)
return false;
// Go through the vector operand of insertelement instructions trying to find
@@ -4734,7 +4753,7 @@ static bool areTwoInsertFromSameBuildVector(
if (IE1 == V && !IE2)
return V->hasOneUse();
if (IE1 && IE1 != V) {
- unsigned Idx1 = getInsertIndex(IE1).value_or(*Idx2);
+ unsigned Idx1 = getElementIndex(IE1).value_or(*Idx2);
IsReusedIdx |= ReusedIdx.test(Idx1);
ReusedIdx.set(Idx1);
if ((IE1 != VU && !IE1->hasOneUse()) || IsReusedIdx)
@@ -4743,7 +4762,7 @@ static bool areTwoInsertFromSameBuildVector(
IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1));
}
if (IE2 && IE2 != VU) {
- unsigned Idx2 = getInsertIndex(IE2).value_or(*Idx1);
+ unsigned Idx2 = getElementIndex(IE2).value_or(*Idx1);
IsReusedIdx |= ReusedIdx.test(Idx2);
ReusedIdx.set(Idx2);
if ((IE2 != V && !IE2->hasOneUse()) || IsReusedIdx)
@@ -4902,13 +4921,13 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
IE1, IE2,
[](InsertElementInst *II) { return II->getOperand(0); }))
return I1 < I2;
- return getInsertIndex(IE1) < getInsertIndex(IE2);
+ return getElementIndex(IE1) < getElementIndex(IE2);
}
if (auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1))
if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) {
if (EE1->getOperand(0) != EE2->getOperand(0))
return I1 < I2;
- return getInsertIndex(EE1) < getInsertIndex(EE2);
+ return getElementIndex(EE1) < getElementIndex(EE2);
}
return I1 < I2;
};
@@ -6162,7 +6181,7 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
ValueSet SourceVectors;
for (Value *V : VL) {
SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
- assert(getInsertIndex(V) != std::nullopt &&
+ assert(getElementIndex(V) != std::nullopt &&
"Non-constant or undef index?");
}
@@ -6929,7 +6948,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
decltype(OrdCompare)>
Indices(OrdCompare);
for (int I = 0, E = VL.size(); I < E; ++I) {
- unsigned Idx = *getInsertIndex(VL[I]);
+ unsigned Idx = *getElementIndex(VL[I]);
Indices.emplace(Idx, I);
}
OrdersType CurrentOrder(VL.size(), VL.size());
@@ -9308,11 +9327,11 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy);
SmallVector<int> InsertMask(NumElts, PoisonMaskElem);
- unsigned OffsetBeg = *getInsertIndex(VL.front());
+ unsigned OffsetBeg = *getElementIndex(VL.front());
unsigned OffsetEnd = OffsetBeg;
InsertMask[OffsetBeg] = 0;
for (auto [I, V] : enumerate(VL.drop_front())) {
- unsigned Idx = *getInsertIndex(V);
+ unsigned Idx = *getElementIndex(V);
if (OffsetBeg > Idx)
OffsetBeg = Idx;
else if (OffsetEnd < Idx)
@@ -9353,7 +9372,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
SmallVector<int> PrevMask(InsertVecSz, PoisonMaskElem);
Mask.swap(PrevMask);
for (unsigned I = 0; I < NumScalars; ++I) {
- unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]);
+ unsigned InsertIdx = *getElementIndex(VL[PrevMask[I]]);
DemandedElts.setBit(InsertIdx);
IsIdentity &= InsertIdx - OffsetBeg == I;
Mask[InsertIdx - OffsetBeg] = I;
@@ -10103,8 +10122,8 @@ static bool isFirstInsertElement(const InsertElementInst *IE1,
const auto *I2 = IE2;
const InsertElementInst *PrevI1;
const InsertElementInst *PrevI2;
- unsigned Idx1 = *getInsertIndex(IE1);
- unsigned Idx2 = *getInsertIndex(IE2);
+ unsigned Idx1 = *getElementIndex(IE1);
+ unsigned Idx2 = *getElementIndex(IE2);
do {
if (I2 == IE1)
return true;
@@ -10113,10 +10132,10 @@ static bool isFirstInsertElement(const InsertElementInst *IE1,
PrevI1 = I1;
PrevI2 = I2;
if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
- getInsertIndex(I1).value_or(Idx2) != Idx2)
+ getElementIndex(I1).value_or(Idx2) != Idx2)
I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
- getInsertIndex(I2).value_or(Idx1) != Idx1)
+ getElementIndex(I2).value_or(Idx1) != Idx1)
I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
} while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
llvm_unreachable("Two different buildvectors not expected.");
@@ -10308,7 +10327,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) {
if (!UsedInserts.insert(VU).second)
continue;
- std::optional<unsigned> InsertIdx = getInsertIndex(VU);
+ std::optional<unsigned> InsertIdx = getElementIndex(VU);
if (InsertIdx) {
const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar);
auto *It = find_if(
@@ -10334,14 +10353,14 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) {
if (IEBase != EU.User &&
(!IEBase->hasOneUse() ||
- getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx))
+ getElementIndex(IEBase).value_or(*InsertIdx) == *InsertIdx))
break;
// Build the mask for the vectorized insertelement instructions.
if (const TreeEntry *E = getTreeEntry(IEBase)) {
VU = IEBase;
do {
IEBase = cast<InsertElementInst>(Base);
- int Idx = *getInsertIndex(IEBase);
+ int Idx = *getElementIndex(IEBase);
assert(Mask[Idx] == PoisonMaskElem &&
"InsertElementInstruction used already.");
Mask[Idx] = Idx;
@@ -12755,7 +12774,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
cast<FixedVectorType>(FirstInsert->getType())->getNumElements();
const unsigned NumScalars = E->Scalars.size();
- unsigned Offset = *getInsertIndex(VL0);
+ unsigned Offset = *getElementIndex(VL0);
assert(Offset < NumElts && "Failed to find vector index offset");
// Create shuffle to resize vector
@@ -12773,7 +12792,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
Mask.swap(PrevMask);
for (unsigned I = 0; I < NumScalars; ++I) {
Value *Scalar = E->Scalars[PrevMask[I]];
- unsigned InsertIdx = *getInsertIndex(Scalar);
+ unsigned InsertIdx = *getElementIndex(Scalar);
IsIdentity &= InsertIdx - Offset == I;
Mask[InsertIdx - Offset] = I;
}
@@ -12786,7 +12805,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
// sequence.
InsertElementInst *Ins = cast<InsertElementInst>(VL0);
do {
- std::optional<unsigned> InsertIdx = getInsertIndex(Ins);
+ std::optional<unsigned> InsertIdx = getElementIndex(Ins);
if (!InsertIdx)
break;
if (InsertMask[*InsertIdx] == PoisonMaskElem)
@@ -13835,7 +13854,7 @@ Value *BoUpSLP::vectorizeTree(
}
}
- std::optional<unsigned> InsertIdx = getInsertIndex(VU);
+ std::optional<unsigned> InsertIdx = getElementIndex(VU);
if (InsertIdx) {
auto *It =
find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) {
@@ -13858,13 +13877,13 @@ Value *BoUpSLP::vectorizeTree(
while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) {
if (IEBase != User &&
(!IEBase->hasOneUse() ||
- getInsertIndex(IEBase).value_or(Idx) == Idx))
+ getElementIndex(IEBase).value_or(Idx) == Idx))
break;
// Build the mask for the vectorized insertelement instructions.
if (const TreeEntry *E = getTreeEntry(IEBase)) {
do {
IEBase = cast<InsertElementInst>(Base);
- int IEIdx = *getInsertIndex(IEBase);
+ int IEIdx = *getElementIndex(IEBase);
assert(Mask[IEIdx] == PoisonMaskElem &&
"InsertElementInstruction used already.");
Mask[IEIdx] = IEIdx;
@@ -17822,7 +17841,7 @@ static void findBuildAggregate_rec(Instruction *LastInsertInst,
do {
Value *InsertedOperand = LastInsertInst->getOperand(1);
std::optional<unsigned> OperandIndex =
- getInsertIndex(LastInsertInst, OperandOffset);
+ getElementIndex(LastInsertInst, OperandOffset);
if (!OperandIndex)
return;
if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) {
diff --git a/llvm/test/Transforms/SLPVectorizer/rdar128092379.ll b/llvm/test/Transforms/SLPVectorizer/rdar128092379.ll
new file mode 100644
index 0000000000000..15808a545f8f2
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/rdar128092379.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=slp-vectorizer < %s -o - -S | FileCheck %s
+
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx15.4.0"
+
+define fastcc i32 @rdar128092379(i8 %index) {
+; CHECK-LABEL: define fastcc i32 @rdar128092379(
+; CHECK-SAME: i8 [[INDEX:%.*]]) {
+; CHECK-NEXT: [[BLOCK:.*]]:
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[INDEX]] to i64
+; CHECK-NEXT: [[ZEXT1:%.*]] = zext i8 [[INDEX]] to i64
+; CHECK-NEXT: br label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK2:.*]]:
+; CHECK-NEXT: br label %[[BLOCK3]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[BLOCK2]] ], [ [[ZEXT1]], %[[BLOCK]] ]
+; CHECK-NEXT: [[PHI4:%.*]] = phi i64 [ 0, %[[BLOCK2]] ], [ [[ZEXT]], %[[BLOCK]] ]
+; CHECK-NEXT: [[EXTRACTELEMENT:%.*]] = extractelement <16 x i32> zeroinitializer, i64 [[PHI4]]
+; CHECK-NEXT: [[EXTRACTELEMENT5:%.*]] = extractelement <16 x i32> zeroinitializer, i64 [[PHI]]
+; CHECK-NEXT: [[SUM:%.*]] = add i32 [[EXTRACTELEMENT]], [[EXTRACTELEMENT5]]
+; CHECK-NEXT: ret i32 [[SUM]]
+;
+block:
+ %zext = zext i8 %index to i64
+ %zext1 = zext i8 %index to i64
+ br label %block3
+
+block2:
+ br label %block3
+
+block3:
+ %phi = phi i64 [ 0, %block2 ], [ %zext1, %block ]
+ %phi4 = phi i64 [ 0, %block2 ], [ %zext, %block ]
+ %extractelement = extractelement <16 x i32> zeroinitializer, i64 %phi4
+ %extractelement5 = extractelement <16 x i32> zeroinitializer, i64 %phi
+ %sum = add i32 %extractelement, %extractelement5
+ ret i32 %sum
+}
>From 3eebeb7e50c49ece2788ff4d5ffab5cc6c3da455 Mon Sep 17 00:00:00 2001
From: PiJoules <6019989+PiJoules at users.noreply.github.com>
Date: Wed, 3 Jul 2024 16:26:00 -0700
Subject: [PATCH 192/246] [libc] Add aligned_alloc (#96586)
This adds support for aligned_alloc with the freelist allocator. It
works by finding blocks large enough to hold the requested size plus
some shift amount that's at most the requested alignment. Blocks that
meet this requirement but aren't properly aligned are split so that the
usable_space of a new block is properly aligned. The "padding" block
created in front is merged with the previous block if one exists, as
sketched below.
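The alignment arithmetic is the core of the change: when a free block's
usable space is misaligned, the allocator shifts it forward by the
smallest offset that both satisfies the alignment and leaves room in
front for one extra block header, so the cut-off prefix is itself a
valid block. A hedged sketch of that computation, where BLOCK_OVERHEAD
is a hypothetical stand-in for the real header size in block.h:
```cpp
#include <cstdint>

// Round x up to the next multiple of align (align must be a power of two).
constexpr std::uintptr_t align_up(std::uintptr_t x, std::uintptr_t align) {
  return (x + align - 1) & ~(align - 1);
}

constexpr std::uintptr_t BLOCK_OVERHEAD = 16; // hypothetical header size

// Mirrors Block::padding_for_alignment in the patch: the offset by which the
// usable space must move so that it becomes aligned AND a whole block header
// (the future "padding" block) fits in front of it.
std::uintptr_t padding_for_alignment(std::uintptr_t usable_start,
                                     std::uintptr_t alignment) {
  if (usable_start % alignment == 0)
    return 0; // already aligned, no split needed
  return align_up(usable_start + BLOCK_OVERHEAD, alignment) - usable_start;
}
```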
---
libc/src/__support/block.h | 121 +++++++++++
libc/src/__support/freelist.h | 18 ++
libc/src/__support/freelist_heap.h | 52 ++++-
libc/src/stdlib/aligned_alloc.h | 20 ++
libc/src/stdlib/freelist_malloc.cpp | 5 +
libc/test/src/__support/CMakeLists.txt | 1 +
libc/test/src/__support/block_test.cpp | 197 ++++++++++++++++++
.../test/src/__support/freelist_heap_test.cpp | 80 ++++++-
.../src/__support/freelist_malloc_test.cpp | 18 ++
9 files changed, 501 insertions(+), 11 deletions(-)
create mode 100644 libc/src/stdlib/aligned_alloc.h
diff --git a/libc/src/__support/block.h b/libc/src/__support/block.h
index 580f20e1ec4a4..026ea9063f416 100644
--- a/libc/src/__support/block.h
+++ b/libc/src/__support/block.h
@@ -16,6 +16,7 @@
#include "src/__support/CPP/optional.h"
#include "src/__support/CPP/span.h"
#include "src/__support/CPP/type_traits.h"
+#include "src/__support/libc_assert.h"
#include <stdint.h>
@@ -261,6 +262,63 @@ class Block {
constexpr Block(size_t prev_outer_size, size_t outer_size);
+ bool is_usable_space_aligned(size_t alignment) const {
+ return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
+ }
+
+ size_t padding_for_alignment(size_t alignment) const {
+ if (is_usable_space_aligned(alignment))
+ return 0;
+
+ // We need to ensure we can always split this block into a "padding" block
+ // and the aligned block. To do this, we need enough extra space for at
+ // least one block.
+ //
+ // |block |usable_space |
+ // |........|......................................|
+ // ^
+ // Alignment requirement
+ //
+ //
+ // |block |space |block |usable_space |
+ // |........|........|........|....................|
+ // ^
+ // Alignment requirement
+ //
+ uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
+ alignment = cpp::max(alignment, ALIGNMENT);
+ return align_up(start + BLOCK_OVERHEAD, alignment) - start;
+ }
+
+ // Check that we can `allocate` a block with a given alignment and size from
+ // this existing block.
+ bool can_allocate(size_t alignment, size_t size) const;
+
+ // This is the return type for `allocate` which can split one block into up to
+ // three blocks.
+ struct BlockInfo {
+ // This is the newly aligned block. It will have the alignment requested by
+ // a call to `allocate` and at most `size`.
+ Block *block;
+
+ // If the usable_space in the new block was not aligned according to the
+ // `alignment` parameter, we will need to split into this block and the
+ // `block` to ensure `block` is properly aligned. In this case, `prev` will
+ // be a pointer to this new "padding" block. `prev` will be nullptr if no
+ // new block was created or we were able to merge the block before the
+ // original block with the "padding" block.
+ Block *prev;
+
+ // This is the remainder of the next block after splitting the `block`
+ // according to `size`. This can happen if there's enough space after the
+ // `block`.
+ Block *next;
+ };
+
+ // Divide a block into up to 3 blocks according to `BlockInfo`. This should
+ // only be called if `can_allocate` returns true.
+ static BlockInfo allocate(Block *block, size_t alignment, size_t size);
+
private:
/// Consumes the block and returns as a span of bytes.
static ByteSpan as_bytes(Block *&&block);
@@ -357,6 +415,69 @@ void Block<OffsetType, kAlign>::free(Block *&block) {
merge_next(block);
}
+template <typename OffsetType, size_t kAlign>
+bool Block<OffsetType, kAlign>::can_allocate(size_t alignment,
+ size_t size) const {
+ if (is_usable_space_aligned(alignment) && inner_size() >= size)
+ return true; // Size and alignment constraints met.
+
+ // Either the alignment isn't met or we don't have enough size.
+ // If we don't meet the alignment, we can always adjust so that we do; if we
+ // meet the alignment but just don't have enough size, this check will fail
+ // anyway.
+ size_t adjustment = padding_for_alignment(alignment);
+ return inner_size() >= size + adjustment;
+}
+
+template <typename OffsetType, size_t kAlign>
+typename Block<OffsetType, kAlign>::BlockInfo
+Block<OffsetType, kAlign>::allocate(Block *block, size_t alignment,
+ size_t size) {
+ LIBC_ASSERT(
+ block->can_allocate(alignment, size) &&
+ "Calls to this function for a given alignment and size should only be "
+ "done if `can_allocate` for these parameters returns true.");
+
+ BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};
+
+ if (!info.block->is_usable_space_aligned(alignment)) {
+ size_t adjustment = info.block->padding_for_alignment(alignment);
+ size_t new_inner_size = adjustment - BLOCK_OVERHEAD;
+ LIBC_ASSERT(new_inner_size % ALIGNMENT == 0 &&
+ "The adjustment calculation should always return a new size "
+ "that's a multiple of ALIGNMENT");
+
+ Block *original = info.block;
+ optional<Block *> maybe_aligned_block =
+ Block::split(original, adjustment - BLOCK_OVERHEAD);
+ LIBC_ASSERT(maybe_aligned_block.has_value() &&
+ "This split should always result in a new block. The check in "
+ "`can_allocate` ensures that we have enough space here to make "
+ "two blocks.");
+
+ if (Block *prev = original->prev()) {
+ // If there is a block before this, we can merge the current one with the
+ // newly created one.
+ merge_next(prev);
+ } else {
+ // Otherwise, this was the very first block in the chain. Now we can make
+ // it the new first block.
+ info.prev = original;
+ }
+
+ Block *aligned_block = *maybe_aligned_block;
+ LIBC_ASSERT(aligned_block->is_usable_space_aligned(alignment) &&
+ "The aligned block isn't aligned somehow.");
+ info.block = aligned_block;
+ }
+
+ // Now get a block for the requested size.
+ if (optional<Block *> next = Block::split(info.block, size))
+ info.next = *next;
+
+ return info;
+}
+
template <typename OffsetType, size_t kAlign>
optional<Block<OffsetType, kAlign> *>
Block<OffsetType, kAlign>::split(Block *&block, size_t new_inner_size) {
diff --git a/libc/src/__support/freelist.h b/libc/src/__support/freelist.h
index 0641ba93807d6..b2eb84a85dbce 100644
--- a/libc/src/__support/freelist.h
+++ b/libc/src/__support/freelist.h
@@ -66,6 +66,8 @@ template <size_t NUM_BUCKETS = 6> class FreeList {
/// A span with a size of 0.
cpp::span<cpp::byte> find_chunk(size_t size) const;
+ template <typename Cond> cpp::span<cpp::byte> find_chunk_if(Cond op) const;
+
/// Removes a chunk from this freelist.
bool remove_chunk(cpp::span<cpp::byte> chunk);
@@ -111,6 +113,22 @@ bool FreeList<NUM_BUCKETS>::add_chunk(span<cpp::byte> chunk) {
return true;
}
+template <size_t NUM_BUCKETS>
+template <typename Cond>
+span<cpp::byte> FreeList<NUM_BUCKETS>::find_chunk_if(Cond op) const {
+ for (FreeListNode *node : chunks_) {
+ while (node != nullptr) {
+ span<cpp::byte> chunk(reinterpret_cast<cpp::byte *>(node), node->size);
+ if (op(chunk))
+ return chunk;
+
+ node = node->next;
+ }
+ }
+
+ return {};
+}
+
template <size_t NUM_BUCKETS>
span<cpp::byte> FreeList<NUM_BUCKETS>::find_chunk(size_t size) const {
if (size == 0)
diff --git a/libc/src/__support/freelist_heap.h b/libc/src/__support/freelist_heap.h
index 3569baf27bdaa..3d0db544c4af0 100644
--- a/libc/src/__support/freelist_heap.h
+++ b/libc/src/__support/freelist_heap.h
@@ -24,6 +24,8 @@ namespace LIBC_NAMESPACE {
using cpp::optional;
using cpp::span;
+inline constexpr bool IsPow2(size_t x) { return x && (x & (x - 1)) == 0; }
+
static constexpr cpp::array<size_t, 6> DEFAULT_BUCKETS{16, 32, 64,
128, 256, 512};
@@ -32,6 +34,9 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
using BlockType = Block<>;
using FreeListType = FreeList<NUM_BUCKETS>;
+ static constexpr size_t MIN_ALIGNMENT =
+ cpp::max(BlockType::ALIGNMENT, alignof(max_align_t));
+
struct HeapStats {
size_t total_bytes;
size_t bytes_allocated;
@@ -55,6 +60,9 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
}
void *allocate(size_t size);
+ void *aligned_allocate(size_t alignment, size_t size);
+ // NOTE: All pointers passed to free must come from one of the other
+ // allocation functions: `allocate`, `aligned_allocate`, `realloc`, `calloc`.
void free(void *ptr);
void *realloc(void *ptr, size_t size);
void *calloc(size_t num, size_t size);
@@ -74,6 +82,8 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
freelist_.set_freelist_node(node, chunk);
}
+ void *allocate_impl(size_t alignment, size_t size);
+
private:
span<cpp::byte> block_to_span(BlockType *block) {
return span<cpp::byte>(block->usable_space(), block->inner_size());
@@ -109,20 +119,31 @@ struct FreeListHeapBuffer : public FreeListHeap<NUM_BUCKETS> {
};
template <size_t NUM_BUCKETS>
-void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
- // Find a chunk in the freelist. Split it if needed, then return
- auto chunk = freelist_.find_chunk(size);
+void *FreeListHeap<NUM_BUCKETS>::allocate_impl(size_t alignment, size_t size) {
+ if (size == 0)
+ return nullptr;
+
+ // Find a chunk in the freelist. Split it if needed, then return.
+ auto chunk =
+ freelist_.find_chunk_if([alignment, size](span<cpp::byte> chunk) {
+ BlockType *block = BlockType::from_usable_space(chunk.data());
+ return block->can_allocate(alignment, size);
+ });
if (chunk.data() == nullptr)
return nullptr;
freelist_.remove_chunk(chunk);
BlockType *chunk_block = BlockType::from_usable_space(chunk.data());
+ LIBC_ASSERT(!chunk_block->used());
// Split that chunk. If there's a leftover chunk, add it to the freelist
- optional<BlockType *> result = BlockType::split(chunk_block, size);
- if (result)
- freelist_.add_chunk(block_to_span(*result));
+ auto block_info = BlockType::allocate(chunk_block, alignment, size);
+ if (block_info.next)
+ freelist_.add_chunk(block_to_span(block_info.next));
+ if (block_info.prev)
+ freelist_.add_chunk(block_to_span(block_info.prev));
+ chunk_block = block_info.block;
chunk_block->mark_used();
@@ -133,6 +154,25 @@ void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
return chunk_block->usable_space();
}
+template <size_t NUM_BUCKETS>
+void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
+ return allocate_impl(MIN_ALIGNMENT, size);
+}
+
+template <size_t NUM_BUCKETS>
+void *FreeListHeap<NUM_BUCKETS>::aligned_allocate(size_t alignment,
+ size_t size) {
+ // The alignment must be an integral power of two.
+ if (!IsPow2(alignment))
+ return nullptr;
+
+ // The size parameter must be an integral multiple of alignment.
+ if (size % alignment != 0)
+ return nullptr;
+
+ return allocate_impl(alignment, size);
+}
+
template <size_t NUM_BUCKETS> void FreeListHeap<NUM_BUCKETS>::free(void *ptr) {
cpp::byte *bytes = static_cast<cpp::byte *>(ptr);
diff --git a/libc/src/stdlib/aligned_alloc.h b/libc/src/stdlib/aligned_alloc.h
new file mode 100644
index 0000000000000..7f294c8114d49
--- /dev/null
+++ b/libc/src/stdlib/aligned_alloc.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for aligned_alloc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
+#define LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
+
+#include <stddef.h>
+
+namespace LIBC_NAMESPACE {
+
+void *aligned_alloc(size_t alignment, size_t size);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
diff --git a/libc/src/stdlib/freelist_malloc.cpp b/libc/src/stdlib/freelist_malloc.cpp
index 4d3c42ca90bab..684c447a204e4 100644
--- a/libc/src/stdlib/freelist_malloc.cpp
+++ b/libc/src/stdlib/freelist_malloc.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "src/__support/freelist_heap.h"
+#include "src/stdlib/aligned_alloc.h"
#include "src/stdlib/calloc.h"
#include "src/stdlib/free.h"
#include "src/stdlib/malloc.h"
@@ -42,4 +43,8 @@ LLVM_LIBC_FUNCTION(void *, realloc, (void *ptr, size_t size)) {
return freelist_heap->realloc(ptr, size);
}
+LLVM_LIBC_FUNCTION(void *, aligned_alloc, (size_t alignment, size_t size)) {
+ return freelist_heap->aligned_allocate(alignment, size);
+}
+
} // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt
index ce8413fed7172..98f263b74b759 100644
--- a/libc/test/src/__support/CMakeLists.txt
+++ b/libc/test/src/__support/CMakeLists.txt
@@ -8,6 +8,7 @@ add_libc_test(
block_test.cpp
DEPENDS
libc.src.__support.CPP.array
+ libc.src.__support.CPP.bit
libc.src.__support.CPP.span
libc.src.__support.block
libc.src.string.memcpy
diff --git a/libc/test/src/__support/block_test.cpp b/libc/test/src/__support/block_test.cpp
index 6614e4b583d3f..04704482b5147 100644
--- a/libc/test/src/__support/block_test.cpp
+++ b/libc/test/src/__support/block_test.cpp
@@ -8,6 +8,7 @@
#include <stddef.h>
#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/bit.h"
#include "src/__support/CPP/span.h"
#include "src/__support/block.h"
#include "src/string/memcpy.h"
@@ -36,6 +37,7 @@ using SmallOffsetBlock = LIBC_NAMESPACE::Block<uint16_t>;
template <typename BlockType> void LlvmLibcBlockTest##TestCase::RunTest()
using LIBC_NAMESPACE::cpp::array;
+using LIBC_NAMESPACE::cpp::bit_ceil;
using LIBC_NAMESPACE::cpp::byte;
using LIBC_NAMESPACE::cpp::span;
@@ -567,3 +569,198 @@ TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
const BlockType *block2 = BlockType::from_usable_space(ptr);
EXPECT_EQ(block1, block2);
}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanAllocate) {
+ constexpr size_t kN = 1024;
+
+ // Ensure we can allocate everything up to the block size within this block.
+ for (size_t i = 0; i < kN - BlockType::BLOCK_OVERHEAD; ++i) {
+ alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ constexpr size_t ALIGN = 1; // Effectively ignores alignment.
+ EXPECT_TRUE(block->can_allocate(ALIGN, i));
+
+ // For each can_allocate, we should be able to do a successful call to
+ // allocate.
+ auto info = BlockType::allocate(block, ALIGN, i);
+ EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
+ }
+
+ alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // Given a block of size kN (assuming it's also a power of two), we should be
+ // able to allocate a block within it that's aligned to half its size. This is
+ // because regardless of where the buffer is located, we can always find a
+ // starting location within it that meets this alignment.
+ EXPECT_TRUE(block->can_allocate(kN / 2, 1));
+ auto info = BlockType::allocate(block, kN / 2, 1);
+ EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(AllocateAlreadyAligned) {
+ constexpr size_t kN = 1024;
+
+ alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // This should result in no new blocks.
+ constexpr size_t kAlignment = BlockType::ALIGNMENT;
+ constexpr size_t kExpectedSize = BlockType::ALIGNMENT;
+ EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));
+
+ auto [aligned_block, prev, next] =
+ BlockType::allocate(block, BlockType::ALIGNMENT, kExpectedSize);
+
+ // Since this is already aligned, there should be no previous block.
+ EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
+
+ // Ensure the block is aligned and has the size we expect.
+ EXPECT_NE(aligned_block, static_cast<BlockType *>(nullptr));
+ EXPECT_TRUE(aligned_block->is_usable_space_aligned(BlockType::ALIGNMENT));
+ EXPECT_EQ(aligned_block->inner_size(), kExpectedSize);
+
+ // Check the next block.
+ EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(aligned_block->next(), next);
+ EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
+ bytes.data() + bytes.size());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
+ constexpr size_t kN = 1024;
+
+ alignas(kN) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // First, ensure the usable space is only aligned to the block alignment.
+ ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
+ ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+
+ // Now pick an alignment such that the usable space is not already aligned to
+ // it. We want to explicitly test that allocating will split off a padding
+ // block before the aligned one.
+ constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+ ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
+
+ constexpr size_t kSize = 10;
+ EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+
+ auto [aligned_block, prev, next] =
+ BlockType::allocate(block, kAlignment, kSize);
+
+ // Check the previous block was created appropriately. Since this block was
+ // the first block, a new one should be created before it.
+ EXPECT_NE(prev, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(aligned_block->prev(), prev);
+ EXPECT_EQ(prev->next(), aligned_block);
+ EXPECT_EQ(prev->outer_size(), reinterpret_cast<uintptr_t>(aligned_block) -
+ reinterpret_cast<uintptr_t>(prev));
+
+ // Ensure the block is aligned as requested.
+ EXPECT_NE(aligned_block, static_cast<BlockType *>(nullptr));
+ EXPECT_TRUE(aligned_block->is_usable_space_aligned(kAlignment));
+
+ // Check the next block.
+ EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(aligned_block->next(), next);
+ EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(), &*bytes.end());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
+ constexpr size_t kN = 1024;
+
+ alignas(kN) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // Split the block roughly halfway and work on the second half.
+ auto result2 = BlockType::split(block, kN / 2);
+ ASSERT_TRUE(result2.has_value());
+ BlockType *newblock = *result2;
+ ASSERT_EQ(newblock->prev(), block);
+ size_t old_prev_size = block->outer_size();
+
+ // Now pick an alignment such that the usable space is not already aligned to
+ // it. We want to explicitly test that allocating will split off a padding
+ // block before the aligned one.
+ constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+ ASSERT_FALSE(newblock->is_usable_space_aligned(kAlignment));
+
+ // Ensure we can allocate in the new block.
+ constexpr size_t kSize = BlockType::ALIGNMENT;
+ EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize));
+
+ auto [aligned_block, prev, next] =
+ BlockType::allocate(newblock, kAlignment, kSize);
+
+ // Now there should be no new previous block. Instead, the padding we
+ // created should be merged into the original previous block.
+ EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(aligned_block->prev(), block);
+ EXPECT_EQ(block->next(), aligned_block);
+ EXPECT_GT(block->outer_size(), old_prev_size);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
+ // Finally, ensure allocate split the blocks correctly: we should be able
+ // to reconstruct the original block from the blocklets.
+ //
+ // This is the same setup as with the `AllocateNeedsAlignment` test case.
+ constexpr size_t kN = 1024;
+
+ alignas(kN) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // First, ensure the usable space is only aligned to the block alignment.
+ ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
+ ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+
+ // Now pick an alignment such that the usable space is not already aligned to
+ // it. We want to explicitly test that allocating will split off a padding
+ // block before the aligned one.
+ constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+ ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
+
+ constexpr size_t kSize = BlockType::ALIGNMENT;
+ EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+
+ auto [aligned_block, prev, next] =
+ BlockType::allocate(block, kAlignment, kSize);
+
+ // Check we have the appropriate blocks.
+ ASSERT_NE(prev, static_cast<BlockType *>(nullptr));
+ ASSERT_FALSE(prev->last());
+ ASSERT_EQ(aligned_block->prev(), prev);
+ EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(aligned_block->next(), next);
+ EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+ ASSERT_TRUE(next->last());
+
+ // Now check for successful merges.
+ EXPECT_TRUE(BlockType::merge_next(prev));
+ EXPECT_EQ(prev->next(), next);
+ EXPECT_TRUE(BlockType::merge_next(prev));
+ EXPECT_EQ(prev->next(), static_cast<BlockType *>(nullptr));
+ EXPECT_TRUE(prev->last());
+
+ // We should have the original buffer.
+ EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
+ EXPECT_EQ(prev->outer_size(), bytes.size());
+ EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(), &*bytes.end());
+}
diff --git a/libc/test/src/__support/freelist_heap_test.cpp b/libc/test/src/__support/freelist_heap_test.cpp
index a35cb5589ed62..add590f5c6d31 100644
--- a/libc/test/src/__support/freelist_heap_test.cpp
+++ b/libc/test/src/__support/freelist_heap_test.cpp
@@ -47,10 +47,6 @@ TEST_FOR_EACH_ALLOCATOR(CanAllocate, 2048) {
void *ptr = allocator.allocate(ALLOC_SIZE);
ASSERT_NE(ptr, static_cast<void *>(nullptr));
- // In this case, the allocator should be returning us the start of the chunk.
- EXPECT_EQ(ptr, static_cast<void *>(
- reinterpret_cast<cpp::byte *>(allocator.region_start()) +
- FreeListHeap<>::BlockType::BLOCK_OVERHEAD));
}
TEST_FOR_EACH_ALLOCATOR(AllocationsDontOverlap, 2048) {
@@ -94,7 +90,10 @@ TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
FreeListHeap<> allocator(buf);
- EXPECT_NE(allocator.allocate(N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
+ // Use aligned_allocate so we don't need to worry about ensuring that `buf`
+ // is aligned to max_align_t.
+ EXPECT_NE(allocator.aligned_allocate(
+ 1, N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
static_cast<void *>(nullptr));
EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
}
@@ -214,4 +213,75 @@ TEST_FOR_EACH_ALLOCATOR(CallocTooLarge, 2048) {
EXPECT_EQ(allocator.calloc(1, ALLOC_SIZE), static_cast<void *>(nullptr));
}
+TEST_FOR_EACH_ALLOCATOR(AllocateZero, 2048) {
+ void *ptr = allocator.allocate(0);
+ ASSERT_EQ(ptr, static_cast<void *>(nullptr));
+}
+
+TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) {
+ constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
+ constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
+
+ for (size_t alignment : ALIGNMENTS) {
+ for (size_t scale : SIZE_SCALES) {
+ size_t size = alignment * scale;
+ void *ptr = allocator.aligned_allocate(alignment, size);
+ EXPECT_NE(ptr, static_cast<void *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, size_t(0));
+ allocator.free(ptr);
+ }
+ }
+}
+
+// This test is not part of TEST_FOR_EACH_ALLOCATOR because we want to
+// explicitly check that aligned allocations still succeed even when the
+// underlying buffer is aligned only to the BlockType alignment rather than
+// to the larger alignments we request.
+TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockTypeAligned) {
+ constexpr size_t BUFFER_SIZE = 4096;
+ constexpr size_t BUFFER_ALIGNMENT = alignof(FreeListHeap<>::BlockType) * 2;
+ alignas(BUFFER_ALIGNMENT) cpp::byte buf[BUFFER_SIZE] = {cpp::byte(0)};
+
+ // Ensure the underlying buffer is at most aligned to the block type.
+ FreeListHeap<> allocator(
+ span<cpp::byte>(buf).subspan(alignof(FreeListHeap<>::BlockType)));
+
+ constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
+ constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
+
+ for (size_t alignment : ALIGNMENTS) {
+ for (size_t scale : SIZE_SCALES) {
+ size_t size = alignment * scale;
+ void *ptr = allocator.aligned_allocate(alignment, size);
+ EXPECT_NE(ptr, static_cast<void *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, size_t(0));
+ allocator.free(ptr);
+ }
+ }
+}
+
+TEST_FOR_EACH_ALLOCATOR(InvalidAlignedAllocAlignment, 2048) {
+ // Must be a power of 2.
+ constexpr size_t ALIGNMENTS[] = {4, 8, 16, 32, 64, 128, 256};
+ for (size_t alignment : ALIGNMENTS) {
+ void *ptr = allocator.aligned_allocate(alignment - 1, alignment - 1);
+ EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+ }
+
+ // Size must be a multiple of alignment
+ for (size_t alignment : ALIGNMENTS) {
+ void *ptr = allocator.aligned_allocate(alignment, alignment + 1);
+ EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+ }
+
+ // Don't accept zero size.
+ void *ptr = allocator.aligned_allocate(1, 0);
+ EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+
+ // Don't accept zero alignment.
+ ptr = allocator.aligned_allocate(0, 8);
+ EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+}
+
} // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/__support/freelist_malloc_test.cpp b/libc/test/src/__support/freelist_malloc_test.cpp
index 989e9548fa26d..e9d7c63a4d438 100644
--- a/libc/test/src/__support/freelist_malloc_test.cpp
+++ b/libc/test/src/__support/freelist_malloc_test.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "src/__support/freelist_heap.h"
+#include "src/stdlib/aligned_alloc.h"
#include "src/stdlib/calloc.h"
#include "src/stdlib/free.h"
#include "src/stdlib/malloc.h"
@@ -53,4 +54,21 @@ TEST(LlvmLibcFreeListMalloc, MallocStats) {
kAllocSize + kCallocNum * kCallocSize);
EXPECT_EQ(freelist_heap_stats.cumulative_freed,
kAllocSize + kCallocNum * kCallocSize);
+
+ constexpr size_t ALIGN = kAllocSize;
+ void *ptr3 = LIBC_NAMESPACE::aligned_alloc(ALIGN, kAllocSize);
+ EXPECT_NE(ptr3, static_cast<void *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % ALIGN, size_t(0));
+ EXPECT_EQ(freelist_heap_stats.bytes_allocated, kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
+ kAllocSize + kCallocNum * kCallocSize + kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_freed,
+ kAllocSize + kCallocNum * kCallocSize);
+
+ LIBC_NAMESPACE::free(ptr3);
+ EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
+ EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
+ kAllocSize + kCallocNum * kCallocSize + kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_freed,
+ kAllocSize + kCallocNum * kCallocSize + kAllocSize);
}
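A note on the arithmetic these patches rely on: `aligned_allocate` rejects alignments that are not powers of two and sizes that are not a multiple of the alignment, and the block-splitting path conceptually rounds the usable-space address up to the requested alignment. Below is a minimal standalone sketch of both operations; `align_up` is an illustrative helper and not part of the patch, while `is_pow2` mirrors the `IsPow2` helper added to freelist_heap.h above.

#include <cstddef>
#include <cstdint>

// A power of two has exactly one set bit, so x & (x - 1) clears it to zero.
constexpr bool is_pow2(std::size_t x) { return x && (x & (x - 1)) == 0; }

// Round addr up to the next multiple of a power-of-two alignment.
constexpr std::uintptr_t align_up(std::uintptr_t addr, std::size_t alignment) {
  return (addr + alignment - 1) & ~static_cast<std::uintptr_t>(alignment - 1);
}

static_assert(is_pow2(64), "64 is a power of two");
static_assert(!is_pow2(48), "48 is not a power of two");
static_assert(align_up(100, 64) == 128, "100 rounds up to 128");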
>From 2ef5b8227a3022654b5ef4bd7bafdc5b8750237f Mon Sep 17 00:00:00 2001
From: Michael Jones <michaelrj at google.com>
Date: Wed, 3 Jul 2024 16:29:19 -0700
Subject: [PATCH 193/246] [libc][docs] Update full host build docs (#97643)
Add a note explaining how to fix the missing `asm` folder, as well as a
warning about installing without setting a sysroot.
---
libc/docs/full_host_build.rst | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/libc/docs/full_host_build.rst b/libc/docs/full_host_build.rst
index 709ba70b22de4..4fb3072590f32 100644
--- a/libc/docs/full_host_build.rst
+++ b/libc/docs/full_host_build.rst
@@ -35,6 +35,14 @@ we've set the Ninja generator, enabled a full compiler suite, set the build
type to "Debug", and enabled the Scudo allocator. The build also tells clang
to use the freshly built lld and compiler-rt.
+.. note::
+ If your build fails with an error saying the compiler can't find
+ ``<asm/unistd.h>`` or similar, then you're probably missing the symlink from
+ ``/usr/include/asm`` to ``/usr/include/<TARGET TRIPLE>/asm``. Installing the
+ ``gcc-multilib`` package creates this symlink, or you can do it manually with
+ this command:
+ ``sudo ln -s /usr/include/<TARGET TRIPLE>/asm /usr/include/asm``
+
.. code-block:: sh
$> cd llvm-project # The llvm-project checkout
@@ -76,6 +84,14 @@ above.
Build and install
=================
+.. TODO: add this warning to the cmake
+.. warning::
+ Running these install commands without setting a ``$SYSROOT`` will install
+ them into your system include path, which may break your system. If you're
+ just trying to develop libc, run ``ninja check-libc`` to build the
+ libc and run the tests. If you've already accidentally installed the headers,
+ you may need to delete them from ``/usr/local/include``.
+
After configuring the build with the above ``cmake`` command, one can build and
install the libc, clang (and its support libraries and builtins), lld and
compiler-rt, with the following command:
>From 4c79fac140261f67ef33e5f108df63d30cd6e3c7 Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Wed, 3 Jul 2024 16:40:49 -0700
Subject: [PATCH 194/246] [BOLT] Remove workaround for flushPendingLabels
The code emitted an empty MCDataFragment to ensure that the labels were
attached to `SplitSection`. The workaround was only needed for the
`flushPendingLabels` mechanism; since that mechanism has been removed (see
75006466296ed4b0f845cbbec4bf77c21de43b40), the workaround is now unneeded.
Pull Request: https://github.com/llvm/llvm-project/pull/97632
---
bolt/lib/Core/BinaryContext.cpp | 8 --------
1 file changed, 8 deletions(-)
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index f28a0cd6eb9c6..0a1f1bb9e0d20 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -2403,16 +2403,8 @@ BinaryContext::calculateEmittedSize(BinaryFunction &BF, bool FixBranches) {
Streamer->emitLabel(SplitStartLabel);
emitFunctionBody(*Streamer, BF, FF, /*EmitCodeOnly=*/true);
Streamer->emitLabel(SplitEndLabel);
- // To avoid calling MCObjectStreamer::flushPendingLabels() which is
- // private
- Streamer->emitBytes(StringRef(""));
- Streamer->switchSection(Section);
}
- // To avoid calling MCObjectStreamer::flushPendingLabels() which is private or
- // MCStreamer::Finish(), which does more than we want
- Streamer->emitBytes(StringRef(""));
-
MCAssembler &Assembler =
static_cast<MCObjectStreamer *>(Streamer.get())->getAssembler();
Assembler.layout();
>From f2c6add926625459ec52c9f36b29c8dd05d57c7d Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Wed, 3 Jul 2024 16:47:19 -0700
Subject: [PATCH 195/246] [MC] Remove remnant code related to pending labels
Follow-up to 485d7eaefd93c4f6bc8c51c9a169ffb22ce3a898
---
llvm/include/llvm/MC/MCSection.h | 13 -------------
1 file changed, 13 deletions(-)
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 54f7eb1d0fcfc..b013eca1d63fc 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -115,15 +115,6 @@ class MCSection {
// subsections.
SmallVector<std::pair<unsigned, FragList>, 1> Subsections;
- /// State for tracking labels that don't yet have Fragments
- struct PendingLabel {
- MCSymbol* Sym;
- unsigned Subsection;
- PendingLabel(MCSymbol* Sym, unsigned Subsection = 0)
- : Sym(Sym), Subsection(Subsection) {}
- };
- SmallVector<PendingLabel, 2> PendingLabels;
-
protected:
// TODO Make Name private when possible.
StringRef Name;
@@ -208,10 +199,6 @@ class MCSection {
bool isVirtualSection() const { return IsVirtual; }
virtual StringRef getVirtualSectionKind() const;
-
- /// Add a pending label for the requested subsection. This label will be
- /// associated with a fragment in flushPendingLabels()
- void addPendingLabel(MCSymbol* label, unsigned Subsection = 0);
};
} // end namespace llvm
>From 089ba1127f35fe89ff4d18cca1c87869f9eace6c Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Wed, 3 Jul 2024 16:53:41 -0700
Subject: [PATCH 196/246] [Linker] Use a range-based for loop (NFC) (#97656)
---
llvm/lib/Linker/IRMover.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Linker/IRMover.cpp b/llvm/lib/Linker/IRMover.cpp
index 7a5aa0c804782..ba8f371127764 100644
--- a/llvm/lib/Linker/IRMover.cpp
+++ b/llvm/lib/Linker/IRMover.cpp
@@ -1489,8 +1489,7 @@ Error IRLinker::linkModuleFlagsMetadata() {
}
// Check all of the requirements.
- for (unsigned I = 0, E = Requirements.size(); I != E; ++I) {
- MDNode *Requirement = Requirements[I];
+ for (MDNode *Requirement : Requirements) {
MDString *Flag = cast<MDString>(Requirement->getOperand(0));
Metadata *ReqValue = Requirement->getOperand(1);
>From 665efe896746b1dd138773e6e4d300ec97de27c2 Mon Sep 17 00:00:00 2001
From: PiJoules <6019989+PiJoules at users.noreply.github.com>
Date: Wed, 3 Jul 2024 17:02:57 -0700
Subject: [PATCH 197/246] [libc] Add LIBC_NAMESPACE_DECL macro (#97109)
This defines LIBC_NAMESPACE_DECL as LIBC_NAMESPACE with
`__attribute__((visibility("hidden")))` so all the symbols under it have
hidden visibility. This new macro should be used when declaring a
namespace that will hold internal functions/globals, while LIBC_NAMESPACE
should be used as a means of accessing functions/globals declared within
LIBC_NAMESPACE_DECL.
---
libc/docs/dev/clang_tidy_checks.rst | 5 ++++
libc/docs/dev/code_style.rst | 31 +++++++++++++++++++++++
libc/docs/dev/implementation_standard.rst | 12 ++++-----
libc/src/__support/macros/config.h | 10 ++++++++
4 files changed, 52 insertions(+), 6 deletions(-)
diff --git a/libc/docs/dev/clang_tidy_checks.rst b/libc/docs/dev/clang_tidy_checks.rst
index 3feb5375ef113..198d8f16d1cd7 100644
--- a/libc/docs/dev/clang_tidy_checks.rst
+++ b/libc/docs/dev/clang_tidy_checks.rst
@@ -64,6 +64,11 @@ This check that ensures any function call resolves to a function within the
void LLVM_LIBC_ENTRYPOINT(strcpy)(char *dest, const char *src) {}
}
+..
+ TODO(97655): The clang-tidy check should be updated to ensure the namespace
+ declaration uses LIBC_NAMESPACE_DECL as opposed to LIBC_NAMESPACE. The latter
+ should only be used for accessing globals in LIBC_NAMESPACE, not for declarations.
+
callee-namespace
----------------
diff --git a/libc/docs/dev/code_style.rst b/libc/docs/dev/code_style.rst
index 170ef6598a9d8..28d8277417b2b 100644
--- a/libc/docs/dev/code_style.rst
+++ b/libc/docs/dev/code_style.rst
@@ -260,3 +260,34 @@ Patches containing any amount of Assembly ideally should be approved by 2
maintainers. llvm-libc maintainers reserve the right to reject Assembly
contributions that they feel could be better maintained if rewritten in C++,
and to revisit this policy in the future.
+
+LIBC_NAMESPACE_DECL
+===================
+
+llvm-libc provides a macro ``LIBC_NAMESPACE`` that names the namespace containing
+internal implementations of libc functions and globals. This macro should only be
+used as an identifier for accessing such symbols within the namespace (like
+``LIBC_NAMESPACE::cpp::max``). Any use of this namespace for declaring or defining
+internal symbols should instead use ``LIBC_NAMESPACE_DECL``, which declares
+``LIBC_NAMESPACE`` with hidden visibility.
+
+Example usage:
+
+.. code-block:: c++
+
+ #include "src/__support/macros/config.h" // The macro is defined here.
+
+ namespace LIBC_NAMESPACE_DECL {
+
+ void new_function() {
+ ...
+ }
+
+ } // namespace LIBC_NAMESPACE_DECL
+
+Having hidden visibility on the namespace ensures extern declarations in a given TU
+have known visibility and never generate GOT indirections. The attribute guarantees
+this independently of global compile options and build systems.
+
+..
+ TODO(97655): We should have a clang-tidy check to enforce this and a
+ fixit implementation.
diff --git a/libc/docs/dev/implementation_standard.rst b/libc/docs/dev/implementation_standard.rst
index 790c06726d6b5..277fee9c19089 100644
--- a/libc/docs/dev/implementation_standard.rst
+++ b/libc/docs/dev/implementation_standard.rst
@@ -26,17 +26,17 @@ example. The ``isalpha`` function will be declared in an internal header file
#ifndef LLVM_LIBC_SRC_CTYPE_ISALPHA_H
#define LLVM_LIBC_SRC_CTYPE_ISALPHA_H
- namespace LIBC_NAMESPACE {
+ namespace LIBC_NAMESPACE_DECL {
int isalpha(int c);
- } // namespace LIBC_NAMESPACE
+ } // namespace LIBC_NAMESPACE_DECL
#endif LLVM_LIBC_SRC_CTYPE_ISALPHA_H
Notice that the ``isalpha`` function declaration is nested inside the namespace
-``LIBC_NAMESPACE``. All implementation constructs in LLVM-libc are declared
-within the namespace ``LIBC_NAMESPACE``.
+``LIBC_NAMESPACE_DECL``. All implementation constructs in LLVM-libc are declared
+within the namespace ``LIBC_NAMESPACE_DECL``.
``.cpp`` File Structure
-----------------------
@@ -49,13 +49,13 @@ which must be defined with the ``LLVM_LIBC_FUNCTION`` macro. For example, the
// --- isalpha.cpp --- //
- namespace LIBC_NAMESPACE {
+ namespace LIBC_NAMESPACE_DECL {
LLVM_LIBC_FUNCTION(int, isalpha, (int c)) {
// ... implementation goes here.
}
- } // namespace LIBC_NAMESPACE
+ } // namespace LIBC_NAMESPACE_DECL
Notice the use of the macro ``LLVM_LIBC_FUNCTION``. This macro helps us define
a C alias symbol for the C++ implementation. For example, for a library build,
diff --git a/libc/src/__support/macros/config.h b/libc/src/__support/macros/config.h
index 6390c7992325d..3da6b8bc94c8b 100644
--- a/libc/src/__support/macros/config.h
+++ b/libc/src/__support/macros/config.h
@@ -27,4 +27,14 @@
#define LIBC_HAS_FEATURE(f) 0
#endif
+// Declare a LIBC_NAMESPACE with hidden visibility. `namespace
+// LIBC_NAMESPACE_DECL {` should be used around all declarations and definitions
+// for libc internals as opposed to just `namespace LIBC_NAMESPACE {`. This
+// ensures that all declarations within this namespace have hidden
+// visibility, which optimizes codegen for uses of symbols defined in other
+// translation units in ways that can be necessary for correctness by avoiding
+// dynamic relocations. This does not affect the public C symbols, which are
+// controlled independently via `LLVM_LIBC_FUNCTION_ATTR`.
+#define LIBC_NAMESPACE_DECL [[gnu::visibility("hidden")]] LIBC_NAMESPACE
+
#endif // LLVM_LIBC_SRC___SUPPORT_MACROS_CONFIG_H
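To make the declare-versus-access convention concrete, here is a minimal sketch of how an internal helper would be written under this scheme; `internal_add` and `caller` are illustrative names, not functions in the tree.

#include "src/__support/macros/config.h"

// Declared inside LIBC_NAMESPACE_DECL, so the symbol gets hidden visibility.
namespace LIBC_NAMESPACE_DECL {
int internal_add(int a, int b) { return a + b; }
} // namespace LIBC_NAMESPACE_DECL

// Accessed through plain LIBC_NAMESPACE, as the documentation above requires.
int caller() { return LIBC_NAMESPACE::internal_add(2, 3); }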
>From 94471e73fe3a6e5ddf700ed79941b1f1c8d2127b Mon Sep 17 00:00:00 2001
From: Fangrui Song <i at maskray.me>
Date: Wed, 3 Jul 2024 17:25:10 -0700
Subject: [PATCH 198/246] [MC] Move MCAssembler::isSymbolLinkerVisible to
MCSymbolMachO
---
llvm/include/llvm/MC/MCAssembler.h | 6 ------
llvm/include/llvm/MC/MCSymbolMachO.h | 12 ++++++++++++
llvm/lib/MC/MCAssembler.cpp | 11 -----------
llvm/lib/MC/MCMachOStreamer.cpp | 7 ++++---
llvm/lib/MC/MachObjectWriter.cpp | 6 +++---
5 files changed, 19 insertions(+), 23 deletions(-)
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index 9cd65d388d247..61188d88b9179 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -256,12 +256,6 @@ class MCAssembler {
// If this symbol is equivalent to A + Constant, return A.
const MCSymbol *getBaseSymbol(const MCSymbol &Symbol) const;
- /// Check whether a particular symbol is visible to the linker and is required
- /// in the symbol table, or whether it can be discarded by the assembler. This
- /// also effects whether the assembler treats the label as potentially
- /// defining a separate atom.
- bool isSymbolLinkerVisible(const MCSymbol &SD) const;
-
/// Emit the section contents to \p OS.
void writeSectionData(raw_ostream &OS, const MCSection *Section) const;
diff --git a/llvm/include/llvm/MC/MCSymbolMachO.h b/llvm/include/llvm/MC/MCSymbolMachO.h
index f75f61c198c11..730fbea0059a6 100644
--- a/llvm/include/llvm/MC/MCSymbolMachO.h
+++ b/llvm/include/llvm/MC/MCSymbolMachO.h
@@ -109,6 +109,18 @@ class MCSymbolMachO : public MCSymbol {
setFlags(Value & SF_DescFlagsMask);
}
+ // Check whether a particular symbol is visible to the linker and is required
+ // in the symbol table, or whether it can be discarded by the assembler. This
+ // also affects whether the assembler treats the label as potentially defining
+ // a separate atom.
+ bool isSymbolLinkerVisible() const {
+ // Non-temporary labels should always be visible to the linker.
+ if (!isTemporary())
+ return true;
+
+ return isUsedInReloc();
+ }
+
/// Get the encoded value of the flags as they will be emitted in to
/// the MachO binary
uint16_t getEncodedFlags(bool EncodeAsAltEntry) const {
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 14790f508323e..cdac58c5fbf68 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -160,17 +160,6 @@ bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
return true;
}
-bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
- // Non-temporary labels should always be visible to the linker.
- if (!Symbol.isTemporary())
- return true;
-
- if (Symbol.isUsedInReloc())
- return true;
-
- return false;
-}
-
bool MCAssembler::evaluateFixup(const MCFixup &Fixup, const MCFragment *DF,
MCValue &Target, const MCSubtargetInfo *STI,
uint64_t &Value, bool &WasForced) const {
diff --git a/llvm/lib/MC/MCMachOStreamer.cpp b/llvm/lib/MC/MCMachOStreamer.cpp
index 0b34d87033b7b..6eb9f44de44fd 100644
--- a/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/llvm/lib/MC/MCMachOStreamer.cpp
@@ -198,7 +198,7 @@ void MCMachOStreamer::emitEHSymAttributes(const MCSymbol *Symbol,
void MCMachOStreamer::emitLabel(MCSymbol *Symbol, SMLoc Loc) {
// We have to create a new fragment if this is an atom defining symbol,
// fragments cannot span atoms.
- if (getAssembler().isSymbolLinkerVisible(*Symbol))
+ if (cast<MCSymbolMachO>(Symbol)->isSymbolLinkerVisible())
insert(getContext().allocFragment<MCDataFragment>());
MCObjectStreamer::emitLabel(Symbol, Loc);
@@ -507,8 +507,9 @@ void MCMachOStreamer::finishImpl() {
// defining symbols.
DenseMap<const MCFragment *, const MCSymbol *> DefiningSymbolMap;
for (const MCSymbol &Symbol : getAssembler().symbols()) {
- if (getAssembler().isSymbolLinkerVisible(Symbol) && Symbol.isInSection() &&
- !Symbol.isVariable() && !cast<MCSymbolMachO>(Symbol).isAltEntry()) {
+ auto &Sym = cast<MCSymbolMachO>(Symbol);
+ if (Sym.isSymbolLinkerVisible() && Sym.isInSection() && !Sym.isVariable() &&
+ !Sym.isAltEntry()) {
// An atom defining symbol should never be internal to a fragment.
assert(Symbol.getOffset() == 0 &&
"Invalid offset in atom defining symbol!");
diff --git a/llvm/lib/MC/MachObjectWriter.cpp b/llvm/lib/MC/MachObjectWriter.cpp
index 12048e2e53442..e9499558a90db 100644
--- a/llvm/lib/MC/MachObjectWriter.cpp
+++ b/llvm/lib/MC/MachObjectWriter.cpp
@@ -582,7 +582,7 @@ void MachObjectWriter::computeSymbolTable(
// Build the string table.
for (const MCSymbol &Symbol : Asm.symbols()) {
- if (!Asm.isSymbolLinkerVisible(Symbol))
+ if (!cast<MCSymbolMachO>(Symbol).isSymbolLinkerVisible())
continue;
StringTable.add(Symbol.getName());
@@ -596,7 +596,7 @@ void MachObjectWriter::computeSymbolTable(
// important for letting us diff .o files.
for (const MCSymbol &Symbol : Asm.symbols()) {
// Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(Symbol))
+ if (!cast<MCSymbolMachO>(Symbol).isSymbolLinkerVisible())
continue;
if (!Symbol.isExternal() && !Symbol.isUndefined())
@@ -622,7 +622,7 @@ void MachObjectWriter::computeSymbolTable(
// Now add the data for local symbols.
for (const MCSymbol &Symbol : Asm.symbols()) {
// Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(Symbol))
+ if (!cast<MCSymbolMachO>(Symbol).isSymbolLinkerVisible())
continue;
if (Symbol.isExternal() || Symbol.isUndefined())
>From a0c6b8aef853eedaa0980f07c0a502a5a8a9740e Mon Sep 17 00:00:00 2001
From: Maksim Panchenko <maks at fb.com>
Date: Wed, 3 Jul 2024 17:30:37 -0700
Subject: [PATCH 199/246] [BOLT][docs] Add merge-fdata to Linux optimization
guide (#97659)
---
bolt/docs/OptimizingLinux.md | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/bolt/docs/OptimizingLinux.md b/bolt/docs/OptimizingLinux.md
index 1fffcf2252906..0045f0ead9fd0 100644
--- a/bolt/docs/OptimizingLinux.md
+++ b/bolt/docs/OptimizingLinux.md
@@ -44,6 +44,11 @@ $ perf2bolt -p perf.data -o perf.fdata vmlinux
Under a high load, `perf.data` should be several gigabytes in size and you should expect the converted `perf.fdata` not to exceed 100 MB.
+Profiles collected from multiple workloads can be joined into a single profile using the `merge-fdata` utility:
+```bash
+$ merge-fdata perf.1.fdata perf.2.fdata ... perf.<N>.fdata > perf.merged.fdata
+```
+
Two changes are required for the kernel build. The first one is optional but highly recommended. It introduces a BOLT-reserved space into `vmlinux` code section:
>From 73f5f83b192b0a27f7edae5365c247961d9f1bd9 Mon Sep 17 00:00:00 2001
From: Haohai Wen <haohai.wen at intel.com>
Date: Thu, 4 Jul 2024 09:52:38 +0800
Subject: [PATCH 200/246] [BasicBlockSections] Using MBBSectionID as DenseMap
key (#97295)
getSectionIDNum may return the same value for two different MBBSectionIDs,
e.g. a Cold type MBBSectionID with number 0 and a Default type MBBSectionID
with number 2 get the same value 2 from getSectionIDNum. This may lead to
entries in MBBSectionRanges being overwritten. Using the MBBSectionID itself
as the DenseMap key is a better choice.
---
llvm/include/llvm/CodeGen/AsmPrinter.h | 4 +--
llvm/include/llvm/CodeGen/MachineBasicBlock.h | 26 ++++++++++++++-----
llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 17 +++++++-----
.../CodeGen/AsmPrinter/DwarfCompileUnit.cpp | 2 +-
llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 4 +--
llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp | 4 +--
6 files changed, 37 insertions(+), 20 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h
index a60dce30c4a6c..dc00bd57d655d 100644
--- a/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -137,7 +137,7 @@ class AsmPrinter : public MachineFunctionPass {
MCSymbol *BeginLabel, *EndLabel;
};
- MapVector<unsigned, MBBSectionRange> MBBSectionRanges;
+ MapVector<MBBSectionID, MBBSectionRange> MBBSectionRanges;
/// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
/// its number of uses by other globals.
@@ -157,7 +157,7 @@ class AsmPrinter : public MachineFunctionPass {
/// Map a basic block section ID to the exception symbol associated with that
/// section. Map entries are assigned and looked up via
/// AsmPrinter::getMBBExceptionSym.
- DenseMap<unsigned, MCSymbol *> MBBSectionExceptionSyms;
+ DenseMap<MBBSectionID, MCSymbol *> MBBSectionExceptionSyms;
// The symbol used to represent the start of the current BB section of the
// function. This is used to calculate the size of the BB section.
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index e4919ecabd705..562d37ef32f54 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/ilist.h"
@@ -74,6 +75,25 @@ struct MBBSectionID {
MBBSectionID(SectionType T) : Type(T), Number(0) {}
};
+template <> struct DenseMapInfo<MBBSectionID> {
+ using TypeInfo = DenseMapInfo<MBBSectionID::SectionType>;
+ using NumberInfo = DenseMapInfo<unsigned>;
+
+ static inline MBBSectionID getEmptyKey() {
+ return MBBSectionID(NumberInfo::getEmptyKey());
+ }
+ static inline MBBSectionID getTombstoneKey() {
+ return MBBSectionID(NumberInfo::getTombstoneKey());
+ }
+ static unsigned getHashValue(const MBBSectionID &SecID) {
+ return detail::combineHashValue(TypeInfo::getHashValue(SecID.Type),
+ NumberInfo::getHashValue(SecID.Number));
+ }
+ static bool isEqual(const MBBSectionID &LHS, const MBBSectionID &RHS) {
+ return LHS == RHS;
+ }
+};
+
// This structure represents the information for a basic block pertaining to
// the basic block sections profile.
struct UniqueBBID {
@@ -658,12 +678,6 @@ class MachineBasicBlock
/// Returns the section ID of this basic block.
MBBSectionID getSectionID() const { return SectionID; }
- /// Returns the unique section ID number of this basic block.
- unsigned getSectionIDNum() const {
- return ((unsigned)MBBSectionID::SectionType::Cold) -
- ((unsigned)SectionID.Type) + SectionID.Number;
- }
-
/// Sets the fixed BBID of this basic block.
void setBBID(const UniqueBBID &V) {
assert(!BBID.has_value() && "Cannot change BBID.");
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index c52cbff689dc5..1391893e55a52 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1374,7 +1374,7 @@ void AsmPrinter::emitBBAddrMapSection(const MachineFunction &MF) {
OutStreamer->emitULEB128IntValue(MBBSectionRanges.size());
}
// Number of blocks in each MBB section.
- MapVector<unsigned, unsigned> MBBSectionNumBlocks;
+ MapVector<MBBSectionID, unsigned> MBBSectionNumBlocks;
const MCSymbol *PrevMBBEndSymbol = nullptr;
if (!Features.MultiBBRange) {
OutStreamer->AddComment("function address");
@@ -1388,7 +1388,7 @@ void AsmPrinter::emitBBAddrMapSection(const MachineFunction &MF) {
BBCount++;
if (MBB.isEndSection()) {
// Store each section's basic block count when it ends.
- MBBSectionNumBlocks[MBB.getSectionIDNum()] = BBCount;
+ MBBSectionNumBlocks[MBB.getSectionID()] = BBCount;
// Reset the count for the next section.
BBCount = 0;
}
@@ -1404,8 +1404,7 @@ void AsmPrinter::emitBBAddrMapSection(const MachineFunction &MF) {
OutStreamer->AddComment("base address");
OutStreamer->emitSymbolValue(MBBSymbol, getPointerSize());
OutStreamer->AddComment("number of basic blocks");
- OutStreamer->emitULEB128IntValue(
- MBBSectionNumBlocks[MBB.getSectionIDNum()]);
+ OutStreamer->emitULEB128IntValue(MBBSectionNumBlocks[MBB.getSectionID()]);
PrevMBBEndSymbol = MBBSymbol;
}
// TODO: Remove this check when version 1 is deprecated.
@@ -1855,7 +1854,9 @@ void AsmPrinter::emitFunctionBody() {
OutContext);
OutStreamer->emitELFSize(CurrentSectionBeginSym, SizeExp);
}
- MBBSectionRanges[MBB.getSectionIDNum()] =
+ assert(!MBBSectionRanges.contains(MBB.getSectionID()) &&
+ "Overwrite section range");
+ MBBSectionRanges[MBB.getSectionID()] =
MBBSectionRange{CurrentSectionBeginSym, MBB.getEndSymbol()};
}
}
@@ -1972,7 +1973,9 @@ void AsmPrinter::emitFunctionBody() {
for (auto &Handler : Handlers)
Handler->markFunctionEnd();
- MBBSectionRanges[MF->front().getSectionIDNum()] =
+ assert(!MBBSectionRanges.contains(MF->front().getSectionID()) &&
+ "Overwrite section range");
+ MBBSectionRanges[MF->front().getSectionID()] =
MBBSectionRange{CurrentFnBegin, CurrentFnEnd};
// Print out jump tables referenced by the function.
@@ -2536,7 +2539,7 @@ bool AsmPrinter::doFinalization(Module &M) {
}
MCSymbol *AsmPrinter::getMBBExceptionSym(const MachineBasicBlock &MBB) {
- auto Res = MBBSectionExceptionSyms.try_emplace(MBB.getSectionIDNum());
+ auto Res = MBBSectionExceptionSyms.try_emplace(MBB.getSectionID());
if (Res.second)
Res.first->second = createTempSymbol("exception");
return Res.first->second;
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index c1e7f01f0eba5..c1e8355353cfd 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -684,7 +684,7 @@ void DwarfCompileUnit::attachRangesOrLowHighPC(
// the order of blocks will be frozen beyond this point.
do {
if (MBB->sameSection(EndMBB) || MBB->isEndSection()) {
- auto MBBSectionRange = Asm->MBBSectionRanges[MBB->getSectionIDNum()];
+ auto MBBSectionRange = Asm->MBBSectionRanges[MBB->getSectionID()];
List.push_back(
{MBB->sameSection(BeginMBB) ? BeginLabel
: MBBSectionRange.BeginLabel,
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 2addf938c8b63..80cd5ec501f25 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1713,7 +1713,7 @@ bool DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
const MCSymbol *EndLabel;
if (std::next(EI) == Entries.end()) {
const MachineBasicBlock &EndMBB = Asm->MF->back();
- EndLabel = Asm->MBBSectionRanges[EndMBB.getSectionIDNum()].EndLabel;
+ EndLabel = Asm->MBBSectionRanges[EndMBB.getSectionID()].EndLabel;
if (EI->isClobber())
EndMI = EI->getInstr();
}
@@ -2064,7 +2064,7 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
bool PrevInstInSameSection =
(!PrevInstBB ||
- PrevInstBB->getSectionIDNum() == MI->getParent()->getSectionIDNum());
+ PrevInstBB->getSectionID() == MI->getParent()->getSectionID());
if (DL == PrevInstLoc && PrevInstInSameSection) {
// If we have an ongoing unspecified location, nothing to do here.
if (!DL)
diff --git a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
index 32239535e4d02..1c603f5988ad1 100644
--- a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
@@ -253,8 +253,8 @@ void EHStreamer::computeCallSiteTable(
// We start a call-site range upon function entry and at the beginning of
// every basic block section.
CallSiteRanges.push_back(
- {Asm->MBBSectionRanges[MBB.getSectionIDNum()].BeginLabel,
- Asm->MBBSectionRanges[MBB.getSectionIDNum()].EndLabel,
+ {Asm->MBBSectionRanges[MBB.getSectionID()].BeginLabel,
+ Asm->MBBSectionRanges[MBB.getSectionID()].EndLabel,
Asm->getMBBExceptionSym(MBB), CallSites.size()});
PreviousIsInvoke = false;
SawPotentiallyThrowing = false;
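The DenseMapInfo specialization above is LLVM's version of a general pattern: when a map key has two components, both must feed the hash function and the equality check instead of being folded into one derived integer. Below is a standalone sketch of the same idea using only the standard library; `SectionID`, `SectionIDHash`, and `section_ranges` are illustrative names, not the LLVM types.

#include <cstddef>
#include <functional>
#include <unordered_map>

struct SectionID {
  enum class Kind { Default, Exception, Cold } kind;
  unsigned number;
  bool operator==(const SectionID &other) const {
    return kind == other.kind && number == other.number;
  }
};

// Hash both fields; correctness comes from operator== comparing both, so two
// distinct IDs are never conflated even if their hashes happen to collide.
struct SectionIDHash {
  std::size_t operator()(const SectionID &id) const {
    std::size_t h1 = std::hash<int>{}(static_cast<int>(id.kind));
    std::size_t h2 = std::hash<unsigned>{}(id.number);
    return h1 ^ (h2 + 0x9e3779b9 + (h1 << 6) + (h1 >> 2)); // boost-style mix
  }
};

std::unordered_map<SectionID, unsigned, SectionIDHash> section_ranges;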
>From 507b0f6714ec94e286d8c1ef16f871e1b1564426 Mon Sep 17 00:00:00 2001
From: Ahmed Bougacha <ahmed at bougacha.org>
Date: Wed, 3 Jul 2024 19:33:41 -0700
Subject: [PATCH 201/246] [AArch64] Clean up ptrauth-call test a bit. NFC.
The test was recently committed, but was written so long ago that it
still had typed pointers. Switch them to opaque ptrs. While there,
tidy up some formatting, and turn a couple CHECKs into CHECK-NEXT.
---
llvm/test/CodeGen/AArch64/ptrauth-call.ll | 64 +++++++++++------------
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-call.ll b/llvm/test/CodeGen/AArch64/ptrauth-call.ll
index b1249891b04b4..72e158fdf9916 100644
--- a/llvm/test/CodeGen/AArch64/ptrauth-call.ll
+++ b/llvm/test/CodeGen/AArch64/ptrauth-call.ll
@@ -6,14 +6,14 @@
; RUN: llc -mtriple arm64e-apple-darwin -o - %s -asm-verbose=0 \
; RUN: -global-isel -global-isel-abort=1 -verify-machineinstrs \
-; RUN: | FileCheck %s --check-prefixes=CHECK,DARWIN
+; RUN: | FileCheck %s --check-prefixes=CHECK,DARWIN
; RUN: llc -mtriple aarch64 -mattr=+pauth -o - %s -asm-verbose=0 \
; RUN: -global-isel -global-isel-abort=1 -verify-machineinstrs \
-; RUN: | FileCheck %s --check-prefixes=CHECK,ELF
+; RUN: | FileCheck %s --check-prefixes=CHECK,ELF
-define i32 @test_call_ia_0(i32 ()* %arg0) #0 {
+define i32 @test_call_ia_0(ptr %arg0) #0 {
; DARWIN-LABEL: test_call_ia_0:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: blraaz x0
@@ -29,7 +29,7 @@ define i32 @test_call_ia_0(i32 ()* %arg0) #0 {
ret i32 %tmp0
}
-define i32 @test_call_ib_0(i32 ()* %arg0) #0 {
+define i32 @test_call_ib_0(ptr %arg0) #0 {
; DARWIN-LABEL: test_call_ib_0:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: blrabz x0
@@ -45,21 +45,21 @@ define i32 @test_call_ib_0(i32 ()* %arg0) #0 {
ret i32 %tmp0
}
-define i32 @test_tailcall_ia_0(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ia_0(ptr %arg0) #0 {
; CHECK-LABEL: test_tailcall_ia_0:
-; CHECK: braaz x0
+; CHECK-NEXT: braaz x0
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 0) ]
ret i32 %tmp0
}
-define i32 @test_tailcall_ib_0(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ib_0(ptr %arg0) #0 {
; CHECK-LABEL: test_tailcall_ib_0:
-; CHECK: brabz x0
+; CHECK-NEXT: brabz x0
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 0) ]
ret i32 %tmp0
}
-define i32 @test_call_ia_imm(i32 ()* %arg0) #0 {
+define i32 @test_call_ia_imm(ptr %arg0) #0 {
; DARWIN-LABEL: test_call_ia_imm:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: mov x17, #42
@@ -77,7 +77,7 @@ define i32 @test_call_ia_imm(i32 ()* %arg0) #0 {
ret i32 %tmp0
}
-define i32 @test_call_ib_imm(i32 ()* %arg0) #0 {
+define i32 @test_call_ib_imm(ptr %arg0) #0 {
; DARWIN-LABEL: test_call_ib_imm:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: mov x17, #42
@@ -95,7 +95,7 @@ define i32 @test_call_ib_imm(i32 ()* %arg0) #0 {
ret i32 %tmp0
}
-define i32 @test_tailcall_ia_imm(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ia_imm(ptr %arg0) #0 {
; CHECK-LABEL: test_tailcall_ia_imm:
; CHECK-NEXT: mov x16, #42
; CHECK-NEXT: braa x0, x16
@@ -103,7 +103,7 @@ define i32 @test_tailcall_ia_imm(i32 ()* %arg0) #0 {
ret i32 %tmp0
}
-define i32 @test_tailcall_ib_imm(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ib_imm(ptr %arg0) #0 {
; CHECK-LABEL: test_tailcall_ib_imm:
; CHECK-NEXT: mov x16, #42
; CHECK-NEXT: brab x0, x16
@@ -111,7 +111,7 @@ define i32 @test_tailcall_ib_imm(i32 ()* %arg0) #0 {
ret i32 %tmp0
}
-define i32 @test_call_ia_var(i32 ()* %arg0, i64* %arg1) #0 {
+define i32 @test_call_ia_var(ptr %arg0, ptr %arg1) #0 {
; DARWIN-LABEL: test_call_ia_var:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: ldr x8, [x1]
@@ -125,12 +125,12 @@ define i32 @test_call_ia_var(i32 ()* %arg0, i64* %arg1) #0 {
; ELF-NEXT: blraa x0, x8
; ELF-NEXT: ldr x30, [sp], #16
; ELF-NEXT: ret
- %tmp0 = load i64, i64* %arg1
+ %tmp0 = load i64, ptr %arg1
%tmp1 = call i32 %arg0() [ "ptrauth"(i32 0, i64 %tmp0) ]
ret i32 %tmp1
}
-define i32 @test_call_ib_var(i32 ()* %arg0, i64* %arg1) #0 {
+define i32 @test_call_ib_var(ptr %arg0, ptr %arg1) #0 {
; DARWIN-LABEL: test_call_ib_var:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: ldr x8, [x1]
@@ -144,30 +144,30 @@ define i32 @test_call_ib_var(i32 ()* %arg0, i64* %arg1) #0 {
; ELF-NEXT: blrab x0, x8
; ELF-NEXT: ldr x30, [sp], #16
; ELF-NEXT: ret
- %tmp0 = load i64, i64* %arg1
+ %tmp0 = load i64, ptr %arg1
%tmp1 = call i32 %arg0() [ "ptrauth"(i32 1, i64 %tmp0) ]
ret i32 %tmp1
}
-define i32 @test_tailcall_ia_var(i32 ()* %arg0, i64* %arg1) #0 {
+define i32 @test_tailcall_ia_var(ptr %arg0, ptr %arg1) #0 {
; CHECK-LABEL: test_tailcall_ia_var:
; CHECK: ldr x1, [x1]
; CHECK: braa x0, x1
- %tmp0 = load i64, i64* %arg1
+ %tmp0 = load i64, ptr %arg1
%tmp1 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 %tmp0) ]
ret i32 %tmp1
}
-define i32 @test_tailcall_ib_var(i32 ()* %arg0, i64* %arg1) #0 {
+define i32 @test_tailcall_ib_var(ptr %arg0, ptr %arg1) #0 {
; CHECK-LABEL: test_tailcall_ib_var:
; CHECK: ldr x1, [x1]
; CHECK: brab x0, x1
- %tmp0 = load i64, i64* %arg1
+ %tmp0 = load i64, ptr %arg1
%tmp1 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 %tmp0) ]
ret i32 %tmp1
}
-define i32 @test_call_ia_arg(i32 ()* %arg0, i64 %arg1) #0 {
+define i32 @test_call_ia_arg(ptr %arg0, i64 %arg1) #0 {
; DARWIN-LABEL: test_call_ia_arg:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: blraa x0, x1
@@ -183,7 +183,7 @@ define i32 @test_call_ia_arg(i32 ()* %arg0, i64 %arg1) #0 {
ret i32 %tmp0
}
-define i32 @test_call_ib_arg(i32 ()* %arg0, i64 %arg1) #0 {
+define i32 @test_call_ib_arg(ptr %arg0, i64 %arg1) #0 {
; DARWIN-LABEL: test_call_ib_arg:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: blrab x0, x1
@@ -199,21 +199,21 @@ define i32 @test_call_ib_arg(i32 ()* %arg0, i64 %arg1) #0 {
ret i32 %tmp0
}
-define i32 @test_tailcall_ia_arg(i32 ()* %arg0, i64 %arg1) #0 {
+define i32 @test_tailcall_ia_arg(ptr %arg0, i64 %arg1) #0 {
; CHECK-LABEL: test_tailcall_ia_arg:
; CHECK: braa x0, x1
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 %arg1) ]
ret i32 %tmp0
}
-define i32 @test_tailcall_ib_arg(i32 ()* %arg0, i64 %arg1) #0 {
+define i32 @test_tailcall_ib_arg(ptr %arg0, i64 %arg1) #0 {
; CHECK-LABEL: test_tailcall_ib_arg:
; CHECK: brab x0, x1
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 %arg1) ]
ret i32 %tmp0
}
-define i32 @test_call_ia_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
+define i32 @test_call_ia_arg_ind(ptr %arg0, i64 %arg1) #0 {
; DARWIN-LABEL: test_call_ia_arg_ind:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: ldr x8, [x0]
@@ -227,12 +227,12 @@ define i32 @test_call_ia_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
; ELF-NEXT: blraa x8, x1
; ELF-NEXT: ldr x30, [sp], #16
; ELF-NEXT: ret
- %tmp0 = load i32 ()*, i32 ()** %arg0
+ %tmp0 = load ptr, ptr %arg0
%tmp1 = call i32 %tmp0() [ "ptrauth"(i32 0, i64 %arg1) ]
ret i32 %tmp1
}
-define i32 @test_call_ib_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
+define i32 @test_call_ib_arg_ind(ptr %arg0, i64 %arg1) #0 {
; DARWIN-LABEL: test_call_ib_arg_ind:
; DARWIN-NEXT: stp x29, x30, [sp, #-16]!
; DARWIN-NEXT: ldr x8, [x0]
@@ -246,25 +246,25 @@ define i32 @test_call_ib_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
; ELF-NEXT: blrab x8, x1
; ELF-NEXT: ldr x30, [sp], #16
; ELF-NEXT: ret
- %tmp0 = load i32 ()*, i32 ()** %arg0
+ %tmp0 = load ptr, ptr %arg0
%tmp1 = call i32 %tmp0() [ "ptrauth"(i32 1, i64 %arg1) ]
ret i32 %tmp1
}
-define i32 @test_tailcall_ia_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
+define i32 @test_tailcall_ia_arg_ind(ptr %arg0, i64 %arg1) #0 {
; CHECK-LABEL: test_tailcall_ia_arg_ind:
; CHECK: ldr x0, [x0]
; CHECK: braa x0, x1
- %tmp0 = load i32 ()*, i32 ()** %arg0
+ %tmp0 = load ptr, ptr %arg0
%tmp1 = tail call i32 %tmp0() [ "ptrauth"(i32 0, i64 %arg1) ]
ret i32 %tmp1
}
-define i32 @test_tailcall_ib_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
+define i32 @test_tailcall_ib_arg_ind(ptr %arg0, i64 %arg1) #0 {
; CHECK-LABEL: test_tailcall_ib_arg_ind:
; CHECK: ldr x0, [x0]
; CHECK: brab x0, x1
- %tmp0 = load i32 ()*, i32 ()** %arg0
+ %tmp0 = load ptr, ptr %arg0
%tmp1 = tail call i32 %tmp0() [ "ptrauth"(i32 1, i64 %arg1) ]
ret i32 %tmp1
}
>From ac2013560505f7c85a9d9061ea705124d6681719 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Thu, 4 Jul 2024 11:34:25 +0800
Subject: [PATCH 202/246] [RISCV] Rematerialize vid.v (#97520)
This adds initial support for rematerializing vector instructions,
starting with vid.v since it's simple and has the fewest operands. It
has one passthru operand, which we need to check is undefined. It also
has an AVL operand, but it's fine to rematerialize
with it because it's scalar and register allocation is split between
vector and scalar.
RISCVInsertVSETVLI can still happen before vector regalloc if
-riscv-vsetvl-after-rvv-regalloc is false, so this makes sure that we
only rematerialize after regalloc by checking for the implicit uses that
are added.
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 12 ++
llvm/lib/Target/RISCV/RISCVInstrInfo.h | 2 +
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 1 +
llvm/test/CodeGen/RISCV/rvv/remat.ll | 111 ++++++++++++++++++
4 files changed, 126 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/remat.ll
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 67e2f3f5d6373..3e3292ccc148a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -166,6 +166,18 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
return 0;
}
+bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
+ const MachineInstr &MI) const {
+ if (RISCV::getRVVMCOpcode(MI.getOpcode()) == RISCV::VID_V &&
+ MI.getOperand(1).isUndef() &&
+ /* After RISCVInsertVSETVLI most pseudos will have implicit uses on vl and
+ vtype. Make sure we only rematerialize before RISCVInsertVSETVLI
+ i.e. -riscv-vsetvl-after-rvv-regalloc=true */
+ !MI.hasRegisterImplicitUseOperand(RISCV::VTYPE))
+ return true;
+ return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
+}
+
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
unsigned NumRegs) {
return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index e069717aaef23..f0c0953a3e56a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -76,6 +76,8 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex,
unsigned &MemBytes) const override;
+ bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
+
void copyPhysRegVector(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
MCRegister DstReg, MCRegister SrcReg, bool KillSrc,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 45a57d1170814..42d6b03968d74 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6629,6 +6629,7 @@ defm PseudoVIOTA_M: VPseudoVIOTA_M;
//===----------------------------------------------------------------------===//
// 15.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
+let isReMaterializable = 1 in
defm PseudoVID : VPseudoVID_V;
} // Predicates = [HasVInstructions]
diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
new file mode 100644
index 0000000000000..d7a8a13dd3664
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,POSTRA
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-vsetvl-after-rvv-regalloc=false -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,PRERA
+
+define void @vid(ptr %p) {
+; POSTRA-LABEL: vid:
+; POSTRA: # %bb.0:
+; POSTRA-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; POSTRA-NEXT: vid.v v8
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vl8re64.v v16, (a0)
+; POSTRA-NEXT: vl8re64.v v24, (a0)
+; POSTRA-NEXT: vl8re64.v v0, (a0)
+; POSTRA-NEXT: vl8re64.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v0, (a0)
+; POSTRA-NEXT: vs8r.v v24, (a0)
+; POSTRA-NEXT: vs8r.v v16, (a0)
+; POSTRA-NEXT: vid.v v8
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: ret
+;
+; PRERA-LABEL: vid:
+; PRERA: # %bb.0:
+; PRERA-NEXT: addi sp, sp, -16
+; PRERA-NEXT: .cfi_def_cfa_offset 16
+; PRERA-NEXT: csrr a1, vlenb
+; PRERA-NEXT: slli a1, a1, 3
+; PRERA-NEXT: sub sp, sp, a1
+; PRERA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; PRERA-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; PRERA-NEXT: vid.v v8
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: addi a1, sp, 16
+; PRERA-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; PRERA-NEXT: vl8re64.v v24, (a0)
+; PRERA-NEXT: vl8re64.v v0, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v0, (a0)
+; PRERA-NEXT: vs8r.v v24, (a0)
+; PRERA-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: csrr a0, vlenb
+; PRERA-NEXT: slli a0, a0, 3
+; PRERA-NEXT: add sp, sp, a0
+; PRERA-NEXT: addi sp, sp, 16
+; PRERA-NEXT: ret
+ %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> poison, i64 -1)
+ store volatile <vscale x 8 x i64> %vid, ptr %p
+
+ %a = load volatile <vscale x 8 x i64>, ptr %p
+ %b = load volatile <vscale x 8 x i64>, ptr %p
+ %c = load volatile <vscale x 8 x i64>, ptr %p
+ %d = load volatile <vscale x 8 x i64>, ptr %p
+ store volatile <vscale x 8 x i64> %d, ptr %p
+ store volatile <vscale x 8 x i64> %c, ptr %p
+ store volatile <vscale x 8 x i64> %b, ptr %p
+ store volatile <vscale x 8 x i64> %a, ptr %p
+
+ store volatile <vscale x 8 x i64> %vid, ptr %p
+ ret void
+}
+
+
+define void @vid_passthru(ptr %p, <vscale x 8 x i64> %v) {
+; CHECK-LABEL: vid_passthru:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetivli zero, 1, e64, m8, tu, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vs8r.v v0, (a0)
+; CHECK-NEXT: vs8r.v v24, (a0)
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> %v, i64 1)
+ store volatile <vscale x 8 x i64> %vid, ptr %p
+
+ %a = load volatile <vscale x 8 x i64>, ptr %p
+ %b = load volatile <vscale x 8 x i64>, ptr %p
+ %c = load volatile <vscale x 8 x i64>, ptr %p
+ %d = load volatile <vscale x 8 x i64>, ptr %p
+ store volatile <vscale x 8 x i64> %d, ptr %p
+ store volatile <vscale x 8 x i64> %c, ptr %p
+ store volatile <vscale x 8 x i64> %b, ptr %p
+ store volatile <vscale x 8 x i64> %a, ptr %p
+
+ store volatile <vscale x 8 x i64> %vid, ptr %p
+ ret void
+}
>From 32f7672acc92d6b3d9b64cfeb9b25c31ae542337 Mon Sep 17 00:00:00 2001
From: jyu2-git <jennifer.yu at intel.com>
Date: Wed, 3 Jul 2024 20:56:53 -0700
Subject: [PATCH 203/246] [Clang][OpenMP] This is an additional fix for #92210.
(#94802)
Fix another runtime problem when both a pointer and its pointee are
explicitly mapped in a target data region.
#92210 only addressed the problem for the target region; the fix was
still missing for the target data region.
The change simply passes AreBothBasePtrAndPteeMapped to
generateInfoForComponentList when processing target data.
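As a minimal, self-contained illustration of the pattern being fixed (it
mirrors the new regression test in the diff below and is not part of the
patch itself): both the pointer and an element of its pointee are mapped on
a single target data construct.

#include <cstdlib>

int main() {
  int *ptr = static_cast<int *>(std::malloc(3 * sizeof(int)));
  // Map the pointer itself and one element of its pointee on the same
  // construct; the fix makes the target data path attach them consistently,
  // as the target region path already did after #92210.
#pragma omp target data map(ptr, ptr[2])
  {
    ptr[2] = 9;
  }
  std::free(ptr);
  return 0;
}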
---------
Co-authored-by: Alexey Bataev <a.bataev at gmx.com>
---
clang/lib/CodeGen/CGOpenMPRuntime.cpp | 18 +++++++++++++-
.../target_data_use_device_addr_codegen.cpp | 12 ++++------
...arget_map_both_pointer_pointee_codegen.cpp | 24 +++++++++++++++++++
.../test/mapping/map_both_pointer_pointee.c | 20 ++++++++++++++++
4 files changed, 66 insertions(+), 8 deletions(-)
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index b47b521edd32c..3febeed7b72d9 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -8034,6 +8034,21 @@ class MappableExprsHandler {
MapCombinedInfoTy StructBaseCurInfo;
const Decl *D = Data.first;
const ValueDecl *VD = cast_or_null<ValueDecl>(D);
+ bool HasMapBasePtr = false;
+ bool HasMapArraySec = false;
+ if (VD && VD->getType()->isAnyPointerType()) {
+ for (const auto &M : Data.second) {
+ HasMapBasePtr = any_of(M, [](const MapInfo &L) {
+ return isa_and_present<DeclRefExpr>(L.VarRef);
+ });
+ HasMapArraySec = any_of(M, [](const MapInfo &L) {
+ return isa_and_present<ArraySectionExpr, ArraySubscriptExpr>(
+ L.VarRef);
+ });
+ if (HasMapBasePtr && HasMapArraySec)
+ break;
+ }
+ }
for (const auto &M : Data.second) {
for (const MapInfo &L : M) {
assert(!L.Components.empty() &&
@@ -8050,7 +8065,8 @@ class MappableExprsHandler {
CurInfo, StructBaseCurInfo, PartialStruct,
/*IsFirstComponentList=*/false, L.IsImplicit,
/*GenerateAllInfoForClauses*/ true, L.Mapper, L.ForDeviceAddr, VD,
- L.VarRef);
+ L.VarRef, /*OverlappedElements*/ std::nullopt,
+ HasMapBasePtr && HasMapArraySec);
// If this entry relates to a device pointer, set the relevant
// declaration and add the 'return pointer' flag.
diff --git a/clang/test/OpenMP/target_data_use_device_addr_codegen.cpp b/clang/test/OpenMP/target_data_use_device_addr_codegen.cpp
index ae0653d0585d4..7c4b96971ae70 100644
--- a/clang/test/OpenMP/target_data_use_device_addr_codegen.cpp
+++ b/clang/test/OpenMP/target_data_use_device_addr_codegen.cpp
@@ -13,7 +13,7 @@
// CHECK-DAG: [[SIZES1:@.+]] = private unnamed_addr constant [6 x i64] [i64 4, i64 16, i64 4, i64 4, i64 0, i64 4]
// 64 = 0x40 = OMP_MAP_RETURN_PARAM
-// CHECK-DAG: [[MAPTYPES1:@.+]] = private unnamed_addr constant [6 x i64] [i64 67, i64 67, i64 3, i64 67, i64 67, i64 67]
+// CHECK-DAG: [[MAPTYPES1:@.+]] = private unnamed_addr constant [6 x i64] [i64 67, i64 115, i64 51, i64 67, i64 67, i64 67]
// CHECK-DAG: [[SIZES2:@.+]] = private unnamed_addr constant [6 x i64] [i64 0, i64 4, i64 16, i64 4, i64 4, i64 0]
// 0 = OMP_MAP_NONE
// 281474976710720 = 0x1000000000040 = OMP_MAP_MEMBER_OF | OMP_MAP_RETURN_PARAM
@@ -54,11 +54,9 @@ int main() {
// CHECK: [[SIZES:%.+]] = alloca [6 x i64],
// CHECK: [[VLA_ADDR:%.+]] = alloca float, i64 %{{.+}},
// CHECK: [[PTR:%.+]] = load ptr, ptr [[PTR_ADDR]],
-// CHECK-NEXT: [[P4:%.+]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT: [[ARR_IDX:%.+]] = getelementptr inbounds float, ptr [[P4]], i64 3
+// CHECK-NEXT: [[ARR_IDX:%.+]] = getelementptr inbounds float, ptr [[PTR]], i64 3
// CHECK: [[P5:%.+]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT: [[P6:%.+]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT: [[ARR_IDX1:%.+]] = getelementptr inbounds float, ptr [[P6]], i64 0
+// CHECK-NEXT: [[ARR_IDX1:%.+]] = getelementptr inbounds float, ptr [[P5]], i64 0
// CHECK: [[P7:%.+]] = load ptr, ptr [[REF_ADDR]],
// CHECK-NEXT: [[REF:%.+]] = load ptr, ptr [[REF_ADDR]],
// CHECK-NEXT: [[ARR_IDX2:%.+]] = getelementptr inbounds [4 x float], ptr [[ARR_ADDR]], i64 0, i64 0
@@ -70,11 +68,11 @@ int main() {
// CHECK: [[PTR0:%.+]] = getelementptr inbounds [6 x ptr], ptr [[PTRS]], i32 0, i32 0
// CHECK: store ptr [[A_ADDR]], ptr [[PTR0]],
// CHECK: [[BPTR1:%.+]] = getelementptr inbounds [6 x ptr], ptr [[BPTRS]], i32 0, i32 1
-// CHECK: store ptr [[PTR]], ptr [[BPTR1]],
+// CHECK: store ptr [[PTR_ADDR]], ptr [[BPTR1]],
// CHECK: [[PTR1:%.+]] = getelementptr inbounds [6 x ptr], ptr [[PTRS]], i32 0, i32 1
// CHECK: store ptr [[ARR_IDX]], ptr [[PTR1]],
// CHECK: [[BPTR2:%.+]] = getelementptr inbounds [6 x ptr], ptr [[BPTRS]], i32 0, i32 2
-// CHECK: store ptr [[P5]], ptr [[BPTR2]],
+// CHECK: store ptr [[PTR_ADDR]], ptr [[BPTR2]],
// CHECK: [[PTR2:%.+]] = getelementptr inbounds [6 x ptr], ptr [[PTRS]], i32 0, i32 2
// CHECK: store ptr [[ARR_IDX1]], ptr [[PTR2]],
// CHECK: [[BPTR3:%.+]] = getelementptr inbounds [6 x ptr], ptr [[BPTRS]], i32 0, i32 3
diff --git a/clang/test/OpenMP/target_map_both_pointer_pointee_codegen.cpp b/clang/test/OpenMP/target_map_both_pointer_pointee_codegen.cpp
index e2c27f37f5b9d..1562aaa2760f2 100644
--- a/clang/test/OpenMP/target_map_both_pointer_pointee_codegen.cpp
+++ b/clang/test/OpenMP/target_map_both_pointer_pointee_codegen.cpp
@@ -20,6 +20,10 @@ void foo() {
{
ptr[2] = 8;
}
+ #pragma omp target data map(ptr, ptr[2])
+ {
+ ptr[2] = 9;
+ }
}
#endif
// CHECK-LABEL: define {{[^@]+}}@_Z3foov
@@ -34,6 +38,9 @@ void foo() {
// CHECK-NEXT: [[DOTOFFLOAD_PTRS3:%.*]] = alloca [1 x ptr], align 8
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS4:%.*]] = alloca [1 x ptr], align 8
// CHECK-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS9:%.*]] = alloca [1 x ptr], align 8
+// CHECK-NEXT: [[DOTOFFLOAD_PTRS10:%.*]] = alloca [1 x ptr], align 8
+// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS11:%.*]] = alloca [1 x ptr], align 8
// CHECK-NEXT: [[CALL:%.*]] = call noalias noundef ptr @_Z6malloci(i32 noundef signext 12) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: store ptr [[CALL]], ptr [[PTR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR]], align 8
@@ -124,6 +131,23 @@ void foo() {
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3foov_l19(ptr [[TMP22]]) #[[ATTR3]]
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT7]]
// CHECK: omp_offload.cont7:
+// CHECK-NEXT: [[TMP44:%.*]] = load ptr, ptr [[PTR]], align 8
+// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[TMP44]], i64 2
+// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
+// CHECK-NEXT: store ptr [[PTR]], ptr [[TMP45]], align 8
+// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
+// CHECK-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP46]], align 8
+// CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS11]], i64 0, i64 0
+// CHECK-NEXT: store ptr null, ptr [[TMP47]], align 8
+// CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
+// CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
+// CHECK-NEXT: call void @__tgt_target_data_begin_mapper(ptr @[[GLOB1]], i64 -1, i32 1, ptr [[TMP48]], ptr [[TMP49]], ptr @.offload_sizes.3, ptr @.offload_maptypes.4, ptr null, ptr null)
+// CHECK-NEXT: [[TMP50:%.*]] = load ptr, ptr [[PTR]], align 8
+// CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[TMP50]], i64 2
+// CHECK-NEXT: store i32 9, ptr [[ARRAYIDX12]], align 4
+// CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
+// CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
+// CHECK-NEXT: call void @__tgt_target_data_end_mapper(ptr @[[GLOB1]], i64 -1, i32 1, ptr [[TMP51]], ptr [[TMP52]], ptr @.offload_sizes.3, ptr @.offload_maptypes.4, ptr null, ptr null)
// CHECK-NEXT: ret void
//
//
diff --git a/offload/test/mapping/map_both_pointer_pointee.c b/offload/test/mapping/map_both_pointer_pointee.c
index 4b724823e7a40..65d7d3d4d2bff 100644
--- a/offload/test/mapping/map_both_pointer_pointee.c
+++ b/offload/test/mapping/map_both_pointer_pointee.c
@@ -10,6 +10,7 @@
#pragma omp declare target
int *ptr1;
#pragma omp end declare target
+int a[10];
#include <stdio.h>
#include <stdlib.h>
@@ -38,5 +39,24 @@ int main() {
// CHECK: 6
printf(" %d \n", ptr2[1]);
free(ptr2);
+
+ a[1] = 111;
+ int *p = &a[0];
+ // CHECK: 111
+ printf("%d %p %p\n", p[1], p, &p); // 111 hst_p1 hst_p2
+#pragma omp target data map(to : p[1 : 3]) map(p)
+#pragma omp target data use_device_addr(p)
+ {
+#pragma omp target has_device_addr(p)
+ {
+ // CHECK: 111
+ printf("%d %p %p\n", p[1], p, &p); // 111 dev_p1 dev_p2
+ p[1] = 222;
+ // CHECK: 222
+ printf("%d %p %p\n", p[1], p, &p); // 222 dev_p1 dev_p2
+ }
+ }
+ // CHECK: 111
+ printf("%d %p %p\n", p[1], p, &p); // 111 hst_p1 hst_p2
return 0;
}
>From 8ce1aed55f3dbb71406dc6feaed3f162ac183d21 Mon Sep 17 00:00:00 2001
From: Slava Zakharin <szakharin at nvidia.com>
Date: Wed, 3 Jul 2024 21:18:56 -0700
Subject: [PATCH 204/246] [flang] Lower MATMUL to type specific runtime calls.
(#97547)
Lower MATMUL to the new runtime entries added in #97406.
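For reference, the type-specific entries follow the naming scheme visible in
the updated tests below: _FortranAMatmul<ACat><AKind><BCat><BKind>, with a
MatmulTranspose stem for the transposed form. A small illustrative sketch,
using a hypothetical helper that is not flang API:

#include <string>

// Compose the entry name for a pair of operand types, e.g.
// ("Real", 4, "Real", 4) -> "_FortranAMatmulReal4Real4".
static std::string matmulRuntimeName(const std::string &aCat, int aKind,
                                     const std::string &bCat, int bKind) {
  return "_FortranAMatmul" + aCat + std::to_string(aKind) + bCat +
         std::to_string(bKind);
}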
---
flang/include/flang/Optimizer/Support/Utils.h | 61 +++++++-
.../flang/Runtime/matmul-instances.inc | 23 +--
.../include/flang/Runtime/matmul-transpose.h | 2 +
flang/include/flang/Runtime/matmul.h | 2 +
flang/lib/Optimizer/Builder/IntrinsicCall.cpp | 15 +-
.../Builder/Runtime/Transformational.cpp | 96 ++++++++++++-
flang/runtime/matmul-transpose.cpp | 53 +------
flang/runtime/matmul.cpp | 53 +------
flang/test/HLFIR/matmul-lowering.fir | 6 +-
flang/test/HLFIR/mul_transpose.f90 | 6 +-
flang/test/Lower/Intrinsics/matmul.f90 | 4 +-
.../Builder/Runtime/RuntimeCallTestBase.h | 9 ++
.../Builder/Runtime/TransformationalTest.cpp | 42 ++++--
flang/unittests/Runtime/Matmul.cpp | 119 ----------------
flang/unittests/Runtime/MatmulTranspose.cpp | 131 ------------------
15 files changed, 230 insertions(+), 392 deletions(-)
diff --git a/flang/include/flang/Optimizer/Support/Utils.h b/flang/include/flang/Optimizer/Support/Utils.h
index ae95a26be1d86..02bec4164fca0 100644
--- a/flang/include/flang/Optimizer/Support/Utils.h
+++ b/flang/include/flang/Optimizer/Support/Utils.h
@@ -84,9 +84,10 @@ inline std::string mlirTypeToString(mlir::Type type) {
return result;
}
-inline std::string numericMlirTypeToFortran(fir::FirOpBuilder &builder,
- mlir::Type type, mlir::Location loc,
- const llvm::Twine &name) {
+inline std::string mlirTypeToIntrinsicFortran(fir::FirOpBuilder &builder,
+ mlir::Type type,
+ mlir::Location loc,
+ const llvm::Twine &name) {
if (type.isF16())
return "REAL(KIND=2)";
else if (type.isBF16())
@@ -123,6 +124,14 @@ inline std::string numericMlirTypeToFortran(fir::FirOpBuilder &builder,
return "COMPLEX(KIND=10)";
else if (type == fir::ComplexType::get(builder.getContext(), 16))
return "COMPLEX(KIND=16)";
+ else if (type == fir::LogicalType::get(builder.getContext(), 1))
+ return "LOGICAL(KIND=1)";
+ else if (type == fir::LogicalType::get(builder.getContext(), 2))
+ return "LOGICAL(KIND=2)";
+ else if (type == fir::LogicalType::get(builder.getContext(), 4))
+ return "LOGICAL(KIND=4)";
+ else if (type == fir::LogicalType::get(builder.getContext(), 8))
+ return "LOGICAL(KIND=8)";
else
fir::emitFatalError(loc, "unsupported type in " + name + ": " +
fir::mlirTypeToString(type));
@@ -133,10 +142,54 @@ inline void intrinsicTypeTODO(fir::FirOpBuilder &builder, mlir::Type type,
const llvm::Twine &intrinsicName) {
TODO(loc,
"intrinsic: " +
- fir::numericMlirTypeToFortran(builder, type, loc, intrinsicName) +
+ fir::mlirTypeToIntrinsicFortran(builder, type, loc, intrinsicName) +
" in " + intrinsicName);
}
+inline void intrinsicTypeTODO2(fir::FirOpBuilder &builder, mlir::Type type1,
+ mlir::Type type2, mlir::Location loc,
+ const llvm::Twine &intrinsicName) {
+ TODO(loc,
+ "intrinsic: {" +
+ fir::mlirTypeToIntrinsicFortran(builder, type1, loc, intrinsicName) +
+ ", " +
+ fir::mlirTypeToIntrinsicFortran(builder, type2, loc, intrinsicName) +
+ "} in " + intrinsicName);
+}
+
+inline std::pair<Fortran::common::TypeCategory, KindMapping::KindTy>
+mlirTypeToCategoryKind(mlir::Location loc, mlir::Type type) {
+ if (type.isF16())
+ return {Fortran::common::TypeCategory::Real, 2};
+ else if (type.isBF16())
+ return {Fortran::common::TypeCategory::Real, 3};
+ else if (type.isF32())
+ return {Fortran::common::TypeCategory::Real, 4};
+ else if (type.isF64())
+ return {Fortran::common::TypeCategory::Real, 8};
+ else if (type.isF80())
+ return {Fortran::common::TypeCategory::Real, 10};
+ else if (type.isF128())
+ return {Fortran::common::TypeCategory::Real, 16};
+ else if (type.isInteger(8))
+ return {Fortran::common::TypeCategory::Integer, 1};
+ else if (type.isInteger(16))
+ return {Fortran::common::TypeCategory::Integer, 2};
+ else if (type.isInteger(32))
+ return {Fortran::common::TypeCategory::Integer, 4};
+ else if (type.isInteger(64))
+ return {Fortran::common::TypeCategory::Integer, 8};
+ else if (type.isInteger(128))
+ return {Fortran::common::TypeCategory::Integer, 16};
+ else if (auto complexType = mlir::dyn_cast<fir::ComplexType>(type))
+ return {Fortran::common::TypeCategory::Complex, complexType.getFKind()};
+ else if (auto logicalType = mlir::dyn_cast<fir::LogicalType>(type))
+ return {Fortran::common::TypeCategory::Logical, logicalType.getFKind()};
+ else
+ fir::emitFatalError(loc,
+ "unsupported type: " + fir::mlirTypeToString(type));
+}
+
/// Find the fir.type_info that was created for this \p recordType in \p module,
/// if any. \p symbolTable can be provided to speed-up the lookup. This tool
/// will match record type even if they have been "altered" in type conversion
diff --git a/flang/include/flang/Runtime/matmul-instances.inc b/flang/include/flang/Runtime/matmul-instances.inc
index 970b03339cd5e..32c6ab06d2521 100644
--- a/flang/include/flang/Runtime/matmul-instances.inc
+++ b/flang/include/flang/Runtime/matmul-instances.inc
@@ -17,6 +17,10 @@
#error "Define MATMUL_DIRECT_INSTANCE before including this file"
#endif
+#ifndef MATMUL_FORCE_ALL_TYPES
+#error "Define MATMUL_FORCE_ALL_TYPES to 0 or 1 before including this file"
+#endif
+
// clang-format off
#define FOREACH_MATMUL_TYPE_PAIR(macro) \
@@ -88,7 +92,7 @@
FOREACH_MATMUL_TYPE_PAIR(MATMUL_INSTANCE)
FOREACH_MATMUL_TYPE_PAIR(MATMUL_DIRECT_INSTANCE)
-#if defined __SIZEOF_INT128__ && !AVOID_NATIVE_UINT128_T
+#if MATMUL_FORCE_ALL_TYPES || (defined __SIZEOF_INT128__ && !AVOID_NATIVE_UINT128_T)
#define FOREACH_MATMUL_TYPE_PAIR_WITH_INT16(macro) \
macro(Integer, 16, Integer, 1) \
macro(Integer, 16, Integer, 2) \
@@ -107,7 +111,7 @@ FOREACH_MATMUL_TYPE_PAIR(MATMUL_DIRECT_INSTANCE)
FOREACH_MATMUL_TYPE_PAIR_WITH_INT16(MATMUL_INSTANCE)
FOREACH_MATMUL_TYPE_PAIR_WITH_INT16(MATMUL_DIRECT_INSTANCE)
-#if LDBL_MANT_DIG == 64
+#if MATMUL_FORCE_ALL_TYPES || LDBL_MANT_DIG == 64
MATMUL_INSTANCE(Integer, 16, Real, 10)
MATMUL_INSTANCE(Integer, 16, Complex, 10)
MATMUL_INSTANCE(Real, 10, Integer, 16)
@@ -117,7 +121,7 @@ MATMUL_DIRECT_INSTANCE(Integer, 16, Complex, 10)
MATMUL_DIRECT_INSTANCE(Real, 10, Integer, 16)
MATMUL_DIRECT_INSTANCE(Complex, 10, Integer, 16)
#endif
-#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+#if MATMUL_FORCE_ALL_TYPES || (LDBL_MANT_DIG == 113 || HAS_FLOAT128)
MATMUL_INSTANCE(Integer, 16, Real, 16)
MATMUL_INSTANCE(Integer, 16, Complex, 16)
MATMUL_INSTANCE(Real, 16, Integer, 16)
@@ -127,9 +131,9 @@ MATMUL_DIRECT_INSTANCE(Integer, 16, Complex, 16)
MATMUL_DIRECT_INSTANCE(Real, 16, Integer, 16)
MATMUL_DIRECT_INSTANCE(Complex, 16, Integer, 16)
#endif
-#endif // defined __SIZEOF_INT128__ && !AVOID_NATIVE_UINT128_T
+#endif // MATMUL_FORCE_ALL_TYPES || (defined __SIZEOF_INT128__ && !AVOID_NATIVE_UINT128_T)
-#if LDBL_MANT_DIG == 64
+#if MATMUL_FORCE_ALL_TYPES || LDBL_MANT_DIG == 64
#define FOREACH_MATMUL_TYPE_PAIR_WITH_REAL10(macro) \
macro(Integer, 1, Real, 10) \
macro(Integer, 1, Complex, 10) \
@@ -171,7 +175,7 @@ MATMUL_DIRECT_INSTANCE(Complex, 16, Integer, 16)
FOREACH_MATMUL_TYPE_PAIR_WITH_REAL10(MATMUL_INSTANCE)
FOREACH_MATMUL_TYPE_PAIR_WITH_REAL10(MATMUL_DIRECT_INSTANCE)
-#if HAS_FLOAT128
+#if MATMUL_FORCE_ALL_TYPES || HAS_FLOAT128
MATMUL_INSTANCE(Real, 10, Real, 16)
MATMUL_INSTANCE(Real, 10, Complex, 16)
MATMUL_INSTANCE(Real, 16, Real, 10)
@@ -189,9 +193,9 @@ MATMUL_DIRECT_INSTANCE(Complex, 10, Complex, 16)
MATMUL_DIRECT_INSTANCE(Complex, 16, Real, 10)
MATMUL_DIRECT_INSTANCE(Complex, 16, Complex, 10)
#endif
-#endif // LDBL_MANT_DIG == 64
+#endif // MATMUL_FORCE_ALL_TYPES || LDBL_MANT_DIG == 64
-#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+#if MATMUL_FORCE_ALL_TYPES || (LDBL_MANT_DIG == 113 || HAS_FLOAT128)
#define FOREACH_MATMUL_TYPE_PAIR_WITH_REAL16(macro) \
macro(Integer, 1, Real, 16) \
macro(Integer, 1, Complex, 16) \
@@ -232,7 +236,7 @@ MATMUL_DIRECT_INSTANCE(Complex, 16, Complex, 10)
FOREACH_MATMUL_TYPE_PAIR_WITH_REAL16(MATMUL_INSTANCE)
FOREACH_MATMUL_TYPE_PAIR_WITH_REAL16(MATMUL_DIRECT_INSTANCE)
-#endif // LDBL_MANT_DIG == 113 || HAS_FLOAT128
+#endif // MATMUL_FORCE_ALL_TYPES || (LDBL_MANT_DIG == 113 || HAS_FLOAT128)
#define FOREACH_MATMUL_LOGICAL_TYPE_PAIR(macro) \
macro(Logical, 1, Logical, 1) \
@@ -257,5 +261,6 @@ FOREACH_MATMUL_LOGICAL_TYPE_PAIR(MATMUL_DIRECT_INSTANCE)
#undef MATMUL_INSTANCE
#undef MATMUL_DIRECT_INSTANCE
+#undef MATMUL_FORCE_ALL_TYPES
// clang-format on
diff --git a/flang/include/flang/Runtime/matmul-transpose.h b/flang/include/flang/Runtime/matmul-transpose.h
index d0a5005a1a8bd..2d79ca10e0895 100644
--- a/flang/include/flang/Runtime/matmul-transpose.h
+++ b/flang/include/flang/Runtime/matmul-transpose.h
@@ -40,6 +40,8 @@ void RTDECL(MatmulTransposeDirect)(const Descriptor &, const Descriptor &,
Descriptor & result, const Descriptor &x, const Descriptor &y, \
const char *sourceFile, int line);
+#define MATMUL_FORCE_ALL_TYPES 0
+
#include "matmul-instances.inc"
} // extern "C"
diff --git a/flang/include/flang/Runtime/matmul.h b/flang/include/flang/Runtime/matmul.h
index 1a5e39eb8813f..a72d4a06ee459 100644
--- a/flang/include/flang/Runtime/matmul.h
+++ b/flang/include/flang/Runtime/matmul.h
@@ -39,6 +39,8 @@ void RTDECL(MatmulDirect)(const Descriptor &, const Descriptor &,
const Descriptor &x, const Descriptor &y, const char *sourceFile, \
int line);
+#define MATMUL_FORCE_ALL_TYPES 0
+
#include "matmul-instances.inc"
} // extern "C"
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index 8dd1904939f3e..a1cef7437fa2d 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -701,18 +701,19 @@ prettyPrintIntrinsicName(fir::FirOpBuilder &builder, mlir::Location loc,
if (name == "pow") {
assert(funcType.getNumInputs() == 2 && "power operator has two arguments");
std::string displayName{" ** "};
- sstream << numericMlirTypeToFortran(builder, funcType.getInput(0), loc,
- displayName)
+ sstream << mlirTypeToIntrinsicFortran(builder, funcType.getInput(0), loc,
+ displayName)
<< displayName
- << numericMlirTypeToFortran(builder, funcType.getInput(1), loc,
- displayName);
+ << mlirTypeToIntrinsicFortran(builder, funcType.getInput(1), loc,
+ displayName);
} else {
sstream << name.upper() << "(";
if (funcType.getNumInputs() > 0)
- sstream << numericMlirTypeToFortran(builder, funcType.getInput(0), loc,
- name);
+ sstream << mlirTypeToIntrinsicFortran(builder, funcType.getInput(0), loc,
+ name);
for (mlir::Type argType : funcType.getInputs().drop_front()) {
- sstream << ", " << numericMlirTypeToFortran(builder, argType, loc, name);
+ sstream << ", "
+ << mlirTypeToIntrinsicFortran(builder, argType, loc, name);
}
sstream << ")";
}
diff --git a/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp b/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp
index 6d3d85e8df69f..8f08b01fe0097 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp
@@ -329,11 +329,64 @@ void fir::runtime::genEoshiftVector(fir::FirOpBuilder &builder,
builder.create<fir::CallOp>(loc, eoshiftFunc, args);
}
+/// Define ForcedMatmul<ACAT><AKIND><BCAT><BKIND> models.
+struct ForcedMatmulTypeModel {
+ static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() {
+ return [](mlir::MLIRContext *ctx) {
+ auto boxRefTy =
+ fir::runtime::getModel<Fortran::runtime::Descriptor &>()(ctx);
+ auto boxTy =
+ fir::runtime::getModel<const Fortran::runtime::Descriptor &>()(ctx);
+ auto strTy = fir::runtime::getModel<const char *>()(ctx);
+ auto intTy = fir::runtime::getModel<int>()(ctx);
+ auto voidTy = fir::runtime::getModel<void>()(ctx);
+ return mlir::FunctionType::get(
+ ctx, {boxRefTy, boxTy, boxTy, strTy, intTy}, {voidTy});
+ };
+ }
+};
+
+#define MATMUL_INSTANCE(ACAT, AKIND, BCAT, BKIND) \
+ struct ForcedMatmul##ACAT##AKIND##BCAT##BKIND \
+ : public ForcedMatmulTypeModel { \
+ static constexpr const char *name = \
+ ExpandAndQuoteKey(RTNAME(Matmul##ACAT##AKIND##BCAT##BKIND)); \
+ };
+
+#define MATMUL_DIRECT_INSTANCE(ACAT, AKIND, BCAT, BKIND)
+#define MATMUL_FORCE_ALL_TYPES 1
+
+#include "flang/Runtime/matmul-instances.inc"
+
/// Generate call to Matmul intrinsic runtime routine.
void fir::runtime::genMatmul(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value resultBox, mlir::Value matrixABox,
mlir::Value matrixBBox) {
- auto func = fir::runtime::getRuntimeFunc<mkRTKey(Matmul)>(loc, builder);
+ mlir::func::FuncOp func;
+ auto boxATy = matrixABox.getType();
+ auto arrATy = fir::dyn_cast_ptrOrBoxEleTy(boxATy);
+ auto arrAEleTy = mlir::cast<fir::SequenceType>(arrATy).getEleTy();
+ auto [aCat, aKind] = fir::mlirTypeToCategoryKind(loc, arrAEleTy);
+ auto boxBTy = matrixBBox.getType();
+ auto arrBTy = fir::dyn_cast_ptrOrBoxEleTy(boxBTy);
+ auto arrBEleTy = mlir::cast<fir::SequenceType>(arrBTy).getEleTy();
+ auto [bCat, bKind] = fir::mlirTypeToCategoryKind(loc, arrBEleTy);
+
+#define MATMUL_INSTANCE(ACAT, AKIND, BCAT, BKIND) \
+ if (!func && aCat == TypeCategory::ACAT && aKind == AKIND && \
+ bCat == TypeCategory::BCAT && bKind == BKIND) { \
+ func = \
+ fir::runtime::getRuntimeFunc<ForcedMatmul##ACAT##AKIND##BCAT##BKIND>( \
+ loc, builder); \
+ }
+
+#define MATMUL_DIRECT_INSTANCE(ACAT, AKIND, BCAT, BKIND)
+#define MATMUL_FORCE_ALL_TYPES 1
+#include "flang/Runtime/matmul-instances.inc"
+
+ if (!func) {
+ fir::intrinsicTypeTODO2(builder, arrAEleTy, arrBEleTy, loc, "MATMUL");
+ }
auto fTy = func.getFunctionType();
auto sourceFile = fir::factory::locationToFilename(builder, loc);
auto sourceLine =
@@ -344,13 +397,48 @@ void fir::runtime::genMatmul(fir::FirOpBuilder &builder, mlir::Location loc,
builder.create<fir::CallOp>(loc, func, args);
}
-/// Generate call to MatmulTranspose intrinsic runtime routine.
+/// Define ForcedMatmulTranspose<ACAT><AKIND><BCAT><BKIND> models.
+#define MATMUL_INSTANCE(ACAT, AKIND, BCAT, BKIND) \
+ struct ForcedMatmulTranspose##ACAT##AKIND##BCAT##BKIND \
+ : public ForcedMatmulTypeModel { \
+ static constexpr const char *name = \
+ ExpandAndQuoteKey(RTNAME(MatmulTranspose##ACAT##AKIND##BCAT##BKIND)); \
+ };
+
+#define MATMUL_DIRECT_INSTANCE(ACAT, AKIND, BCAT, BKIND)
+#define MATMUL_FORCE_ALL_TYPES 1
+
+#include "flang/Runtime/matmul-instances.inc"
+
void fir::runtime::genMatmulTranspose(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value resultBox,
mlir::Value matrixABox,
mlir::Value matrixBBox) {
- auto func =
- fir::runtime::getRuntimeFunc<mkRTKey(MatmulTranspose)>(loc, builder);
+ mlir::func::FuncOp func;
+ auto boxATy = matrixABox.getType();
+ auto arrATy = fir::dyn_cast_ptrOrBoxEleTy(boxATy);
+ auto arrAEleTy = mlir::cast<fir::SequenceType>(arrATy).getEleTy();
+ auto [aCat, aKind] = fir::mlirTypeToCategoryKind(loc, arrAEleTy);
+ auto boxBTy = matrixBBox.getType();
+ auto arrBTy = fir::dyn_cast_ptrOrBoxEleTy(boxBTy);
+ auto arrBEleTy = mlir::cast<fir::SequenceType>(arrBTy).getEleTy();
+ auto [bCat, bKind] = fir::mlirTypeToCategoryKind(loc, arrBEleTy);
+
+#define MATMUL_INSTANCE(ACAT, AKIND, BCAT, BKIND) \
+ if (!func && aCat == TypeCategory::ACAT && aKind == AKIND && \
+ bCat == TypeCategory::BCAT && bKind == BKIND) { \
+ func = fir::runtime::getRuntimeFunc< \
+ ForcedMatmulTranspose##ACAT##AKIND##BCAT##BKIND>(loc, builder); \
+ }
+
+#define MATMUL_DIRECT_INSTANCE(ACAT, AKIND, BCAT, BKIND)
+#define MATMUL_FORCE_ALL_TYPES 1
+#include "flang/Runtime/matmul-instances.inc"
+
+ if (!func) {
+ fir::intrinsicTypeTODO2(builder, arrAEleTy, arrBEleTy, loc,
+ "MATMUL-TRANSPOSE");
+ }
auto fTy = func.getFunctionType();
auto sourceFile = fir::factory::locationToFilename(builder, loc);
auto sourceLine =
diff --git a/flang/runtime/matmul-transpose.cpp b/flang/runtime/matmul-transpose.cpp
index 1c998fa8cf6c1..283472650a1c6 100644
--- a/flang/runtime/matmul-transpose.cpp
+++ b/flang/runtime/matmul-transpose.cpp
@@ -343,48 +343,6 @@ inline static RT_API_ATTRS void DoMatmulTranspose(
RT_DIAG_POP
-// Maps the dynamic type information from the arguments' descriptors
-// to the right instantiation of DoMatmul() for valid combinations of
-// types.
-template <bool IS_ALLOCATING> struct MatmulTranspose {
- using ResultDescriptor =
- std::conditional_t<IS_ALLOCATING, Descriptor, const Descriptor>;
- template <TypeCategory XCAT, int XKIND> struct MM1 {
- template <TypeCategory YCAT, int YKIND> struct MM2 {
- RT_API_ATTRS void operator()(ResultDescriptor &result,
- const Descriptor &x, const Descriptor &y,
- Terminator &terminator) const {
- if constexpr (constexpr auto resultType{
- GetResultType(XCAT, XKIND, YCAT, YKIND)}) {
- if constexpr (Fortran::common::IsNumericTypeCategory(
- resultType->first) ||
- resultType->first == TypeCategory::Logical) {
- return DoMatmulTranspose<IS_ALLOCATING, resultType->first,
- resultType->second, CppTypeFor<XCAT, XKIND>,
- CppTypeFor<YCAT, YKIND>>(result, x, y, terminator);
- }
- }
- terminator.Crash("MATMUL-TRANSPOSE: bad operand types (%d(%d), %d(%d))",
- static_cast<int>(XCAT), XKIND, static_cast<int>(YCAT), YKIND);
- }
- };
- RT_API_ATTRS void operator()(ResultDescriptor &result, const Descriptor &x,
- const Descriptor &y, Terminator &terminator, TypeCategory yCat,
- int yKind) const {
- ApplyType<MM2, void>(yCat, yKind, terminator, result, x, y, terminator);
- }
- };
- RT_API_ATTRS void operator()(ResultDescriptor &result, const Descriptor &x,
- const Descriptor &y, const char *sourceFile, int line) const {
- Terminator terminator{sourceFile, line};
- auto xCatKind{x.type().GetCategoryAndKind()};
- auto yCatKind{y.type().GetCategoryAndKind()};
- RUNTIME_CHECK(terminator, xCatKind.has_value() && yCatKind.has_value());
- ApplyType<MM1, void>(xCatKind->first, xCatKind->second, terminator, result,
- x, y, terminator, yCatKind->first, yCatKind->second);
- }
-};
-
template <bool IS_ALLOCATING, TypeCategory XCAT, int XKIND, TypeCategory YCAT,
int YKIND>
struct MatmulTransposeHelper {
@@ -414,15 +372,6 @@ namespace Fortran::runtime {
extern "C" {
RT_EXT_API_GROUP_BEGIN
-void RTDEF(MatmulTranspose)(Descriptor &result, const Descriptor &x,
- const Descriptor &y, const char *sourceFile, int line) {
- MatmulTranspose<true>{}(result, x, y, sourceFile, line);
-}
-void RTDEF(MatmulTransposeDirect)(const Descriptor &result, const Descriptor &x,
- const Descriptor &y, const char *sourceFile, int line) {
- MatmulTranspose<false>{}(result, x, y, sourceFile, line);
-}
-
#define MATMUL_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
void RTDEF(MatmulTranspose##XCAT##XKIND##YCAT##YKIND)(Descriptor & result, \
const Descriptor &x, const Descriptor &y, const char *sourceFile, \
@@ -439,6 +388,8 @@ void RTDEF(MatmulTransposeDirect)(const Descriptor &result, const Descriptor &x,
TypeCategory::YCAT, YKIND>{}(result, x, y, sourceFile, line); \
}
+#define MATMUL_FORCE_ALL_TYPES 0
+
#include "flang/Runtime/matmul-instances.inc"
RT_EXT_API_GROUP_END
diff --git a/flang/runtime/matmul.cpp b/flang/runtime/matmul.cpp
index 504d1aa4dc4a4..252557e2f9e7a 100644
--- a/flang/runtime/matmul.cpp
+++ b/flang/runtime/matmul.cpp
@@ -443,48 +443,6 @@ static inline RT_API_ATTRS void DoMatmul(
RT_DIAG_POP
-// Maps the dynamic type information from the arguments' descriptors
-// to the right instantiation of DoMatmul() for valid combinations of
-// types.
-template <bool IS_ALLOCATING> struct Matmul {
- using ResultDescriptor =
- std::conditional_t<IS_ALLOCATING, Descriptor, const Descriptor>;
- template <TypeCategory XCAT, int XKIND> struct MM1 {
- template <TypeCategory YCAT, int YKIND> struct MM2 {
- RT_API_ATTRS void operator()(ResultDescriptor &result,
- const Descriptor &x, const Descriptor &y,
- Terminator &terminator) const {
- if constexpr (constexpr auto resultType{
- GetResultType(XCAT, XKIND, YCAT, YKIND)}) {
- if constexpr (Fortran::common::IsNumericTypeCategory(
- resultType->first) ||
- resultType->first == TypeCategory::Logical) {
- return DoMatmul<IS_ALLOCATING, resultType->first,
- resultType->second, CppTypeFor<XCAT, XKIND>,
- CppTypeFor<YCAT, YKIND>>(result, x, y, terminator);
- }
- }
- terminator.Crash("MATMUL: bad operand types (%d(%d), %d(%d))",
- static_cast<int>(XCAT), XKIND, static_cast<int>(YCAT), YKIND);
- }
- };
- RT_API_ATTRS void operator()(ResultDescriptor &result, const Descriptor &x,
- const Descriptor &y, Terminator &terminator, TypeCategory yCat,
- int yKind) const {
- ApplyType<MM2, void>(yCat, yKind, terminator, result, x, y, terminator);
- }
- };
- RT_API_ATTRS void operator()(ResultDescriptor &result, const Descriptor &x,
- const Descriptor &y, const char *sourceFile, int line) const {
- Terminator terminator{sourceFile, line};
- auto xCatKind{x.type().GetCategoryAndKind()};
- auto yCatKind{y.type().GetCategoryAndKind()};
- RUNTIME_CHECK(terminator, xCatKind.has_value() && yCatKind.has_value());
- ApplyType<MM1, void>(xCatKind->first, xCatKind->second, terminator, result,
- x, y, terminator, yCatKind->first, yCatKind->second);
- }
-};
-
template <bool IS_ALLOCATING, TypeCategory XCAT, int XKIND, TypeCategory YCAT,
int YKIND>
struct MatmulHelper {
@@ -514,15 +472,6 @@ namespace Fortran::runtime {
extern "C" {
RT_EXT_API_GROUP_BEGIN
-void RTDEF(Matmul)(Descriptor &result, const Descriptor &x, const Descriptor &y,
- const char *sourceFile, int line) {
- Matmul<true>{}(result, x, y, sourceFile, line);
-}
-void RTDEF(MatmulDirect)(const Descriptor &result, const Descriptor &x,
- const Descriptor &y, const char *sourceFile, int line) {
- Matmul<false>{}(result, x, y, sourceFile, line);
-}
-
#define MATMUL_INSTANCE(XCAT, XKIND, YCAT, YKIND) \
void RTDEF(Matmul##XCAT##XKIND##YCAT##YKIND)(Descriptor & result, \
const Descriptor &x, const Descriptor &y, const char *sourceFile, \
@@ -539,6 +488,8 @@ void RTDEF(MatmulDirect)(const Descriptor &result, const Descriptor &x,
YKIND>{}(result, x, y, sourceFile, line); \
}
+#define MATMUL_FORCE_ALL_TYPES 0
+
#include "flang/Runtime/matmul-instances.inc"
RT_EXT_API_GROUP_END
diff --git a/flang/test/HLFIR/matmul-lowering.fir b/flang/test/HLFIR/matmul-lowering.fir
index 85a73dd45160f..fd76db2659516 100644
--- a/flang/test/HLFIR/matmul-lowering.fir
+++ b/flang/test/HLFIR/matmul-lowering.fir
@@ -29,7 +29,7 @@ func.func @_QPmatmul1(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "lh
// CHECK: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-DAG: %[[LHS_ARG:.*]] = fir.convert %[[LHS_VAR]]#1 : (!fir.box<!fir.array<?x?xi32>>) -> !fir.box<none>
// CHECK-DAG: %[[RHS_ARG:.*]] = fir.convert %[[RHS_VAR]]#1 : (!fir.box<!fir.array<?x?xi32>>) -> !fir.box<none>
-// CHECK: %[[NONE:.*]] = fir.call @_FortranAMatmul(%[[RET_ARG]], %[[LHS_ARG]], %[[RHS_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) fastmath<contract>
+// CHECK: %[[NONE:.*]] = fir.call @_FortranAMatmulInteger4Integer4(%[[RET_ARG]], %[[LHS_ARG]], %[[RHS_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) fastmath<contract>
// CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]]
// CHECK-DAG: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]]
@@ -71,7 +71,7 @@ func.func @_QPtest(%arg0: !fir.ref<!fir.array<3x3xf32>> {fir.bindc_name = "a"},
}
// just check that we apply the patterns successfully. The details are checked above
// CHECK-LABEL: func.func @_QPtest(
-// CHECK: fir.call @_FortranAMatmul({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> none
-// CHECK: fir.call @_FortranAMatmul({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> none
+// CHECK: fir.call @_FortranAMatmulReal4Real4({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> none
+// CHECK: fir.call @_FortranAMatmulReal4Real4({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> none
// CHECK: return
// CHECK-NEXT: }
diff --git a/flang/test/HLFIR/mul_transpose.f90 b/flang/test/HLFIR/mul_transpose.f90
index 378ecfe4886aa..7cfbfe39d0ea8 100644
--- a/flang/test/HLFIR/mul_transpose.f90
+++ b/flang/test/HLFIR/mul_transpose.f90
@@ -44,7 +44,7 @@ subroutine mul_transpose(a, b, res)
! CHECK-LOWERING: %[[MUL_CONV_RES:.*]] = fir.convert %[[MUL_RES_BOX:.*]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
! CHECK-LOWERING: %[[LHS_CONV:.*]] = fir.convert %[[LHS_BOX]] : (!fir.box<!fir.array<1x2xf32>>) -> !fir.box<none>
! CHECK-LOWERING: %[[B_BOX_CONV:.*]] = fir.convert %[[B_BOX]] : (!fir.box<!fir.array<2x2xf32>>) -> !fir.box<none>
-! CHECK-LOWERING: fir.call @_FortranAMatmul(%[[MUL_CONV_RES]], %[[LHS_CONV]], %[[B_BOX_CONV]], %[[LOC_STR2:.*]], %[[LOC_N2:.*]])
+! CHECK-LOWERING: fir.call @_FortranAMatmulReal4Real4(%[[MUL_CONV_RES]], %[[LHS_CONV]], %[[B_BOX_CONV]], %[[LOC_STR2:.*]], %[[LOC_N2:.*]])
! CHECK-LOWERING: %[[MUL_RES_LD:.*]] = fir.load %[[MUL_RES_BOX:.*]]
! CHECK-LOWERING: %[[MUL_RES_ADDR:.*]] = fir.box_addr %[[MUL_RES_LD]]
! CHECK-LOWERING: %[[MUL_RES_VAR:.*]]:2 = hlfir.declare %[[MUL_RES_ADDR]]({{.*}}) {uniq_name = ".tmp.intrinsic_result"}
@@ -60,7 +60,7 @@ subroutine mul_transpose(a, b, res)
! CHECK-LOWERING-OPT: %[[MUL_CONV_RES:.*]] = fir.convert %[[MUL_RES_BOX:.*]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
! CHECK-LOWERING-OPT: %[[LHS_CONV:.*]] = fir.convert %[[LHS_BOX]] : (!fir.box<!fir.array<2x1xf32>>) -> !fir.box<none>
! CHECK-LOWERING-OPT: %[[B_BOX_CONV:.*]] = fir.convert %[[B_BOX]] : (!fir.box<!fir.array<2x2xf32>>) -> !fir.box<none>
-! CHECK-LOWERING-OPT: fir.call @_FortranAMatmulTranspose(%[[MUL_CONV_RES]], %[[LHS_CONV]], %[[B_BOX_CONV]], %[[LOC_STR2:.*]], %[[LOC_N2:.*]])
+! CHECK-LOWERING-OPT: fir.call @_FortranAMatmulTransposeReal4Real4(%[[MUL_CONV_RES]], %[[LHS_CONV]], %[[B_BOX_CONV]], %[[LOC_STR2:.*]], %[[LOC_N2:.*]])
! CHECK-LOWERING-OPT: %[[MUL_RES_LD:.*]] = fir.load %[[MUL_RES_BOX:.*]]
! CHECK-LOWERING-OPT: %[[MUL_RES_ADDR:.*]] = fir.box_addr %[[MUL_RES_LD]]
! CHECK-LOWERING-OPT: %[[MUL_RES_VAR:.*]]:2 = hlfir.declare %[[MUL_RES_ADDR]]({{.*}}) {uniq_name = ".tmp.intrinsic_result"}
@@ -81,7 +81,7 @@ subroutine mul_transpose(a, b, res)
! CHECK-BUFFERING: %[[TRANSPOSE_RES_BOX:.*]] = fir.embox %[[TRANSPOSE_RES_REF]]({{.*}})
! CHECK-BUFFERING: %[[LHS_CONV:.*]] = fir.convert %[[TRANSPOSE_RES_BOX]] : (!fir.box<!fir.array<1x2xf32>>) -> !fir.box<none>
! [argument handling unchanged]
-! CHECK-BUFFERING: fir.call @_FortranAMatmul(
+! CHECK-BUFFERING: fir.call @_FortranAMatmulReal4Real4(
! CHECK-BUFFERING: %[[MUL_RES_LD:.*]] = fir.load %[[MUL_RES_BOX:.*]]
! CHECK-BUFFERING: %[[MUL_RES_ADDR:.*]] = fir.box_addr %[[MUL_RES_LD]]
! CHECK-BUFFERING: %[[MUL_RES_VAR:.*]]:2 = hlfir.declare %[[MUL_RES_ADDR]]({{.*}}) {uniq_name = ".tmp.intrinsic_result"}
diff --git a/flang/test/Lower/Intrinsics/matmul.f90 b/flang/test/Lower/Intrinsics/matmul.f90
index e9a8220dc6ab7..db60963320144 100644
--- a/flang/test/Lower/Intrinsics/matmul.f90
+++ b/flang/test/Lower/Intrinsics/matmul.f90
@@ -23,7 +23,7 @@
! CHECK: %[[RESULT_BOX_ADDR_RUNTIME:.*]] = fir.convert %[[RESULT_BOX_ADDR]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %[[X_BOX_RUNTIME:.*]] = fir.convert %[[X_BOX]] : (!fir.box<!fir.array<3x1xf32>>) -> !fir.box<none>
! CHECK: %[[Y_BOX_RUNTIME:.*]] = fir.convert %[[Y_BOX]] : (!fir.box<!fir.array<1x3xf32>>) -> !fir.box<none>
-! CHECK: {{.*}}fir.call @_FortranAMatmul(%[[RESULT_BOX_ADDR_RUNTIME]], %[[X_BOX_RUNTIME]], %[[Y_BOX_RUNTIME]], {{.*}}, {{.*}} {{.*}}: (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> none
+! CHECK: {{.*}}fir.call @_FortranAMatmulReal4Real4(%[[RESULT_BOX_ADDR_RUNTIME]], %[[X_BOX_RUNTIME]], %[[Y_BOX_RUNTIME]], {{.*}}, {{.*}} {{.*}}: (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> none
! CHECK: %[[RESULT_BOX:.*]] = fir.load %[[RESULT_BOX_ADDR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
! CHECK: %[[RESULT_TMP:.*]] = fir.box_addr %[[RESULT_BOX]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
! CHECK: %[[Z_COPY_FROM_RESULT:.*]] = fir.do_loop
@@ -50,7 +50,7 @@ subroutine matmul_test(x,y,z)
!CHECK: %[[RESULT_BOX_RUNTIME:.*]] = fir.convert %[[RESULT_BOX_ADDR]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.logical<4>>>>>) -> !fir.ref<!fir.box<none>>
!CHECK: %[[X_BOX_RUNTIME:.*]] = fir.convert %[[X_BOX]] : (!fir.box<!fir.array<?x?x!fir.logical<4>>>) -> !fir.box<none>
!CHECK: %[[Y_BOX_RUNTIME:.*]] = fir.convert %[[Y_BOX]] : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> !fir.box<none>
-!CHECK: {{.*}}fir.call @_FortranAMatmul(%[[RESULT_BOX_RUNTIME]], %[[X_BOX_RUNTIME]], %[[Y_BOX_RUNTIME]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> none
+!CHECK: {{.*}}fir.call @_FortranAMatmulLogical4Logical4(%[[RESULT_BOX_RUNTIME]], %[[X_BOX_RUNTIME]], %[[Y_BOX_RUNTIME]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> none
!CHECK: %[[RESULT_BOX:.*]] = fir.load %[[RESULT_BOX_ADDR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.logical<4>>>>>
!CHECK: %[[RESULT_TMP:.*]] = fir.box_addr %[[RESULT_BOX]] : (!fir.box<!fir.heap<!fir.array<?x!fir.logical<4>>>>) -> !fir.heap<!fir.array<?x!fir.logical<4>>>
!CHECK: %[[Z_COPY_FROM_RESULT:.*]] = fir.do_loop
diff --git a/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h b/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h
index 00c27f4bb2142..4ace359f055b0 100644
--- a/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h
+++ b/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h
@@ -58,6 +58,11 @@ struct RuntimeCallTest : public testing::Test {
char1Ty = fir::CharacterType::getSingleton(builder.getContext(), 1);
char2Ty = fir::CharacterType::getSingleton(builder.getContext(), 2);
char4Ty = fir::CharacterType::getSingleton(builder.getContext(), 4);
+
+ logical1Ty = fir::LogicalType::get(builder.getContext(), 1);
+ logical2Ty = fir::LogicalType::get(builder.getContext(), 2);
+ logical4Ty = fir::LogicalType::get(builder.getContext(), 4);
+ logical8Ty = fir::LogicalType::get(builder.getContext(), 8);
}
mlir::MLIRContext context;
@@ -84,6 +89,10 @@ struct RuntimeCallTest : public testing::Test {
mlir::Type char1Ty;
mlir::Type char2Ty;
mlir::Type char4Ty;
+ mlir::Type logical1Ty;
+ mlir::Type logical2Ty;
+ mlir::Type logical4Ty;
+ mlir::Type logical8Ty;
};
/// Check that the \p op is a `fir::CallOp` operation and its name matches
diff --git a/flang/unittests/Optimizer/Builder/Runtime/TransformationalTest.cpp b/flang/unittests/Optimizer/Builder/Runtime/TransformationalTest.cpp
index d5884ae3febbb..28266bb90400b 100644
--- a/flang/unittests/Optimizer/Builder/Runtime/TransformationalTest.cpp
+++ b/flang/unittests/Optimizer/Builder/Runtime/TransformationalTest.cpp
@@ -144,15 +144,41 @@ TEST_F(RuntimeCallTest, genEoshiftVectorTest) {
checkCallOpFromResultBox(result, "_FortranAEoshiftVector", 4);
}
+void testGenMatmul(fir::FirOpBuilder &builder, mlir::Type eleTy1,
+ mlir::Type eleTy2, llvm::StringRef funcName) {
+ auto loc = builder.getUnknownLoc();
+ mlir::Type resultTy =
+ fir::ReferenceType::get(fir::BoxType::get(builder.getNoneType()));
+ mlir::Type seqTy1 =
+ fir::SequenceType::get(fir::SequenceType::Shape(2, 10), eleTy1);
+ mlir::Type seqTy2 =
+ fir::SequenceType::get(fir::SequenceType::Shape(2, 10), eleTy2);
+ mlir::Type boxTy1 = fir::BoxType::get(seqTy1);
+ mlir::Type boxTy2 = fir::BoxType::get(seqTy2);
+ mlir::Value result = builder.create<fir::UndefOp>(loc, resultTy);
+ mlir::Value matrixA = builder.create<fir::UndefOp>(loc, boxTy1);
+ mlir::Value matrixB = builder.create<fir::UndefOp>(loc, boxTy2);
+ fir::runtime::genMatmul(builder, loc, result, matrixA, matrixB);
+ checkCallOpFromResultBox(result, funcName, 3);
+}
+
TEST_F(RuntimeCallTest, genMatmulTest) {
- auto loc = firBuilder->getUnknownLoc();
- mlir::Type seqTy =
- fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty);
- mlir::Value result = firBuilder->create<fir::UndefOp>(loc, seqTy);
- mlir::Value matrixA = firBuilder->create<fir::UndefOp>(loc, seqTy);
- mlir::Value matrixB = firBuilder->create<fir::UndefOp>(loc, seqTy);
- fir::runtime::genMatmul(*firBuilder, loc, matrixA, matrixB, result);
- checkCallOpFromResultBox(result, "_FortranAMatmul", 3);
+ testGenMatmul(*firBuilder, i32Ty, i16Ty, "_FortranAMatmulInteger4Integer2");
+ testGenMatmul(*firBuilder, i32Ty, f64Ty, "_FortranAMatmulInteger4Real8");
+ testGenMatmul(*firBuilder, i32Ty, c8Ty, "_FortranAMatmulInteger4Complex8");
+ testGenMatmul(*firBuilder, f32Ty, i16Ty, "_FortranAMatmulReal4Integer2");
+ testGenMatmul(*firBuilder, f32Ty, f64Ty, "_FortranAMatmulReal4Real8");
+ testGenMatmul(*firBuilder, f32Ty, c8Ty, "_FortranAMatmulReal4Complex8");
+ testGenMatmul(*firBuilder, c4Ty, i16Ty, "_FortranAMatmulComplex4Integer2");
+ testGenMatmul(*firBuilder, c4Ty, f64Ty, "_FortranAMatmulComplex4Real8");
+ testGenMatmul(*firBuilder, c4Ty, c8Ty, "_FortranAMatmulComplex4Complex8");
+ testGenMatmul(*firBuilder, f80Ty, f128Ty, "_FortranAMatmulReal10Real16");
+ testGenMatmul(*firBuilder, f80Ty, i128Ty, "_FortranAMatmulReal10Integer16");
+ testGenMatmul(*firBuilder, f128Ty, i128Ty, "_FortranAMatmulReal16Integer16");
+ testGenMatmul(
+ *firBuilder, logical1Ty, logical2Ty, "_FortranAMatmulLogical1Logical2");
+ testGenMatmul(
+ *firBuilder, logical4Ty, logical8Ty, "_FortranAMatmulLogical4Logical8");
}
TEST_F(RuntimeCallTest, genPackTest) {
diff --git a/flang/unittests/Runtime/Matmul.cpp b/flang/unittests/Runtime/Matmul.cpp
index 226dbc5ae9eeb..c3fed9b972df2 100644
--- a/flang/unittests/Runtime/Matmul.cpp
+++ b/flang/unittests/Runtime/Matmul.cpp
@@ -40,29 +40,6 @@ TEST(Matmul, Basic) {
StaticDescriptor<2, true> statDesc;
Descriptor &result{statDesc.descriptor()};
- RTNAME(Matmul)(result, *x, *y, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
-
- std::memset(
- result.raw().base_addr, 0, result.Elements() * result.ElementBytes());
- result.GetDimension(0).SetLowerBound(0);
- result.GetDimension(1).SetLowerBound(2);
- RTNAME(MatmulDirect)(result, *x, *y, __FILE__, __LINE__);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
- result.Destroy();
-
RTNAME(MatmulInteger4Integer2)(result, *x, *y, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -86,16 +63,6 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
- RTNAME(Matmul)(result, *v, *x, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 1);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 3);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -2);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -8);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -14);
- result.Destroy();
-
RTNAME(MatmulInteger8Integer4)(result, *v, *x, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -106,16 +73,6 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -14);
result.Destroy();
- RTNAME(Matmul)(result, *y, *v, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 1);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 3);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -24);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -27);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
- result.Destroy();
-
RTNAME(MatmulInteger2Integer8)(result, *y, *v, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -159,19 +116,6 @@ TEST(Matmul, Basic) {
/*uppers=*/nullptr, /*strides=*/nullptr)};
ASSERT_EQ(errorY2, 0) << "CFI_section failed for Y2: " << errorY2;
- RTNAME(Matmul)(result, sectionX2, *y, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
- result.Destroy();
-
RTNAME(MatmulInteger4Integer2)(result, sectionX2, *y, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -185,19 +129,6 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
- RTNAME(Matmul)(result, *x, sectionY2, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
- result.Destroy();
-
RTNAME(MatmulInteger4Integer2)(result, *x, sectionY2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -211,19 +142,6 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
- RTNAME(Matmul)(result, sectionX2, sectionY2, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
- result.Destroy();
-
RTNAME(MatmulInteger4Integer2)
(result, sectionX2, sectionY2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
@@ -238,16 +156,6 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
- RTNAME(Matmul)(result, *v, sectionX2, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 1);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 3);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -2);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -8);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -14);
- result.Destroy();
-
RTNAME(MatmulInteger8Integer4)(result, *v, sectionX2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -258,16 +166,6 @@ TEST(Matmul, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -14);
result.Destroy();
- RTNAME(Matmul)(result, sectionY2, *v, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 1);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 3);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -24);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -27);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
- result.Destroy();
-
RTNAME(MatmulInteger2Integer8)(result, sectionY2, *v, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -285,23 +183,6 @@ TEST(Matmul, Basic) {
std::vector<std::uint8_t>{false, false, false, true, true, false})};
auto yLog{MakeArray<TypeCategory::Logical, 2>(std::vector<int>{3, 2},
std::vector<std::uint16_t>{false, false, false, true, true, false})};
- RTNAME(Matmul)(result, *xLog, *yLog, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Logical, 2}));
- EXPECT_FALSE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(0)));
- EXPECT_FALSE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(1)));
- EXPECT_FALSE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(2)));
- EXPECT_TRUE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(3)));
- result.Destroy();
-
RTNAME(MatmulLogical1Logical2)(result, *xLog, *yLog, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
diff --git a/flang/unittests/Runtime/MatmulTranspose.cpp b/flang/unittests/Runtime/MatmulTranspose.cpp
index 391c2e1b144ea..c582e945dc7c9 100644
--- a/flang/unittests/Runtime/MatmulTranspose.cpp
+++ b/flang/unittests/Runtime/MatmulTranspose.cpp
@@ -46,29 +46,6 @@ TEST(MatmulTranspose, Basic) {
StaticDescriptor<2, true> statDesc;
Descriptor &result{statDesc.descriptor()};
- RTNAME(MatmulTranspose)(result, *x, *y, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
-
- std::memset(
- result.raw().base_addr, 0, result.Elements() * result.ElementBytes());
- result.GetDimension(0).SetLowerBound(0);
- result.GetDimension(1).SetLowerBound(2);
- RTNAME(MatmulTransposeDirect)(result, *x, *y, __FILE__, __LINE__);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
- result.Destroy();
-
RTNAME(MatmulTransposeInteger4Integer2)(result, *x, *y, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -93,16 +70,6 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
- RTNAME(MatmulTranspose)(result, *z, *v, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 1);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 3);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -24);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -27);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
- result.Destroy();
-
RTNAME(MatmulTransposeInteger2Integer8)(result, *z, *v, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -113,27 +80,6 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
result.Destroy();
- RTNAME(MatmulTranspose)(result, *m, *z, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- ASSERT_EQ(result.GetDimension(0).LowerBound(), 1);
- ASSERT_EQ(result.GetDimension(0).UpperBound(), 4);
- ASSERT_EQ(result.GetDimension(1).LowerBound(), 1);
- ASSERT_EQ(result.GetDimension(1).UpperBound(), 3);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 2}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(0), 0);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(1), 9);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(2), 6);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(3), 15);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(4), 0);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(5), 10);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(6), 7);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(7), 17);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(8), 0);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(9), 11);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(10), 8);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int16_t>(11), 19);
- result.Destroy();
-
RTNAME(MatmulTransposeInteger2Integer2)(result, *m, *z, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
ASSERT_EQ(result.GetDimension(0).LowerBound(), 1);
@@ -204,19 +150,6 @@ TEST(MatmulTranspose, Basic) {
      &sectionZ2.raw(), &z2->raw(), lowersZ2, uppersZ2, /*strides=*/nullptr)};
ASSERT_EQ(errorZ2, 0) << "CFI_section failed for Z2: " << errorZ2;
- RTNAME(MatmulTranspose)(result, sectionX2, *y, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
- result.Destroy();
-
RTNAME(MatmulTransposeInteger4Integer2)
(result, sectionX2, *y, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
@@ -231,19 +164,6 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
- RTNAME(MatmulTranspose)(result, *x, sectionY2, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
- result.Destroy();
-
RTNAME(MatmulTransposeInteger4Integer2)
(result, *x, sectionY2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
@@ -258,19 +178,6 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
- RTNAME(MatmulTranspose)(result, sectionX2, sectionY2, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 4}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(0), 46);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(1), 67);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(2), 64);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
- result.Destroy();
-
RTNAME(MatmulTransposeInteger4Integer2)
(result, sectionX2, sectionY2, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
@@ -285,16 +192,6 @@ TEST(MatmulTranspose, Basic) {
EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int32_t>(3), 94);
result.Destroy();
- RTNAME(MatmulTranspose)(result, sectionZ2, *v, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 1);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 3);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Integer, 8}));
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(0), -24);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(1), -27);
- EXPECT_EQ(*result.ZeroBasedIndexedElement<std::int64_t>(2), -30);
- result.Destroy();
-
RTNAME(MatmulTransposeInteger2Integer8)
(result, sectionZ2, *v, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
@@ -315,23 +212,6 @@ TEST(MatmulTranspose, Basic) {
std::vector<std::uint16_t>{false, false, false, true, true, false})};
auto vLog{MakeArray<TypeCategory::Logical, 1>(
std::vector<int>{3}, std::vector<std::uint8_t>{true, false, true})};
- RTNAME(MatmulTranspose)(result, *xLog, *yLog, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 2);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(1).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Logical, 2}));
- EXPECT_FALSE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(0)));
- EXPECT_FALSE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(1)));
- EXPECT_TRUE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(2)));
- EXPECT_FALSE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(3)));
- result.Destroy();
-
RTNAME(MatmulTransposeLogical1Logical2)
(result, *xLog, *yLog, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 2);
@@ -350,17 +230,6 @@ TEST(MatmulTranspose, Basic) {
static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(3)));
result.Destroy();
- RTNAME(MatmulTranspose)(result, *yLog, *vLog, __FILE__, __LINE__);
- ASSERT_EQ(result.rank(), 1);
- EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
- EXPECT_EQ(result.GetDimension(0).Extent(), 2);
- ASSERT_EQ(result.type(), (TypeCode{TypeCategory::Logical, 2}));
- EXPECT_FALSE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(0)));
- EXPECT_TRUE(
- static_cast<bool>(*result.ZeroBasedIndexedElement<std::uint16_t>(1)));
- result.Destroy();
-
RTNAME(MatmulTransposeLogical2Logical1)
(result, *yLog, *vLog, __FILE__, __LINE__);
ASSERT_EQ(result.rank(), 1);
From 24cee1c41c05c9e934e46c4aa48e88caaba29786 Mon Sep 17 00:00:00 2001
From: Vyacheslav Levytskyy <vyacheslav.levytskyy at intel.com>
Date: Thu, 4 Jul 2024 07:29:21 +0200
Subject: [PATCH 205/246] [SPIR-V] Add __spirv_ wrappers to Non-Uniform,
Atomic, Convert Instructions (#96790)
This PR:
* adds the missing __spirv_ wrappers for Non-Uniform, Atomic, and Convert
Instructions,
* fixes the emission of Group builtins so that every call argument after
the group operation is forwarded to the generated instruction, and
* extends the test cases with checks covering the newly added __spirv_
wrappers (a short usage sketch follows below).
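
As a minimal usage sketch (mirroring the new atomic.ll test; the function
name @example is illustrative and not part of the diff): a __spirv_ wrapper
is matched by its demangled name and lowered directly to the corresponding
SPIR-V instruction, with the call arguments forwarded as operands:

  define spir_kernel void @example(ptr addrspace(4) %p, i64 %v) {
    ; scope operand 1 = Device, memory semantics operand 0 = Relaxed
    %r = call spir_func i64 @__spirv_AtomicSMin(ptr addrspace(4) %p, i32 1, i32 0, i64 %v)
    ret void
  }
  declare spir_func i64 @__spirv_AtomicSMin(ptr addrspace(4), i32, i32, i64)

This emits OpAtomicSMin directly rather than going through the OpenCL
atomic builtin lowering path.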
---
llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp | 34 ++++++---
llvm/lib/Target/SPIRV/SPIRVBuiltins.td | 49 ++++++++++++-
.../test/CodeGen/SPIRV/instructions/atomic.ll | 25 +++++++
.../SPIRV/instructions/integer-casts.ll | 52 ++++++++++++++
llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll | 17 ++++-
.../SPIRV/transcoding/sub_group_ballot.ll | 22 ++++++
.../sub_group_non_uniform_arithmetic.ll | 69 +++++++++++++++----
.../transcoding/sub_group_non_uniform_vote.ll | 14 ++++
.../SPIRV/transcoding/sub_group_shuffle.ll | 4 ++
.../transcoding/sub_group_shuffle_relative.ll | 8 +++
10 files changed, 269 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index dfec10bec3f9e..286bdb9a7ebac 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -1066,15 +1066,17 @@ static bool generateGroupInst(const SPIRV::IncomingCall *Call,
Register ScopeReg = Call->Arguments[0];
if (!MRI->getRegClassOrNull(ScopeReg))
MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
- Register ValueReg = Call->Arguments[2];
- if (!MRI->getRegClassOrNull(ValueReg))
- MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);
- MIRBuilder.buildInstr(GroupBuiltin->Opcode)
- .addDef(Call->ReturnRegister)
- .addUse(GR->getSPIRVTypeID(Call->ReturnType))
- .addUse(ScopeReg)
- .addImm(GrpOp)
- .addUse(ValueReg);
+ auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
+ .addDef(Call->ReturnRegister)
+ .addUse(GR->getSPIRVTypeID(Call->ReturnType))
+ .addUse(ScopeReg)
+ .addImm(GrpOp);
+ for (unsigned i = 2; i < Call->Arguments.size(); ++i) {
+ Register ArgReg = Call->Arguments[i];
+ if (!MRI->getRegClassOrNull(ArgReg))
+ MRI->setRegClass(ArgReg, &SPIRV::IDRegClass);
+ MIB.addUse(ArgReg);
+ }
return true;
}
@@ -1467,6 +1469,9 @@ static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
case SPIRV::OpAtomicFlagClear:
return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
default:
+ if (Call->isSpirvOp())
+ return buildOpFromWrapper(MIRBuilder, Opcode, Call,
+ GR->getSPIRVTypeID(Call->ReturnType));
return false;
}
}
@@ -1510,6 +1515,9 @@ static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
MachineIRBuilder &MIRBuilder,
SPIRVGlobalRegistry *GR) {
+ if (Call->isSpirvOp())
+ return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
+ GR->getSPIRVTypeID(Call->ReturnType));
unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
bool IsVec = Opcode == SPIRV::OpTypeVector;
// Use OpDot only in case of vector args and OpFMul in case of scalar args.
@@ -2232,6 +2240,14 @@ static bool generateConvertInst(const StringRef DemangledCall,
const SPIRV::ConvertBuiltin *Builtin =
SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
+ if (!Builtin && Call->isSpirvOp()) {
+ const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
+ unsigned Opcode =
+ SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
+ return buildOpFromWrapper(MIRBuilder, Opcode, Call,
+ GR->getSPIRVTypeID(Call->ReturnType));
+ }
+
if (Builtin->IsSaturated)
buildOpDecorate(Call->ReturnRegister, MIRBuilder,
SPIRV::Decoration::SaturatedConversion, {});
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
index fb88332ab8902..5c057a79afa0c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
@@ -99,6 +99,7 @@ def lookupBuiltin : SearchIndex {
// Dot builtin record:
def : DemangledBuiltin<"dot", OpenCL_std, Dot, 2, 2>;
+def : DemangledBuiltin<"__spirv_Dot", OpenCL_std, Dot, 2, 2>;
// Image builtin records:
def : DemangledBuiltin<"read_imagei", OpenCL_std, ReadImage, 2, 4>;
@@ -617,6 +618,10 @@ defm : DemangledNativeBuiltin<"atomic_flag_test_and_set_explicit", OpenCL_std, A
defm : DemangledNativeBuiltin<"atomic_flag_clear", OpenCL_std, Atomic, 1, 1, OpAtomicFlagClear>;
defm : DemangledNativeBuiltin<"__spirv_AtomicFlagClear", OpenCL_std, Atomic, 3, 3, OpAtomicFlagClear>;
defm : DemangledNativeBuiltin<"atomic_flag_clear_explicit", OpenCL_std, Atomic, 2, 3, OpAtomicFlagClear>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicSMin", OpenCL_std, Atomic, 4, 4, OpAtomicSMin>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicSMax", OpenCL_std, Atomic, 4, 4, OpAtomicSMax>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicUMin", OpenCL_std, Atomic, 4, 4, OpAtomicUMin>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicUMax", OpenCL_std, Atomic, 4, 4, OpAtomicUMax>;
// Barrier builtin records:
defm : DemangledNativeBuiltin<"barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>;
@@ -782,27 +787,41 @@ defm : DemangledGroupBuiltin<"group_broadcast_first", OnlySub, OpGroupNonUniform
// cl_khr_subgroup_non_uniform_vote
defm : DemangledGroupBuiltin<"group_elect", OnlySub, OpGroupNonUniformElect>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformElect", 1, 1, OpGroupNonUniformElect>;
defm : DemangledGroupBuiltin<"group_non_uniform_all", OnlySub, OpGroupNonUniformAll>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformAll", 2, 2, OpGroupNonUniformAll>;
defm : DemangledGroupBuiltin<"group_non_uniform_any", OnlySub, OpGroupNonUniformAny>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformAny", 2, 2, OpGroupNonUniformAny>;
defm : DemangledGroupBuiltin<"group_non_uniform_all_equal", OnlySub, OpGroupNonUniformAllEqual>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformAllEqual", 2, 2, OpGroupNonUniformAllEqual>;
// cl_khr_subgroup_ballot
defm : DemangledGroupBuiltin<"group_ballot", OnlySub, OpGroupNonUniformBallot>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformBallot", 2, 2, OpGroupNonUniformBallot>;
defm : DemangledGroupBuiltin<"group_inverse_ballot", OnlySub, OpGroupNonUniformInverseBallot>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformInverseBallot", 2, 2, OpGroupNonUniformInverseBallot>;
defm : DemangledGroupBuiltin<"group_ballot_bit_extract", OnlySub, OpGroupNonUniformBallotBitExtract>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformBallotBitExtract", 3, 3, OpGroupNonUniformBallotBitExtract>;
defm : DemangledGroupBuiltin<"group_ballot_bit_count", OnlySub, OpGroupNonUniformBallotBitCount>;
defm : DemangledGroupBuiltin<"group_ballot_inclusive_scan", OnlySub, OpGroupNonUniformBallotBitCount>;
defm : DemangledGroupBuiltin<"group_ballot_exclusive_scan", OnlySub, OpGroupNonUniformBallotBitCount>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformBallotBitCount", 3, 3, OpGroupNonUniformBallotBitCount>;
defm : DemangledGroupBuiltin<"group_ballot_find_lsb", OnlySub, OpGroupNonUniformBallotFindLSB>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformBallotFindLSB", 2, 2, OpGroupNonUniformBallotFindLSB>;
defm : DemangledGroupBuiltin<"group_ballot_find_msb", OnlySub, OpGroupNonUniformBallotFindMSB>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformBallotFindMSB", 2, 2, OpGroupNonUniformBallotFindMSB>;
// cl_khr_subgroup_shuffle
defm : DemangledGroupBuiltin<"group_shuffle", OnlySub, OpGroupNonUniformShuffle>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformShuffle", 3, 3, OpGroupNonUniformShuffle>;
defm : DemangledGroupBuiltin<"group_shuffle_xor", OnlySub, OpGroupNonUniformShuffleXor>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformShuffleXor", 3, 3, OpGroupNonUniformShuffleXor>;
// cl_khr_subgroup_shuffle_relative
defm : DemangledGroupBuiltin<"group_shuffle_up", OnlySub, OpGroupNonUniformShuffleUp>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformShuffleUp", 3, 3, OpGroupNonUniformShuffleUp>;
defm : DemangledGroupBuiltin<"group_shuffle_down", OnlySub, OpGroupNonUniformShuffleDown>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformShuffleDown", 3, 3, OpGroupNonUniformShuffleDown>;
defm : DemangledGroupBuiltin<"group_iadd", WorkOrSub, OpGroupIAdd>;
defm : DemangledGroupBuiltin<"group_reduce_adds", WorkOrSub, OpGroupIAdd>;
@@ -865,6 +884,7 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_addu", WorkOrSub,
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_adds", WorkOrSub, OpGroupNonUniformIAdd>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_addu", WorkOrSub, OpGroupNonUniformIAdd>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_adds", WorkOrSub, OpGroupNonUniformIAdd>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformIAdd", 3, 4, OpGroupNonUniformIAdd>;
defm : DemangledGroupBuiltin<"group_non_uniform_fadd", WorkOrSub, OpGroupNonUniformFAdd>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_addf", WorkOrSub, OpGroupNonUniformFAdd>;
@@ -879,6 +899,7 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_addd", WorkOrSub,
defm : DemangledGroupBuiltin<"group_clustered_reduce_addf", WorkOrSub, OpGroupNonUniformFAdd>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_addh", WorkOrSub, OpGroupNonUniformFAdd>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_addd", WorkOrSub, OpGroupNonUniformFAdd>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformFAdd", 3, 4, OpGroupNonUniformFAdd>;
defm : DemangledGroupBuiltin<"group_non_uniform_imul", WorkOrSub, OpGroupNonUniformIMul>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_mulu", WorkOrSub, OpGroupNonUniformIMul>;
@@ -889,6 +910,7 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_mulu", WorkOrSub,
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_muls", WorkOrSub, OpGroupNonUniformIMul>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_mulu", WorkOrSub, OpGroupNonUniformIMul>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_muls", WorkOrSub, OpGroupNonUniformIMul>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformIMul", 3, 4, OpGroupNonUniformIMul>;
defm : DemangledGroupBuiltin<"group_non_uniform_fmul", WorkOrSub, OpGroupNonUniformFMul>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_mulf", WorkOrSub, OpGroupNonUniformFMul>;
@@ -903,19 +925,21 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_muld", WorkOrSub,
defm : DemangledGroupBuiltin<"group_clustered_reduce_mulf", WorkOrSub, OpGroupNonUniformFMul>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_mulh", WorkOrSub, OpGroupNonUniformFMul>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_muld", WorkOrSub, OpGroupNonUniformFMul>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformFMul", 3, 4, OpGroupNonUniformFMul>;
defm : DemangledGroupBuiltin<"group_non_uniform_smin", WorkOrSub, OpGroupNonUniformSMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_mins", WorkOrSub, OpGroupNonUniformSMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_mins", WorkOrSub, OpGroupNonUniformSMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_mins", WorkOrSub, OpGroupNonUniformSMin>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_mins", WorkOrSub, OpGroupNonUniformSMin>;
-
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformSMin", 3, 4, OpGroupNonUniformSMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_umin", WorkOrSub, OpGroupNonUniformUMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_minu", WorkOrSub, OpGroupNonUniformUMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_minu", WorkOrSub, OpGroupNonUniformUMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_minu", WorkOrSub, OpGroupNonUniformUMin>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_minu", WorkOrSub, OpGroupNonUniformUMin>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformUMin", 3, 4, OpGroupNonUniformUMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_fmin", WorkOrSub, OpGroupNonUniformFMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_minf", WorkOrSub, OpGroupNonUniformFMin>;
@@ -930,18 +954,21 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_mind", WorkOrSub,
defm : DemangledGroupBuiltin<"group_clustered_reduce_minf", WorkOrSub, OpGroupNonUniformFMin>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_minh", WorkOrSub, OpGroupNonUniformFMin>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_mind", WorkOrSub, OpGroupNonUniformFMin>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformFMin", 3, 4, OpGroupNonUniformFMin>;
defm : DemangledGroupBuiltin<"group_non_uniform_smax", WorkOrSub, OpGroupNonUniformSMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_maxs", WorkOrSub, OpGroupNonUniformSMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_maxs", WorkOrSub, OpGroupNonUniformSMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_maxs", WorkOrSub, OpGroupNonUniformSMax>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_maxs", WorkOrSub, OpGroupNonUniformSMax>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformSMax", 3, 4, OpGroupNonUniformSMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_umax", WorkOrSub, OpGroupNonUniformUMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_maxu", WorkOrSub, OpGroupNonUniformUMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_maxu", WorkOrSub, OpGroupNonUniformUMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_maxu", WorkOrSub, OpGroupNonUniformUMax>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_maxu", WorkOrSub, OpGroupNonUniformUMax>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformUMax", 3, 4, OpGroupNonUniformUMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_fmax", WorkOrSub, OpGroupNonUniformFMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_maxf", WorkOrSub, OpGroupNonUniformFMax>;
@@ -956,6 +983,7 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_maxd", WorkOrSub,
defm : DemangledGroupBuiltin<"group_clustered_reduce_maxf", WorkOrSub, OpGroupNonUniformFMax>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_maxh", WorkOrSub, OpGroupNonUniformFMax>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_maxd", WorkOrSub, OpGroupNonUniformFMax>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformFMax", 3, 4, OpGroupNonUniformFMax>;
defm : DemangledGroupBuiltin<"group_non_uniform_iand", WorkOrSub, OpGroupNonUniformBitwiseAnd>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_andu", WorkOrSub, OpGroupNonUniformBitwiseAnd>;
@@ -966,6 +994,7 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_andu", WorkOrSub,
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_ands", WorkOrSub, OpGroupNonUniformBitwiseAnd>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_andu", WorkOrSub, OpGroupNonUniformBitwiseAnd>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_ands", WorkOrSub, OpGroupNonUniformBitwiseAnd>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformBitwiseAnd", 3, 4, OpGroupNonUniformBitwiseAnd>;
defm : DemangledGroupBuiltin<"group_non_uniform_ior", WorkOrSub, OpGroupNonUniformBitwiseOr>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_oru", WorkOrSub, OpGroupNonUniformBitwiseOr>;
@@ -976,6 +1005,7 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_oru", WorkOrSub,
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_ors", WorkOrSub, OpGroupNonUniformBitwiseOr>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_oru", WorkOrSub, OpGroupNonUniformBitwiseOr>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_ors", WorkOrSub, OpGroupNonUniformBitwiseOr>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformBitwiseOr", 3, 4, OpGroupNonUniformBitwiseOr>;
defm : DemangledGroupBuiltin<"group_non_uniform_ixor", WorkOrSub, OpGroupNonUniformBitwiseXor>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_xoru", WorkOrSub, OpGroupNonUniformBitwiseXor>;
@@ -986,24 +1016,28 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_xoru", WorkOrSub,
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_xors", WorkOrSub, OpGroupNonUniformBitwiseXor>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_xoru", WorkOrSub, OpGroupNonUniformBitwiseXor>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_xors", WorkOrSub, OpGroupNonUniformBitwiseXor>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformBitwiseXor", 3, 4, OpGroupNonUniformBitwiseXor>;
defm : DemangledGroupBuiltin<"group_non_uniform_logical_iand", WorkOrSub, OpGroupNonUniformLogicalAnd>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_logical_ands", WorkOrSub, OpGroupNonUniformLogicalAnd>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_logical_ands", WorkOrSub, OpGroupNonUniformLogicalAnd>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_logical_ands", WorkOrSub, OpGroupNonUniformLogicalAnd>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_logical_and", WorkOrSub, OpGroupNonUniformLogicalAnd>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformLogicalAnd", 3, 4, OpGroupNonUniformLogicalAnd>;
defm : DemangledGroupBuiltin<"group_non_uniform_logical_ior", WorkOrSub, OpGroupNonUniformLogicalOr>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_logical_ors", WorkOrSub, OpGroupNonUniformLogicalOr>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_logical_ors", WorkOrSub, OpGroupNonUniformLogicalOr>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_logical_ors", WorkOrSub, OpGroupNonUniformLogicalOr>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_logical_or", WorkOrSub, OpGroupNonUniformLogicalOr>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformLogicalOr", 3, 4, OpGroupNonUniformLogicalOr>;
defm : DemangledGroupBuiltin<"group_non_uniform_logical_ixor", WorkOrSub, OpGroupNonUniformLogicalXor>;
defm : DemangledGroupBuiltin<"group_non_uniform_reduce_logical_xors", WorkOrSub, OpGroupNonUniformLogicalXor>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_logical_xors", WorkOrSub, OpGroupNonUniformLogicalXor>;
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_logical_xors", WorkOrSub, OpGroupNonUniformLogicalXor>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_logical_xor", WorkOrSub, OpGroupNonUniformLogicalXor>;
+defm : DemangledGroupBuiltinWrapper<"__spirv_GroupNonUniformLogicalXor", 3, 4, OpGroupNonUniformLogicalXor>;
// cl_khr_subgroup_rotate / SPV_KHR_subgroup_rotate
defm : DemangledGroupBuiltin<"group_rotate", OnlySub, OpGroupNonUniformRotateKHR>;
@@ -1381,6 +1415,19 @@ defm : DemangledConvertBuiltin<"convert_long", OpenCL_std>;
defm : DemangledConvertBuiltin<"convert_ulong", OpenCL_std>;
defm : DemangledConvertBuiltin<"convert_float", OpenCL_std>;
+defm : DemangledNativeBuiltin<"__spirv_ConvertFToU", OpenCL_std, Convert, 1, 1, OpConvertFToU>;
+defm : DemangledNativeBuiltin<"__spirv_ConvertFToS", OpenCL_std, Convert, 1, 1, OpConvertFToS>;
+defm : DemangledNativeBuiltin<"__spirv_ConvertSToF", OpenCL_std, Convert, 1, 1, OpConvertSToF>;
+defm : DemangledNativeBuiltin<"__spirv_ConvertUToF", OpenCL_std, Convert, 1, 1, OpConvertUToF>;
+defm : DemangledNativeBuiltin<"__spirv_UConvert", OpenCL_std, Convert, 1, 1, OpUConvert>;
+defm : DemangledNativeBuiltin<"__spirv_SConvert", OpenCL_std, Convert, 1, 1, OpSConvert>;
+defm : DemangledNativeBuiltin<"__spirv_FConvert", OpenCL_std, Convert, 1, 1, OpFConvert>;
+defm : DemangledNativeBuiltin<"__spirv_QuantizeToF16", OpenCL_std, Convert, 1, 1, OpQuantizeToF16>;
+defm : DemangledNativeBuiltin<"__spirv_ConvertPtrToU", OpenCL_std, Convert, 1, 1, OpConvertPtrToU>;
+defm : DemangledNativeBuiltin<"__spirv_SatConvertSToU", OpenCL_std, Convert, 1, 1, OpSatConvertSToU>;
+defm : DemangledNativeBuiltin<"__spirv_SatConvertUToS", OpenCL_std, Convert, 1, 1, OpSatConvertUToS>;
+defm : DemangledNativeBuiltin<"__spirv_ConvertUToPtr", OpenCL_std, Convert, 1, 1, OpConvertUToPtr>;
+
// cl_intel_bfloat16_conversions / SPV_INTEL_bfloat16_conversion
// Multiclass used to define at the same time both a demangled builtin records
// and a corresponding convert builtin records.
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
index ce59bb2064027..8a19fc78238c6 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
@@ -1,3 +1,6 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
@@ -12,6 +15,7 @@
; CHECK-DAG: OpName [[XOR:%.*]] "test_xor"
; CHECK-DAG: [[I32Ty:%.*]] = OpTypeInt 32 0
+; CHECK-DAG: [[I64Ty:%.*]] = OpTypeInt 64 0
;; Device scope is encoded with constant 1
; CHECK-DAG: [[SCOPE:%.*]] = OpConstant [[I32Ty]] 1
;; "monotonic" maps to the relaxed memory semantics, encoded with constant 0
@@ -133,3 +137,24 @@ define i32 @test_xor(i32* %ptr, i32 %val) {
%r = atomicrmw xor i32* %ptr, i32 %val monotonic
ret i32 %r
}
+
+; CHECK: OpFunction
+; CHECK: [[Arg1:%.*]] = OpFunctionParameter
+; CHECK: [[Arg2:%.*]] = OpFunctionParameter
+; CHECK: OpAtomicSMin [[I64Ty]] %[[#]] [[SCOPE]] [[RELAXED]] [[Arg2]]
+; CHECK: OpAtomicSMax [[I64Ty]] %[[#]] [[SCOPE]] [[RELAXED]] [[Arg2]]
+; CHECK: OpAtomicUMin [[I64Ty]] %[[#]] [[SCOPE]] [[RELAXED]] [[Arg2]]
+; CHECK: OpAtomicUMax [[I64Ty]] %[[#]] [[SCOPE]] [[RELAXED]] [[Arg2]]
+; CHECK: OpFunctionEnd
+define dso_local spir_kernel void @test_wrappers(ptr addrspace(4) %arg, i64 %val) {
+ %r1 = call spir_func i64 @__spirv_AtomicSMin(ptr addrspace(4) %arg, i32 1, i32 0, i64 %val)
+ %r2 = call spir_func i64 @__spirv_AtomicSMax(ptr addrspace(4) %arg, i32 1, i32 0, i64 %val)
+ %r3 = call spir_func i64 @__spirv_AtomicUMin(ptr addrspace(4) %arg, i32 1, i32 0, i64 %val)
+ %r4 = call spir_func i64 @__spirv_AtomicUMax(ptr addrspace(4) %arg, i32 1, i32 0, i64 %val)
+ ret void
+}
+
+declare dso_local spir_func i64 @__spirv_AtomicSMin(ptr addrspace(4), i32, i32, i64)
+declare dso_local spir_func i64 @__spirv_AtomicSMax(ptr addrspace(4), i32, i32, i64)
+declare dso_local spir_func i64 @__spirv_AtomicUMin(ptr addrspace(4), i32, i32, i64)
+declare dso_local spir_func i64 @__spirv_AtomicUMax(ptr addrspace(4), i32, i32, i64)
diff --git a/llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll b/llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll
index a84ef3f70c575..18c39ac939879 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll
@@ -1,4 +1,8 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[TRUNC32_16:%.*]] "i32toi16"
; CHECK-DAG: OpName [[TRUNC32_8:%.*]] "i32toi8"
@@ -20,6 +24,9 @@
; CHECK-DAG: OpName [[ZEXT8_16v4:%.*]] "u8tou16v4"
; CHECK-DAG: OpName [[ZEXT16_32v4:%.*]] "u16tou32v4"
+; CHECK-DAG: [[F32:%.*]] = OpTypeFloat 32
+; CHECK-DAG: [[F16:%.*]] = OpTypeFloat 16
+; CHECK-DAG: [[U64:%.*]] = OpTypeInt 64 0
; CHECK-DAG: [[U32:%.*]] = OpTypeInt 32 0
; CHECK-DAG: [[U16:%.*]] = OpTypeInt 16 0
; CHECK-DAG: [[U8:%.*]] = OpTypeInt 8 0
@@ -227,3 +234,48 @@ define <4 x i32> @u16tou32v4(<4 x i16> %a) {
%r = zext <4 x i16> %a to <4 x i32>
ret <4 x i32> %r
}
+
+; CHECK: OpFunction
+; CHECK: [[Arg1:%.*]] = OpFunctionParameter
+; CHECK: [[Arg2:%.*]] = OpFunctionParameter
+; CHECK: %[[#]] = OpConvertFToU [[U32]] %[[#]]
+; CHECK: %[[#]] = OpConvertFToS [[U32]] %[[#]]
+; CHECK: %[[#]] = OpConvertSToF [[F32]] %[[#]]
+; CHECK: %[[#]] = OpConvertUToF [[F32]] %[[#]]
+; CHECK: %[[#]] = OpUConvert [[U32]] %[[#]]
+; CHECK: %[[#]] = OpSConvert [[U32]] %[[#]]
+; CHECK: %[[#]] = OpFConvert [[F16]] %[[#]]
+; CHECK: %[[#]] = OpQuantizeToF16 [[F32]] %[[#]]
+; CHECK: %[[#]] = OpSatConvertSToU [[U64]] %[[#]]
+; CHECK: %[[#]] = OpSatConvertUToS [[U64]] %[[#]]
+; CHECK: %[[#]] = OpConvertPtrToU [[U64]] [[Arg1]]
+; CHECK: %[[#]] = OpConvertUToPtr %[[#]] [[Arg2]]
+; CHECK: OpFunctionEnd
+define dso_local spir_kernel void @test_wrappers(ptr addrspace(4) %arg, i64 %arg_ptr) {
+ %r1 = call spir_func i32 @__spirv_ConvertFToU(float 0.000000e+00)
+ %r2 = call spir_func i32 @__spirv_ConvertFToS(float 0.000000e+00)
+ %r3 = call spir_func float @__spirv_ConvertSToF(i32 1)
+ %r4 = call spir_func float @__spirv_ConvertUToF(i32 1)
+ %r5 = call spir_func i32 @__spirv_UConvert(i64 1)
+ %r6 = call spir_func i32 @__spirv_SConvert(i64 1)
+ %r7 = call spir_func half @__spirv_FConvert(float 0.000000e+00)
+ %r8 = call spir_func float @__spirv_QuantizeToF16(float 0.000000e+00)
+ %r9 = call spir_func i64 @__spirv_SatConvertSToU(i64 1)
+ %r10 = call spir_func i64 @__spirv_SatConvertUToS(i64 1)
+ %r11 = call spir_func i64 @__spirv_ConvertPtrToU(ptr addrspace(4) %arg)
+ %r12 = call spir_func ptr addrspace(4) @__spirv_ConvertUToPtr(i64 %arg_ptr)
+ ret void
+}
+
+declare dso_local spir_func i32 @__spirv_ConvertFToU(float)
+declare dso_local spir_func i32 @__spirv_ConvertFToS(float)
+declare dso_local spir_func float @__spirv_ConvertSToF(i32)
+declare dso_local spir_func float @__spirv_ConvertUToF(i32)
+declare dso_local spir_func i32 @__spirv_UConvert(i64)
+declare dso_local spir_func i32 @__spirv_SConvert(i64)
+declare dso_local spir_func half @__spirv_FConvert(float)
+declare dso_local spir_func float @__spirv_QuantizeToF16(float)
+declare dso_local spir_func i64 @__spirv_SatConvertSToU(i64)
+declare dso_local spir_func i64 @__spirv_SatConvertUToS(i64)
+declare dso_local spir_func i64 @__spirv_ConvertPtrToU(ptr addrspace(4))
+declare dso_local spir_func ptr addrspace(4) @__spirv_ConvertUToPtr(i64)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll
index c5042c2b8229f..58fcc3688c89d 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll
@@ -1,4 +1,11 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: %[[#TyFloat:]] = OpTypeFloat 32
+; CHECK-SPIRV-DAG: %[[#TyHalf:]] = OpTypeFloat 16
;; The OpDot operands must be vectors; check that translating dot with
;; scalar arguments does not result in OpDot.
@@ -16,15 +23,21 @@ entry:
;; The OpDot operands must be vectors; check that translating dot with
;; vector arguments results in OpDot.
; CHECK-SPIRV-LABEL: %[[#]] = OpFunction %[[#]] None %[[#]]
-; CHECK-SPIRV: %[[#]] = OpDot %[[#]] %[[#]] %[[#]]
+; CHECK-SPIRV: %[[#]] = OpDot %[[#TyFloat]] %[[#]] %[[#]]
+; CHECK-SPIRV: %[[#]] = OpDot %[[#TyFloat]] %[[#]] %[[#]]
+; CHECK-SPIRV: %[[#]] = OpDot %[[#TyHalf]] %[[#]] %[[#]]
; CHECK-SPIRV: OpFunctionEnd
-define spir_kernel void @testVector(<2 x float> %f) {
+define spir_kernel void @testVector(<2 x float> %f, <2 x half> %h) {
entry:
%call = tail call spir_func float @_Z3dotDv2_fS_(<2 x float> %f, <2 x float> %f)
+ %call2 = tail call spir_func float @__spirv_Dot(<2 x float> %f, <2 x float> %f)
+ %call3 = tail call spir_func half @_Z11__spirv_DotDv2_DF16_S_(<2 x half> %h, <2 x half> %h)
ret void
}
declare spir_func float @_Z3dotff(float, float)
declare spir_func float @_Z3dotDv2_fS_(<2 x float>, <2 x float>)
+declare spir_func float @__spirv_Dot(<2 x float>, <2 x float>)
+declare spir_func half @_Z11__spirv_DotDv2_DF16_S_(<2 x half>, <2 x half>)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_ballot.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_ballot.ll
index 6cc9e0f332928..c579859a3f531 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_ballot.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_ballot.ll
@@ -844,55 +844,77 @@ declare dso_local spir_func double @_Z25sub_group_broadcast_firstd(double) local
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#ballot:]] = OpGroupNonUniformBallot %[[#int4]] %[[#ScopeSubgroup]] %[[#false]]
+; CHECK-SPIRV: %[[#ballot2:]] = OpGroupNonUniformBallot %[[#int4]] %[[#ScopeSubgroup]] %[[#false]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformInverseBallot %[[#bool]] %[[#ScopeSubgroup]] %[[#ballot]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformInverseBallot %[[#bool]] %[[#ScopeSubgroup]] %[[#ballot2]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotBitExtract %[[#bool]] %[[#ScopeSubgroup]] %[[#ballot]] %[[#int_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotBitExtract %[[#bool]] %[[#ScopeSubgroup]] %[[#ballot2]] %[[#int_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotBitCount %[[#int]] %[[#ScopeSubgroup]] Reduce %[[#ballot]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotBitCount %[[#int]] %[[#ScopeSubgroup]] Reduce %[[#ballot2]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotBitCount %[[#int]] %[[#ScopeSubgroup]] InclusiveScan %[[#ballot]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotBitCount %[[#int]] %[[#ScopeSubgroup]] InclusiveScan %[[#ballot2]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotBitCount %[[#int]] %[[#ScopeSubgroup]] ExclusiveScan %[[#ballot]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotBitCount %[[#int]] %[[#ScopeSubgroup]] ExclusiveScan %[[#ballot2]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotFindLSB %[[#int]] %[[#ScopeSubgroup]] %[[#ballot]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotFindLSB %[[#int]] %[[#ScopeSubgroup]] %[[#ballot2]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotFindMSB %[[#int]] %[[#ScopeSubgroup]] %[[#ballot]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotFindMSB %[[#int]] %[[#ScopeSubgroup]] %[[#ballot2]]
; CHECK-SPIRV: OpFunctionEnd
define dso_local spir_kernel void @testBallotOperations(i32 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func <4 x i32> @_Z16sub_group_balloti(i32 0)
+ %r2 = tail call spir_func <4 x i32> @__spirv_GroupNonUniformBallot(i32 3, i1 false)
%3 = tail call spir_func i32 @_Z24sub_group_inverse_ballotDv4_j(<4 x i32> %2)
+ %r3 = tail call spir_func i1 @__spirv_GroupNonUniformInverseBallot(i32 3, <4 x i32> %r2)
store i32 %3, i32 addrspace(1)* %0, align 4
%4 = tail call spir_func i32 @_Z28sub_group_ballot_bit_extractDv4_jj(<4 x i32> %2, i32 0)
+ %r4 = tail call spir_func i32 @__spirv_GroupNonUniformBallotBitExtract(i32 3, <4 x i32> %r2, i32 0)
%5 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
store i32 %4, i32 addrspace(1)* %5, align 4
%6 = tail call spir_func i32 @_Z26sub_group_ballot_bit_countDv4_j(<4 x i32> %2)
+ %r6 = tail call spir_func i32 @__spirv_GroupNonUniformBallotBitCount(i32 3, i32 0, <4 x i32> %r2)
%7 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
store i32 %6, i32 addrspace(1)* %7, align 4
%8 = tail call spir_func i32 @_Z31sub_group_ballot_inclusive_scanDv4_j(<4 x i32> %2)
+ %r8 = tail call spir_func i32 @__spirv_GroupNonUniformBallotBitCount(i32 3, i32 1, <4 x i32> %r2)
%9 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
store i32 %8, i32 addrspace(1)* %9, align 4
%10 = tail call spir_func i32 @_Z31sub_group_ballot_exclusive_scanDv4_j(<4 x i32> %2)
+ %r10 = tail call spir_func i32 @__spirv_GroupNonUniformBallotBitCount(i32 3, i32 2, <4 x i32> %r2)
%11 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 4
store i32 %10, i32 addrspace(1)* %11, align 4
%12 = tail call spir_func i32 @_Z25sub_group_ballot_find_lsbDv4_j(<4 x i32> %2)
+ %r12 = tail call spir_func i32 @__spirv_GroupNonUniformBallotFindLSB(i32 3, <4 x i32> %r2)
%13 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 5
store i32 %12, i32 addrspace(1)* %13, align 4
%14 = tail call spir_func i32 @_Z25sub_group_ballot_find_msbDv4_j(<4 x i32> %2)
+ %r14 = tail call spir_func i32 @__spirv_GroupNonUniformBallotFindMSB(i32 3, <4 x i32> %r2)
%15 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 6
store i32 %14, i32 addrspace(1)* %15, align 4
ret void
}
declare dso_local spir_func <4 x i32> @_Z16sub_group_balloti(i32) local_unnamed_addr
+declare dso_local spir_func <4 x i32> @__spirv_GroupNonUniformBallot(i32, i1)
declare dso_local spir_func i32 @_Z24sub_group_inverse_ballotDv4_j(<4 x i32>) local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformInverseBallot(i32, <4 x i32>)
declare dso_local spir_func i32 @_Z28sub_group_ballot_bit_extractDv4_jj(<4 x i32>, i32) local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformBallotBitExtract(i32, <4 x i32>, i32) local_unnamed_addr
declare dso_local spir_func i32 @_Z26sub_group_ballot_bit_countDv4_j(<4 x i32>) local_unnamed_addr
+declare dso_local spir_func i32 @__spirv_GroupNonUniformBallotBitCount(i32, i32, <4 x i32>)
declare dso_local spir_func i32 @_Z31sub_group_ballot_inclusive_scanDv4_j(<4 x i32>) local_unnamed_addr
declare dso_local spir_func i32 @_Z31sub_group_ballot_exclusive_scanDv4_j(<4 x i32>) local_unnamed_addr
declare dso_local spir_func i32 @_Z25sub_group_ballot_find_lsbDv4_j(<4 x i32>) local_unnamed_addr
+declare dso_local spir_func i32 @__spirv_GroupNonUniformBallotFindLSB(i32, <4 x i32>)
declare dso_local spir_func i32 @_Z25sub_group_ballot_find_msbDv4_j(<4 x i32>) local_unnamed_addr
+declare dso_local spir_func i32 @__spirv_GroupNonUniformBallotFindMSB(i32, <4 x i32>)
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpLoad %[[#int4]] %[[#eqMask]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
index 8f4910ff512f8..adf73fe153dea 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
@@ -331,8 +331,10 @@
; CHECK-SPIRV-DAG: %[[#false:]] = OpConstantFalse %[[#bool]]
; CHECK-SPIRV-DAG: %[[#ScopeSubgroup:]] = OpConstant %[[#int]] 3
; CHECK-SPIRV-DAG: %[[#char_0:]] = OpConstant %[[#char]] 0
+; CHECK-SPIRV-DAG: %[[#char_10:]] = OpConstant %[[#char]] 10
; CHECK-SPIRV-DAG: %[[#short_0:]] = OpConstant %[[#short]] 0
; CHECK-SPIRV-DAG: %[[#int_0:]] = OpConstant %[[#int]] 0
+; CHECK-SPIRV-DAG: %[[#int_32:]] = OpConstant %[[#int]] 32
; CHECK-SPIRV-DAG: %[[#long_0:]] = OpConstantNull %[[#long]]
; CHECK-SPIRV-DAG: %[[#half_0:]] = OpConstant %[[#half]] 0
; CHECK-SPIRV-DAG: %[[#float_0:]] = OpConstant %[[#float]] 0
@@ -340,9 +342,13 @@
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIAdd %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIAdd %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_10]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIMul %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIMul %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_10]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMin %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMin %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_10]] %[[#int_32]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_10]] %[[#int_32]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIAdd %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIMul %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMin %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
@@ -355,14 +361,18 @@
define dso_local spir_kernel void @testNonUniformArithmeticChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_addc(i8 signext 0)
+ %r2 = tail call spir_func signext i8 @__spirv_GroupNonUniformIAdd(i32 3, i32 0, i8 signext 10)
store i8 %2, i8 addrspace(1)* %0, align 1
%3 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_mulc(i8 signext 0)
+ %r3 = tail call spir_func signext i8 @__spirv_GroupNonUniformIMul(i32 3, i32 1, i8 signext 10)
%4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
store i8 %3, i8 addrspace(1)* %4, align 1
%5 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_minc(i8 signext 0)
+ %r5 = tail call spir_func signext i8 @__spirv_GroupNonUniformSMin(i32 3, i32 0, i8 signext 10, i32 32)
%6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
store i8 %5, i8 addrspace(1)* %6, align 1
%7 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_maxc(i8 signext 0)
+ %r7 = tail call spir_func signext i8 @__spirv_GroupNonUniformSMax(i32 3, i32 0, i8 signext 10, i32 32)
%8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
store i8 %7, i8 addrspace(1)* %8, align 1
%9 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_addc(i8 signext 0)
@@ -393,12 +403,16 @@ define dso_local spir_kernel void @testNonUniformArithmeticChar(i8 addrspace(1)*
}
declare dso_local spir_func signext i8 @_Z32sub_group_non_uniform_reduce_addc(i8 signext) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformIAdd(i32, i32, i8)
declare dso_local spir_func signext i8 @_Z32sub_group_non_uniform_reduce_mulc(i8 signext) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformIMul(i32, i32, i8)
declare dso_local spir_func signext i8 @_Z32sub_group_non_uniform_reduce_minc(i8 signext) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformSMin(i32, i32, i8, i32)
declare dso_local spir_func signext i8 @_Z32sub_group_non_uniform_reduce_maxc(i8 signext) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformSMax(i32, i32, i8, i32)
declare dso_local spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_addc(i8 signext) local_unnamed_addr
@@ -576,7 +590,9 @@ declare dso_local spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusiv
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIAdd %[[#short]] %[[#ScopeSubgroup]] Reduce %[[#short_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIMul %[[#short]] %[[#ScopeSubgroup]] Reduce %[[#short_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMin %[[#short]] %[[#ScopeSubgroup]] Reduce %[[#short_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMin %[[#short]] %[[#ScopeSubgroup]] Reduce %[[#short_0]] %[[#int_32]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#short]] %[[#ScopeSubgroup]] Reduce %[[#short_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#short]] %[[#ScopeSubgroup]] Reduce %[[#short_0]] %[[#int_32]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIAdd %[[#short]] %[[#ScopeSubgroup]] InclusiveScan %[[#short_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformIMul %[[#short]] %[[#ScopeSubgroup]] InclusiveScan %[[#short_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMin %[[#short]] %[[#ScopeSubgroup]] InclusiveScan %[[#short_0]]
@@ -594,9 +610,11 @@ define dso_local spir_kernel void @testNonUniformArithmeticUShort(i16 addrspace(
%4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
store i16 %3, i16 addrspace(1)* %4, align 2
%5 = tail call spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_mint(i16 zeroext 0)
+ %r5 = tail call spir_func signext i16 @__spirv_GroupNonUniformUMin(i32 3, i32 0, i16 signext 0, i32 32)
%6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
store i16 %5, i16 addrspace(1)* %6, align 2
%7 = tail call spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_maxt(i16 zeroext 0)
+ %r7 = tail call spir_func signext i16 @__spirv_GroupNonUniformUMax(i32 3, i32 0, i16 signext 0, i32 32)
%8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
store i16 %7, i16 addrspace(1)* %8, align 2
%9 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_inclusive_addt(i16 zeroext 0)
@@ -631,8 +649,10 @@ declare dso_local spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_addt(i
declare dso_local spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_mult(i16 zeroext) local_unnamed_addr
declare dso_local spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_mint(i16 zeroext) local_unnamed_addr
+declare dso_local spir_func zeroext i16 @__spirv_GroupNonUniformUMin(i32, i32, i16 signext, i32)
declare dso_local spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_maxt(i16 zeroext) local_unnamed_addr
+declare dso_local spir_func zeroext i16 @__spirv_GroupNonUniformUMax(i32, i32, i16 signext, i32)
declare dso_local spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_inclusive_addt(i16 zeroext) local_unnamed_addr
@@ -963,10 +983,10 @@ declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_minm(i
declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_maxm(i64) local_unnamed_addr
; CHECK-SPIRV: OpFunction
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFAdd %[[#float]] %[[#ScopeSubgroup]] Reduce %[[#float_0]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMul %[[#float]] %[[#ScopeSubgroup]] Reduce %[[#float_0]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMin %[[#float]] %[[#ScopeSubgroup]] Reduce %[[#float_0]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMax %[[#float]] %[[#ScopeSubgroup]] Reduce %[[#float_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformFAdd %[[#float]] %[[#ScopeSubgroup]] Reduce %[[#float_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformFMul %[[#float]] %[[#ScopeSubgroup]] Reduce %[[#float_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformFMin %[[#float]] %[[#ScopeSubgroup]] Reduce %[[#float_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformFMax %[[#float]] %[[#ScopeSubgroup]] Reduce %[[#float_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFAdd %[[#float]] %[[#ScopeSubgroup]] InclusiveScan %[[#float_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMul %[[#float]] %[[#ScopeSubgroup]] InclusiveScan %[[#float_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMin %[[#float]] %[[#ScopeSubgroup]] InclusiveScan %[[#float_0]]
@@ -979,14 +999,18 @@ declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_maxm(i
define dso_local spir_kernel void @testNonUniformArithmeticFloat(float addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func float @_Z32sub_group_non_uniform_reduce_addf(float 0.000000e+00)
+ %r2 = tail call spir_func float @__spirv_GroupNonUniformFAdd(i32 3, i32 0, float 0.000000e+00)
store float %2, float addrspace(1)* %0, align 4
%3 = tail call spir_func float @_Z32sub_group_non_uniform_reduce_mulf(float 0.000000e+00)
+ %r3 = tail call spir_func float @__spirv_GroupNonUniformFMul(i32 3, i32 0, float 0.000000e+00)
%4 = getelementptr inbounds float, float addrspace(1)* %0, i64 1
store float %3, float addrspace(1)* %4, align 4
%5 = tail call spir_func float @_Z32sub_group_non_uniform_reduce_minf(float 0.000000e+00)
+ %r5 = tail call spir_func float @__spirv_GroupNonUniformFMin(i32 3, i32 0, float 0.000000e+00)
%6 = getelementptr inbounds float, float addrspace(1)* %0, i64 2
store float %5, float addrspace(1)* %6, align 4
%7 = tail call spir_func float @_Z32sub_group_non_uniform_reduce_maxf(float 0.000000e+00)
+ %r7 = tail call spir_func float @__spirv_GroupNonUniformFMax(i32 3, i32 0, float 0.000000e+00)
%8 = getelementptr inbounds float, float addrspace(1)* %0, i64 3
store float %7, float addrspace(1)* %8, align 4
%9 = tail call spir_func float @_Z40sub_group_non_uniform_scan_inclusive_addf(float 0.000000e+00)
@@ -1017,12 +1041,16 @@ define dso_local spir_kernel void @testNonUniformArithmeticFloat(float addrspace
}
declare dso_local spir_func float @_Z32sub_group_non_uniform_reduce_addf(float) local_unnamed_addr
+declare dso_local spir_func float @__spirv_GroupNonUniformFAdd(i32, i32, float)
declare dso_local spir_func float @_Z32sub_group_non_uniform_reduce_mulf(float) local_unnamed_addr
+declare dso_local spir_func float @__spirv_GroupNonUniformFMul(i32, i32, float)
declare dso_local spir_func float @_Z32sub_group_non_uniform_reduce_minf(float) local_unnamed_addr
+declare dso_local spir_func float @__spirv_GroupNonUniformFMin(i32, i32, float)
declare dso_local spir_func float @_Z32sub_group_non_uniform_reduce_maxf(float) local_unnamed_addr
+declare dso_local spir_func float @__spirv_GroupNonUniformFMax(i32, i32, float)
declare dso_local spir_func float @_Z40sub_group_non_uniform_scan_inclusive_addf(float) local_unnamed_addr
@@ -1197,12 +1225,12 @@ declare dso_local spir_func double @_Z40sub_group_non_uniform_scan_exclusive_min
declare dso_local spir_func double @_Z40sub_group_non_uniform_scan_exclusive_maxd(double) local_unnamed_addr
; CHECK-SPIRV: OpFunction
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseAnd %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseOr %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseAnd %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseOr %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformBitwiseAnd %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformBitwiseOr %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] Reduce %[[#char_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformBitwiseAnd %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformBitwiseOr %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] InclusiveScan %[[#char_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseAnd %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseOr %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
@@ -1210,20 +1238,26 @@ declare dso_local spir_func double @_Z40sub_group_non_uniform_scan_exclusive_max
define dso_local spir_kernel void @testNonUniformBitwiseChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_andc(i8 signext 0)
+ %r2 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseAnd(i32 3, i32 0, i8 signext 0)
store i8 %2, i8 addrspace(1)* %0, align 1
%3 = tail call spir_func signext i8 @_Z31sub_group_non_uniform_reduce_orc(i8 signext 0)
+ %r3 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseOr(i32 3, i32 0, i8 signext 0)
%4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
store i8 %3, i8 addrspace(1)* %4, align 1
%5 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_xorc(i8 signext 0)
+ %r5 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseXor(i32 3, i32 0, i8 signext 0)
%6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
store i8 %5, i8 addrspace(1)* %6, align 1
%7 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_andc(i8 signext 0)
+ %r7 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseAnd(i32 3, i32 1, i8 signext 0)
%8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
store i8 %7, i8 addrspace(1)* %8, align 1
%9 = tail call spir_func signext i8 @_Z39sub_group_non_uniform_scan_inclusive_orc(i8 signext 0)
+ %r9 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseOr(i32 3, i32 1, i8 signext 0)
%10 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 4
store i8 %9, i8 addrspace(1)* %10, align 1
%11 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_xorc(i8 signext 0)
+ %r11 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseXor(i32 3, i32 1, i8 signext 0)
%12 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 5
store i8 %11, i8 addrspace(1)* %12, align 1
%13 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive_andc(i8 signext 0)
@@ -1239,10 +1273,13 @@ define dso_local spir_kernel void @testNonUniformBitwiseChar(i8 addrspace(1)* no
}
declare dso_local spir_func signext i8 @_Z32sub_group_non_uniform_reduce_andc(i8 signext) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformBitwiseAnd(i32, i32, i8 signext)
declare dso_local spir_func signext i8 @_Z31sub_group_non_uniform_reduce_orc(i8 signext) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformBitwiseOr(i32, i32, i8 signext)
declare dso_local spir_func signext i8 @_Z32sub_group_non_uniform_reduce_xorc(i8 signext) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformBitwiseXor(i32, i32, i8 signext)
declare dso_local spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_andc(i8 signext) local_unnamed_addr
@@ -1677,9 +1714,9 @@ declare dso_local spir_func i64 @_Z39sub_group_non_uniform_scan_exclusive_orm(i6
declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_xorm(i64) local_unnamed_addr
; CHECK-SPIRV: OpFunction
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformLogicalAnd %[[#bool]] %[[#ScopeSubgroup]] Reduce %[[#false]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformLogicalOr %[[#bool]] %[[#ScopeSubgroup]] Reduce %[[#false]]
-; CHECK-SPIRV: %[[#]] = OpGroupNonUniformLogicalXor %[[#bool]] %[[#ScopeSubgroup]] Reduce %[[#false]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformLogicalAnd %[[#bool]] %[[#ScopeSubgroup]] Reduce %[[#false]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformLogicalOr %[[#bool]] %[[#ScopeSubgroup]] Reduce %[[#false]]
+; CHECK-SPIRV-COUNT-2: %[[#]] = OpGroupNonUniformLogicalXor %[[#bool]] %[[#ScopeSubgroup]] Reduce %[[#false]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformLogicalAnd %[[#bool]] %[[#ScopeSubgroup]] InclusiveScan %[[#false]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformLogicalOr %[[#bool]] %[[#ScopeSubgroup]] InclusiveScan %[[#false]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformLogicalXor %[[#bool]] %[[#ScopeSubgroup]] InclusiveScan %[[#false]]
@@ -1690,11 +1727,14 @@ declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_xorm(i
define dso_local spir_kernel void @testNonUniformLogical(i32 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func i32 @_Z40sub_group_non_uniform_reduce_logical_andi(i32 0)
+ %r2 = tail call spir_func i1 @__spirv_GroupNonUniformLogicalAnd(i32 3, i32 0, i1 false)
store i32 %2, i32 addrspace(1)* %0, align 4
%3 = tail call spir_func i32 @_Z39sub_group_non_uniform_reduce_logical_ori(i32 0)
+ %r3 = tail call spir_func i1 @__spirv_GroupNonUniformLogicalOr(i32 3, i32 0, i1 false)
%4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
store i32 %3, i32 addrspace(1)* %4, align 4
%5 = tail call spir_func i32 @_Z40sub_group_non_uniform_reduce_logical_xori(i32 0)
+ %r5 = tail call spir_func i1 @__spirv_GroupNonUniformLogicalXor(i32 3, i32 0, i1 false)
%6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
store i32 %5, i32 addrspace(1)* %6, align 4
%7 = tail call spir_func i32 @_Z48sub_group_non_uniform_scan_inclusive_logical_andi(i32 0)
@@ -1719,10 +1759,13 @@ define dso_local spir_kernel void @testNonUniformLogical(i32 addrspace(1)* nocap
}
declare dso_local spir_func i32 @_Z40sub_group_non_uniform_reduce_logical_andi(i32) local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformLogicalAnd(i32, i32, i1)
declare dso_local spir_func i32 @_Z39sub_group_non_uniform_reduce_logical_ori(i32) local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformLogicalOr(i32, i32, i1)
declare dso_local spir_func i32 @_Z40sub_group_non_uniform_reduce_logical_xori(i32) local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformLogicalXor(i32, i32, i1)
declare dso_local spir_func i32 @_Z48sub_group_non_uniform_scan_inclusive_logical_andi(i32) local_unnamed_addr
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_vote.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_vote.ll
index 1073473a224df..183f1d2eeef59 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_vote.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_vote.ll
@@ -75,8 +75,10 @@
; CHECK-SPIRV-DAG: %[[#double:]] = OpTypeFloat 64
; CHECK-SPIRV-DAG: %[[#false:]] = OpConstantFalse %[[#bool]]
+; CHECK-SPIRV-DAG: %[[#true:]] = OpConstantTrue %[[#bool]]
; CHECK-SPIRV-DAG: %[[#ScopeSubgroup:]] = OpConstant %[[#int]] 3
; CHECK-SPIRV-DAG: %[[#char_0:]] = OpConstant %[[#char]] 0
+; CHECK-SPIRV-DAG: %[[#char_10:]] = OpConstant %[[#char]] 10
; CHECK-SPIRV-DAG: %[[#short_0:]] = OpConstant %[[#short]] 0
; CHECK-SPIRV-DAG: %[[#int_0:]] = OpConstant %[[#int]] 0
; CHECK-SPIRV-DAG: %[[#long_0:]] = OpConstantNull %[[#long]]
@@ -86,42 +88,52 @@
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformElect %[[#bool]] %[[#ScopeSubgroup]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformElect %[[#bool]] %[[#ScopeSubgroup]]
; CHECK-SPIRV: OpFunctionEnd
define dso_local spir_kernel void @testSubGroupElect(i32 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func i32 @_Z15sub_group_electv()
+ %r2 = tail call spir_func i1 @__spirv_GroupNonUniformElect(i32 3)
store i32 %2, i32 addrspace(1)* %0, align 4
ret void
}
declare dso_local spir_func i32 @_Z15sub_group_electv() local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformElect(i32)
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAll %[[#bool]] %[[#ScopeSubgroup]] %[[#false]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAll %[[#bool]] %[[#ScopeSubgroup]] %[[#true]]
; CHECK-SPIRV: OpFunctionEnd
define dso_local spir_kernel void @testSubGroupNonUniformAll(i32 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func i32 @_Z25sub_group_non_uniform_alli(i32 0)
+ %r2 = tail call spir_func i1 @__spirv_GroupNonUniformAll(i32 3, i1 true)
store i32 %2, i32 addrspace(1)* %0, align 4
ret void
}
declare dso_local spir_func i32 @_Z25sub_group_non_uniform_alli(i32) local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformAll(i32, i1)
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAny %[[#bool]] %[[#ScopeSubgroup]] %[[#false]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAny %[[#bool]] %[[#ScopeSubgroup]] %[[#true]]
; CHECK-SPIRV: OpFunctionEnd
define dso_local spir_kernel void @testSubGroupNonUniformAny(i32 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func i32 @_Z25sub_group_non_uniform_anyi(i32 0)
+ %r2 = tail call spir_func i1 @__spirv_GroupNonUniformAny(i32 3, i1 true)
store i32 %2, i32 addrspace(1)* %0, align 4
ret void
}
declare dso_local spir_func i32 @_Z25sub_group_non_uniform_anyi(i32) local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformAny(i32, i1)
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAllEqual %[[#bool]] %[[#ScopeSubgroup]] %[[#char_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAllEqual %[[#bool]] %[[#ScopeSubgroup]] %[[#char_10]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAllEqual %[[#bool]] %[[#ScopeSubgroup]] %[[#char_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAllEqual %[[#bool]] %[[#ScopeSubgroup]] %[[#short_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAllEqual %[[#bool]] %[[#ScopeSubgroup]] %[[#short_0]]
@@ -136,6 +148,7 @@ declare dso_local spir_func i32 @_Z25sub_group_non_uniform_anyi(i32) local_unnam
define dso_local spir_kernel void @testSubGroupNonUniformAllEqual(i32 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalc(i8 signext 0)
+ %r2 = tail call spir_func i1 @__spirv_GroupNonUniformAllEqual(i32 3, i8 signext 10)
store i32 %2, i32 addrspace(1)* %0, align 4
%3 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalh(i8 zeroext 0)
store i32 %3, i32 addrspace(1)* %0, align 4
@@ -161,6 +174,7 @@ define dso_local spir_kernel void @testSubGroupNonUniformAllEqual(i32 addrspace(
}
declare dso_local spir_func i32 @_Z31sub_group_non_uniform_all_equalc(i8 signext) local_unnamed_addr
+declare dso_local spir_func i1 @__spirv_GroupNonUniformAllEqual(i32, i8 signext)
declare dso_local spir_func i32 @_Z31sub_group_non_uniform_all_equalh(i8 zeroext) local_unnamed_addr
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll
index 9ed5c78bd0ede..370e63e9b5c91 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll
@@ -107,16 +107,20 @@
define dso_local spir_kernel void @testShuffleChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func signext i8 @_Z17sub_group_shufflecj(i8 signext 0, i32 0)
+ %r2 = tail call spir_func signext i8 @__spirv_GroupNonUniformShuffle(i32 3, i8 signext 0, i32 0)
store i8 %2, i8 addrspace(1)* %0, align 1
%3 = tail call spir_func signext i8 @_Z21sub_group_shuffle_xorcj(i8 signext 0, i32 0)
+ %r3 = tail call spir_func signext i8 @__spirv_GroupNonUniformShuffleXor(i32 3, i8 signext 0, i32 0)
%4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
store i8 %3, i8 addrspace(1)* %4, align 1
ret void
}
declare dso_local spir_func signext i8 @_Z17sub_group_shufflecj(i8 signext, i32) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformShuffle(i32, i8 signext, i32)
declare dso_local spir_func signext i8 @_Z21sub_group_shuffle_xorcj(i8 signext, i32) local_unnamed_addr
+declare dso_local spir_func signext i8 @__spirv_GroupNonUniformShuffleXor(i32, i8 signext, i32)
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffle %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll
index 3ad2c2d87549c..af2bc9b1c77cf 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll
@@ -102,13 +102,17 @@
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleUp %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleUp %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
+; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
; CHECK-SPIRV: OpFunctionEnd
define dso_local spir_kernel void @testShuffleRelativeChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
%2 = tail call spir_func signext i8 @_Z20sub_group_shuffle_upcj(i8 signext 0, i32 0)
+ %w2 = tail call spir_func i8 @__spirv_GroupNonUniformShuffleUp(i32 3, i8 signext 0, i32 0)
store i8 %2, i8 addrspace(1)* %0, align 1
%3 = tail call spir_func signext i8 @_Z22sub_group_shuffle_downcj(i8 signext 0, i32 0)
+ %w3 = tail call spir_func i8 @__spirv_GroupNonUniformShuffleDown(i32 3, i8 signext 0, i32 0)
%4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
store i8 %3, i8 addrspace(1)* %4, align 1
ret void
@@ -118,6 +122,10 @@ declare dso_local spir_func signext i8 @_Z20sub_group_shuffle_upcj(i8 signext, i
declare dso_local spir_func signext i8 @_Z22sub_group_shuffle_downcj(i8 signext, i32) local_unnamed_addr
+declare dso_local spir_func i8 @__spirv_GroupNonUniformShuffleUp(i32, i8, i32)
+
+declare dso_local spir_func i8 @__spirv_GroupNonUniformShuffleDown(i32, i8, i32)
+
; CHECK-SPIRV: OpFunction
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleUp %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
>From 843117050d63db6de983d4840ab3786f91c118b3 Mon Sep 17 00:00:00 2001
From: goldsteinn <35538541+goldsteinn at users.noreply.github.com>
Date: Thu, 4 Jul 2024 13:29:56 +0800
Subject: [PATCH 206/246] [emacs] More consistently highlight value keywords
that appear in vectors (#97594)
Previously something like `<i8 123, i8 poison>` would not properly
highlight the `poison` keyword at the end.
---
llvm/utils/emacs/llvm-mode.el | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/utils/emacs/llvm-mode.el b/llvm/utils/emacs/llvm-mode.el
index 4d7c425780eb8..dab37833ff63a 100644
--- a/llvm/utils/emacs/llvm-mode.el
+++ b/llvm/utils/emacs/llvm-mode.el
@@ -61,6 +61,8 @@
`(,(concat "\\<" llvm-mode-primitive-type-regexp "\\>") . font-lock-type-face)
;; Integer literals
'("\\b[-]?[0-9]+\\b" . font-lock-preprocessor-face)
+ ;; Values that can appear in a vec
+ '("\\b\\(true\\|false\\|null\\|undef\\|poison\\|none\\)\\b" . font-lock-keyword-face)
;; Floating point constants
'("\\b[-+]?[0-9]+.[0-9]*\\([eE][-+]?[0-9]+\\)?\\b" . font-lock-preprocessor-face)
;; Hex constants
@@ -74,7 +76,7 @@
"private" "internal" "weak" "weak_odr" "linkonce" "linkonce_odr" "available_externally" "appending" "common" "extern_weak" "external"
"uninitialized" "implementation" "..."
;; Values
- "true" "false" "null" "undef" "zeroinitializer" "none" "c" "asm" "blockaddress" "poison"
+ "zeroinitializer" "c" "asm" "blockaddress"
;; Calling conventions
"ccc" "fastcc" "coldcc" "anyregcc" "preserve_mostcc" "preserve_allcc"
>From 2acb068865e009e103391d25b18ed90b4c65d6cb Mon Sep 17 00:00:00 2001
From: Daniil Kovalev <dkovalev at accesssoftek.com>
Date: Thu, 4 Jul 2024 09:34:12 +0300
Subject: [PATCH 207/246] [PAC][llvm-readobj][ELF][AArch64] Define AUTH
relocations for signed GOT (#96158)
- AUTH variant GOT-generating relocations
https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#auth-variant-got-generating-relocations
- AUTH variant dynamic relocations for signed GOT
https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#additional-auth-variant-dynamic-relocations-for-signed-got
---
.../llvm/BinaryFormat/ELFRelocs/AArch64.def | 17 ++++++++++
.../llvm-readobj/ELF/reloc-types-aarch64.test | 34 +++++++++++++++++++
2 files changed, 51 insertions(+)
diff --git a/llvm/include/llvm/BinaryFormat/ELFRelocs/AArch64.def b/llvm/include/llvm/BinaryFormat/ELFRelocs/AArch64.def
index cb05db85e2b5a..6f0e948a98c5f 100644
--- a/llvm/include/llvm/BinaryFormat/ELFRelocs/AArch64.def
+++ b/llvm/include/llvm/BinaryFormat/ELFRelocs/AArch64.def
@@ -144,6 +144,23 @@ ELF_RELOC(R_AARCH64_IRELATIVE, 0x408)
// https://github.com/ARM-software/abi-aa
ELF_RELOC(R_AARCH64_AUTH_ABS64, 0x244)
ELF_RELOC(R_AARCH64_AUTH_RELATIVE, 0x411)
+ELF_RELOC(R_AARCH64_AUTH_MOVW_GOTOFF_G0, 0x8110)
+ELF_RELOC(R_AARCH64_AUTH_MOVW_GOTOFF_G0_NC, 0x8111)
+ELF_RELOC(R_AARCH64_AUTH_MOVW_GOTOFF_G1, 0x8112)
+ELF_RELOC(R_AARCH64_AUTH_MOVW_GOTOFF_G1_NC, 0x8113)
+ELF_RELOC(R_AARCH64_AUTH_MOVW_GOTOFF_G2, 0x8114)
+ELF_RELOC(R_AARCH64_AUTH_MOVW_GOTOFF_G2_NC, 0x8115)
+ELF_RELOC(R_AARCH64_AUTH_MOVW_GOTOFF_G3, 0x8116)
+ELF_RELOC(R_AARCH64_AUTH_GOT_LD_PREL19, 0x8117)
+ELF_RELOC(R_AARCH64_AUTH_LD64_GOTOFF_LO15, 0x8118)
+ELF_RELOC(R_AARCH64_AUTH_ADR_GOT_PAGE, 0x8119)
+ELF_RELOC(R_AARCH64_AUTH_LD64_GOT_LO12_NC, 0x811a)
+ELF_RELOC(R_AARCH64_AUTH_LD64_GOTPAGE_LO15, 0x811b)
+ELF_RELOC(R_AARCH64_AUTH_GOT_ADD_LO12_NC, 0x811c)
+ELF_RELOC(R_AARCH64_AUTH_GOT_ADR_PREL_LO21, 0x811d)
+ELF_RELOC(R_AARCH64_AUTH_GLOB_DAT, 0xe201)
+ELF_RELOC(R_AARCH64_AUTH_TLSDESC, 0xe202)
+ELF_RELOC(R_AARCH64_AUTH_IRELATIVE, 0xe203)
// ELF32
// ELF_RELOC(R_AARCH64_P32_NONE, 0)
diff --git a/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test b/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
index cf95b51700267..2833c0b59ae13 100644
--- a/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
+++ b/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
@@ -130,6 +130,23 @@
# CHECK: Type: R_AARCH64_TLSDESC (1031)
# CHECK: Type: R_AARCH64_IRELATIVE (1032)
# CHECK: Type: R_AARCH64_AUTH_RELATIVE (1041)
+# CHECK: Type: R_AARCH64_AUTH_MOVW_GOTOFF_G0 (33040)
+# CHECK: Type: R_AARCH64_AUTH_MOVW_GOTOFF_G0_NC (33041)
+# CHECK: Type: R_AARCH64_AUTH_MOVW_GOTOFF_G1 (33042)
+# CHECK: Type: R_AARCH64_AUTH_MOVW_GOTOFF_G1_NC (33043)
+# CHECK: Type: R_AARCH64_AUTH_MOVW_GOTOFF_G2 (33044)
+# CHECK: Type: R_AARCH64_AUTH_MOVW_GOTOFF_G2_NC (33045)
+# CHECK: Type: R_AARCH64_AUTH_MOVW_GOTOFF_G3 (33046)
+# CHECK: Type: R_AARCH64_AUTH_GOT_LD_PREL19 (33047)
+# CHECK: Type: R_AARCH64_AUTH_LD64_GOTOFF_LO15 (33048)
+# CHECK: Type: R_AARCH64_AUTH_ADR_GOT_PAGE (33049)
+# CHECK: Type: R_AARCH64_AUTH_LD64_GOT_LO12_NC (33050)
+# CHECK: Type: R_AARCH64_AUTH_LD64_GOTPAGE_LO15 (33051)
+# CHECK: Type: R_AARCH64_AUTH_GOT_ADD_LO12_NC (33052)
+# CHECK: Type: R_AARCH64_AUTH_GOT_ADR_PREL_LO21 (33053)
+# CHECK: Type: R_AARCH64_AUTH_GLOB_DAT (57857)
+# CHECK: Type: R_AARCH64_AUTH_TLSDESC (57858)
+# CHECK: Type: R_AARCH64_AUTH_IRELATIVE (57859)
--- !ELF
FileHeader:
@@ -267,3 +284,20 @@ Sections:
- Type: R_AARCH64_TLSDESC
- Type: R_AARCH64_IRELATIVE
- Type: R_AARCH64_AUTH_RELATIVE
+ - Type: R_AARCH64_AUTH_MOVW_GOTOFF_G0
+ - Type: R_AARCH64_AUTH_MOVW_GOTOFF_G0_NC
+ - Type: R_AARCH64_AUTH_MOVW_GOTOFF_G1
+ - Type: R_AARCH64_AUTH_MOVW_GOTOFF_G1_NC
+ - Type: R_AARCH64_AUTH_MOVW_GOTOFF_G2
+ - Type: R_AARCH64_AUTH_MOVW_GOTOFF_G2_NC
+ - Type: R_AARCH64_AUTH_MOVW_GOTOFF_G3
+ - Type: R_AARCH64_AUTH_GOT_LD_PREL19
+ - Type: R_AARCH64_AUTH_LD64_GOTOFF_LO15
+ - Type: R_AARCH64_AUTH_ADR_GOT_PAGE
+ - Type: R_AARCH64_AUTH_LD64_GOT_LO12_NC
+ - Type: R_AARCH64_AUTH_LD64_GOTPAGE_LO15
+ - Type: R_AARCH64_AUTH_GOT_ADD_LO12_NC
+ - Type: R_AARCH64_AUTH_GOT_ADR_PREL_LO21
+ - Type: R_AARCH64_AUTH_GLOB_DAT
+ - Type: R_AARCH64_AUTH_TLSDESC
+ - Type: R_AARCH64_AUTH_IRELATIVE
>From 86187ed2998e43be62176c2c4a7b204cc52f6ce6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timm=20B=C3=A4der?= <tbaeder at redhat.com>
Date: Thu, 4 Jul 2024 07:26:27 +0200
Subject: [PATCH 208/246] [clang][Interp] Fix init chain in local initializers
---
clang/lib/AST/Interp/Compiler.cpp | 4 +++-
clang/test/AST/Interp/records.cpp | 12 ++++++++++++
2 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/clang/lib/AST/Interp/Compiler.cpp b/clang/lib/AST/Interp/Compiler.cpp
index 775cabf7f8c59..2af4c38c5ac3d 100644
--- a/clang/lib/AST/Interp/Compiler.cpp
+++ b/clang/lib/AST/Interp/Compiler.cpp
@@ -3577,6 +3577,7 @@ VarCreationState Compiler<Emitter>::visitVarDecl(const VarDecl *VD, bool Topleve
return !Init || (checkDecl() && initGlobal(*GlobalIndex));
} else {
VariableScope<Emitter> LocalScope(this, VD);
+ InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD));
if (VarT) {
unsigned Offset = this->allocateLocalPrimitive(
@@ -3911,7 +3912,8 @@ bool Compiler<Emitter>::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
SourceLocScope<Emitter> SLS(this, E);
bool Old = InitStackActive;
- InitStackActive = !isa<FunctionDecl>(E->getUsedContext());
+ InitStackActive =
+ !(E->getUsedContext()->getDeclKind() == Decl::CXXConstructor);
bool Result = this->delegate(E->getExpr());
InitStackActive = Old;
return Result;
diff --git a/clang/test/AST/Interp/records.cpp b/clang/test/AST/Interp/records.cpp
index 9f341f5bc6d1d..1554e54275598 100644
--- a/clang/test/AST/Interp/records.cpp
+++ b/clang/test/AST/Interp/records.cpp
@@ -1482,3 +1482,15 @@ namespace FloatAPValue {
ClassTemplateArgRefTemplate<ClassTemplateArgObj.Arg> ClassTemplateArgRefObj;
}
#endif
+
+namespace LocalWithThisPtrInit {
+ struct S {
+ int i;
+ int *p = &i;
+ };
+ constexpr int foo() {
+ S s{2};
+ return *s.p;
+ }
+ static_assert(foo() == 2, "");
+}
>From 948862b24d209ddcf5a93845e1ce327d108761ce Mon Sep 17 00:00:00 2001
From: bangyu shen <94283495+shubaoyu2 at users.noreply.github.com>
Date: Thu, 4 Jul 2024 14:43:53 +0800
Subject: [PATCH 209/246] [mlir][nvvm] Fix the verifier of `wgmma.mma_async`
wrt transposed layouts (#97538)
WGMMA expects the default layouts for A/B to be row/col, so the
transposed versions are col/row. When checking that other data types
cannot use transposed layouts, the verifier should reject col-major
for A and row-major for B.
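A minimal self-contained sketch of the corrected rule in C++; the enum
definitions here stand in for mlir::NVVM's real WGMMATypes and
MMALayout, and this only condenses the verifier change in the diff
below:

enum class WGMMATypes { f16, bf16, s8, u8, tf32, e4m3, e5m2 };
enum class MMALayout { row, col };

// f16/bf16 may be stored either way via the PTX transpose immediates;
// every other element type requires row-major A and col-major B.
bool layoutsValid(WGMMATypes typeA, MMALayout layoutA, MMALayout layoutB) {
  if (typeA == WGMMATypes::f16 || typeA == WGMMATypes::bf16)
    return true;
  return layoutA == MMALayout::row && layoutB == MMALayout::col;
}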
---
mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp | 5 +++-
.../Conversion/NVVMToLLVM/nvvm-to-llvm.mlir | 24 +++++++++----------
2 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 036a9a15af838..4d1896551101e 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -878,9 +878,12 @@ LogicalResult NVVM::WgmmaMmaAsyncOp::verify() {
}
// Check transpose (only available for f16/bf16)
+  // Matrix A should be stored in row-major and B in column-major order.
+  // Only f16/bf16 matrices can be stored in either column-major or row-major
+  // by setting the transpose values (imm-trans-a, imm-trans-b) in PTX code.
if ((typeA != WGMMATypes::f16 && typeA != WGMMATypes::bf16) &&
(getLayoutA() == mlir::NVVM::MMALayout::col ||
- getLayoutB() == mlir::NVVM::MMALayout::col)) {
+ getLayoutB() == mlir::NVVM::MMALayout::row)) {
return emitOpError()
<< "given layouts layout_a = " << stringifyMMALayout(getLayoutA())
<< " and layout_b = " << stringifyMMALayout(getLayoutB())
diff --git a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
index 21947c242461e..375e2951a037c 100644
--- a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
+++ b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
@@ -397,19 +397,19 @@ func.func @wgmma_s32_s8_s8_satfinite(%descA : i64, %descB : i64) -> !mat16i32{
#nvvm.shape<m = 64, n = 8, k = 32>,
D [<s32>, #nvvm.wgmma_scale_out<one>, <satfinite>],
A [<s8>, #nvvm.wgmma_scale_in<one>, <row>],
- B [<s8>, #nvvm.wgmma_scale_in<one>, <row>]
+ B [<s8>, #nvvm.wgmma_scale_in<one>, <col>]
: !mat16i32 -> !mat16i32
%result2 = nvvm.wgmma.mma_async %descA, %descB, %result1,
#nvvm.shape<m = 64, n = 8, k = 32>,
D [<s32>, #nvvm.wgmma_scale_out<one>, <satfinite>],
A [<s8>, #nvvm.wgmma_scale_in<one>, <row>],
- B [<s8>, #nvvm.wgmma_scale_in<one>, <row>]
+ B [<s8>, #nvvm.wgmma_scale_in<one>, <col>]
: !mat16i32 -> !mat16i32
%result3 = nvvm.wgmma.mma_async %descA, %descB, %result2,
#nvvm.shape<m = 64, n = 8, k = 32>,
D [<s32>, #nvvm.wgmma_scale_out<one>, <satfinite>],
A [<s8>, #nvvm.wgmma_scale_in<one>, <row>],
- B [<s8>, #nvvm.wgmma_scale_in<one>, <row>]
+ B [<s8>, #nvvm.wgmma_scale_in<one>, <col>]
: !mat16i32 -> !mat16i32
return %result3 : !mat16i32
}
@@ -458,19 +458,19 @@ func.func @wgmma_s32_u8_u8(%descA : i64, %descB : i64) -> !mat16i32 {
#nvvm.shape<m = 64, n = 8, k = 32>,
D [<s32>, #nvvm.wgmma_scale_out<one>],
A [<u8>, #nvvm.wgmma_scale_in<one>, <row>],
- B [<u8>, #nvvm.wgmma_scale_in<one>, <row>]
+ B [<u8>, #nvvm.wgmma_scale_in<one>, <col>]
: !mat16i32 -> !mat16i32
%result2 = nvvm.wgmma.mma_async %descA, %descB, %result1,
#nvvm.shape<m = 64, n = 8, k = 32>,
D [<s32>, #nvvm.wgmma_scale_out<one>],
A [<u8>, #nvvm.wgmma_scale_in<one>, <row>],
- B [<u8>, #nvvm.wgmma_scale_in<one>, <row>]
+ B [<u8>, #nvvm.wgmma_scale_in<one>, <col>]
: !mat16i32 -> !mat16i32
%result3 = nvvm.wgmma.mma_async %descA, %descB, %result2,
#nvvm.shape<m = 64, n = 8, k = 32>,
D [<s32>, #nvvm.wgmma_scale_out<one>],
A [<u8>, #nvvm.wgmma_scale_in<one>, <row>],
- B [<u8>, #nvvm.wgmma_scale_in<one>, <row>]
+ B [<u8>, #nvvm.wgmma_scale_in<one>, <col>]
: !mat16i32 -> !mat16i32
return %result3 : !mat16i32
}
@@ -500,13 +500,13 @@ func.func @wgmma_f32_tf32_tf32(%descA : i64, %descB : i64) -> !mat32f32 {
#nvvm.shape<m = 64, n = 64, k = 8>,
D [#nvvm.wgmma_type<f32>, #nvvm.wgmma_scale_out<one>],
A [#nvvm.wgmma_type<tf32>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>],
- B [#nvvm.wgmma_type<tf32>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>]
+ B [#nvvm.wgmma_type<tf32>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<col>]
: !mat32f32 -> !mat32f32
%result2 = nvvm.wgmma.mma_async %descA, %descB, %result1,
#nvvm.shape<m = 64, n = 64, k = 8>,
D [#nvvm.wgmma_type<f32>, #nvvm.wgmma_scale_out<one>],
A [#nvvm.wgmma_type<tf32>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>],
- B [#nvvm.wgmma_type<tf32>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>]
+ B [#nvvm.wgmma_type<tf32>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<col>]
: !mat32f32 -> !mat32f32
return %result2 : !mat32f32
}
@@ -533,13 +533,13 @@ func.func @wgmma_f32_e4m3_e4m3(%descA : i64, %descB : i64) -> !mat32f32 {
#nvvm.shape<m = 64, n = 64, k = 32>,
D [#nvvm.wgmma_type<f32>, #nvvm.wgmma_scale_out<one>],
A [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>],
- B [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>]
+ B [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<col>]
: !mat32f32 -> !mat32f32
%result2 = nvvm.wgmma.mma_async %descA, %descB, %result1,
#nvvm.shape<m = 64, n = 64, k = 32>,
D [#nvvm.wgmma_type<f32>, #nvvm.wgmma_scale_out<one>],
A [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>],
- B [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>]
+ B [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<col>]
: !mat32f32 -> !mat32f32
return %result2 : !mat32f32
}
@@ -565,13 +565,13 @@ func.func @wgmma_f32_e5m2_e4m3(%descA : i64, %descB : i64) -> !mat32f32 {
#nvvm.shape<m = 64, n = 64, k = 32>,
D [#nvvm.wgmma_type<f32>, #nvvm.wgmma_scale_out<one>],
A [#nvvm.wgmma_type<e5m2>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>],
- B [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>]
+ B [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<col>]
: !mat32f32 -> !mat32f32
%result2 = nvvm.wgmma.mma_async %descA, %descB, %result1,
#nvvm.shape<m = 64, n = 64, k = 32>,
D [#nvvm.wgmma_type<f32>, #nvvm.wgmma_scale_out<one>],
A [#nvvm.wgmma_type<e5m2>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>],
- B [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<row>]
+ B [#nvvm.wgmma_type<e4m3>, #nvvm.wgmma_scale_in<one>, #nvvm.mma_layout<col>]
: !mat32f32 -> !mat32f32
return %result2 : !mat32f32
}
>From 0ad6ac8c5338e42192bc006576397a02b838d265 Mon Sep 17 00:00:00 2001
From: Nikhil Kalra <1368497+nikalra at users.noreply.github.com>
Date: Wed, 3 Jul 2024 23:49:33 -0700
Subject: [PATCH 210/246] [NFC][MLIR] Fix: `alloca` promotion for
`AllocationOpInterface` (#97672)
The std::optional returned by buildPromotedAlloc was directly
dereferenced and assumed to be non-null, even though the documentation
for AllocationOpInterface indicates that std::nullopt is a legal value
if buffer stack promotion is not supported (and is the default value
supplied by the TableGen interface file). This patch removes the direct
dereference so that the optional can be null-checked prior to use.
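A minimal sketch of the safe pattern, with a hypothetical free
function standing in for the interface method:

#include <optional>

struct Operation;

// Stand-in for AllocationOpInterface::buildPromotedAlloc, which may
// legally return std::nullopt when stack promotion is unsupported.
std::optional<Operation *> buildPromotedAllocStub() { return std::nullopt; }

void tryPromote() {
  std::optional<Operation *> alloca = buildPromotedAllocStub();
  if (!alloca)
    return; // no promotion available; do not dereference
  Operation *op = alloca.value(); // safe only after the null check
  (void)op;
}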
Co-authored-by: Nikhil Kalra <nkalra at apple.com>
---
.../Bufferization/Transforms/BufferOptimizations.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp
index d7056f35cbc8d..93c1f9a4f2b55 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp
@@ -397,12 +397,12 @@ class BufferPlacementPromotion : BufferPlacementTransformationBase {
OpBuilder builder(startOperation);
Operation *allocOp = alloc.getDefiningOp();
if (auto allocInterface = dyn_cast<AllocationOpInterface>(allocOp)) {
- Operation *alloca =
- allocInterface.buildPromotedAlloc(builder, alloc).value();
+ std::optional<Operation *> alloca =
+ allocInterface.buildPromotedAlloc(builder, alloc);
if (!alloca)
continue;
// Replace the original alloc by a newly created alloca.
- allocOp->replaceAllUsesWith(alloca);
+ allocOp->replaceAllUsesWith(alloca.value());
allocOp->erase();
}
}
>From 2dda8a2650927e4b0fbb459507684455e196d9a9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timm=20B=C3=A4der?= <tbaeder at redhat.com>
Date: Thu, 4 Jul 2024 08:49:50 +0200
Subject: [PATCH 211/246] Revert "[clang][Interp] Fix init chain in local
initializers"
This reverts commit 86187ed2998e43be62176c2c4a7b204cc52f6ce6.
Seems like this breaks buildbots:
https://lab.llvm.org/buildbot/#/builders/56/builds/1638
---
clang/lib/AST/Interp/Compiler.cpp | 4 +---
clang/test/AST/Interp/records.cpp | 12 ------------
2 files changed, 1 insertion(+), 15 deletions(-)
diff --git a/clang/lib/AST/Interp/Compiler.cpp b/clang/lib/AST/Interp/Compiler.cpp
index 2af4c38c5ac3d..775cabf7f8c59 100644
--- a/clang/lib/AST/Interp/Compiler.cpp
+++ b/clang/lib/AST/Interp/Compiler.cpp
@@ -3577,7 +3577,6 @@ VarCreationState Compiler<Emitter>::visitVarDecl(const VarDecl *VD, bool Topleve
return !Init || (checkDecl() && initGlobal(*GlobalIndex));
} else {
VariableScope<Emitter> LocalScope(this, VD);
- InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD));
if (VarT) {
unsigned Offset = this->allocateLocalPrimitive(
@@ -3912,8 +3911,7 @@ bool Compiler<Emitter>::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
SourceLocScope<Emitter> SLS(this, E);
bool Old = InitStackActive;
- InitStackActive =
- !(E->getUsedContext()->getDeclKind() == Decl::CXXConstructor);
+ InitStackActive = !isa<FunctionDecl>(E->getUsedContext());
bool Result = this->delegate(E->getExpr());
InitStackActive = Old;
return Result;
diff --git a/clang/test/AST/Interp/records.cpp b/clang/test/AST/Interp/records.cpp
index 1554e54275598..9f341f5bc6d1d 100644
--- a/clang/test/AST/Interp/records.cpp
+++ b/clang/test/AST/Interp/records.cpp
@@ -1482,15 +1482,3 @@ namespace FloatAPValue {
ClassTemplateArgRefTemplate<ClassTemplateArgObj.Arg> ClassTemplateArgRefObj;
}
#endif
-
-namespace LocalWithThisPtrInit {
- struct S {
- int i;
- int *p = &i;
- };
- constexpr int foo() {
- S s{2};
- return *s.p;
- }
- static_assert(foo() == 2, "");
-}
>From 30df62992e890310550259afbe458b845c0d6b89 Mon Sep 17 00:00:00 2001
From: Michael Buch <michaelbuch12 at gmail.com>
Date: Thu, 4 Jul 2024 08:50:31 +0200
Subject: [PATCH 212/246] [lldb][DataFormatter][NFC] Remove duplicate
null-check in std::map iterator formatter
The nullness is already checked a few lines before this.
---
lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp | 3 ---
1 file changed, 3 deletions(-)
diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
index 0f9f93b727ce8..ad467c3966e60 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
@@ -245,9 +245,6 @@ lldb_private::formatters::LibCxxMapIteratorSyntheticFrontEnd::Update() {
if (!target_sp)
return lldb::ChildCacheState::eRefetch;
- if (!valobj_sp)
- return lldb::ChildCacheState::eRefetch;
-
// this must be a ValueObject* because it is a child of the ValueObject we
// are producing children for it if were a ValueObjectSP, we would end up
// with a loop (iterator -> synthetic -> child -> parent == iterator) and
>From c67653fbc3dd4ea9ab2a32c481a9ac4411020c7b Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 3 Jul 2024 23:56:17 -0700
Subject: [PATCH 213/246] [RISCV] Support vXf16 vector_shuffle with Zvfhmin.
(#97491)
We can shuffle vXf16 vectors just like vXi16 vectors. We don't need any
FP instructions. Update the predicates for vrgather and vslides patterns
to only check the predicates based on the equivalent integer type. If we
use the FP type it will check Zvfh and block Zvfhmin.
These are probably not the only patterns that need to be fixed, but the
test from the bug report no longer crashes.
Fixes #97477
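A toy model of the predicate fix, not the real TableGen machinery:
query the pattern predicates through the equivalent integer vector
type, so an f16 shuffle needs only the integer vector features rather
than Zvfh. The type and predicate strings are illustrative.

#include <string>
#include <vector>

std::string equivalentIntType(const std::string &vt) {
  return vt == "vXf16" ? "vXi16" : vt;
}

std::vector<std::string> predicatesFor(const std::string &vt) {
  if (vt == "vXf16")
    return {"HasVInstructionsF16"}; // would require Zvfh
  return {"HasVInstructions"};      // integer-only requirement
}

std::vector<std::string> shufflePredicates(const std::string &vt) {
  return predicatesFor(equivalentIntType(vt)); // the fix, in miniature
}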
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 17 +++++--
.../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 6 +--
.../RISCV/rvv/fixed-vectors-fp-shuffles.ll | 47 +++++++++++++++++++
3 files changed, 63 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6fe683410d59c..45368a01a0a73 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1319,7 +1319,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
VT, Custom);
setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
- ISD::EXTRACT_SUBVECTOR},
+ ISD::EXTRACT_SUBVECTOR, ISD::VECTOR_SHUFFLE},
VT, Custom);
setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
@@ -5040,6 +5040,14 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
}
+ MVT SplatVT = ContainerVT;
+
+ // If we don't have Zfh, we need to use an integer scalar load.
+ if (SVT == MVT::f16 && !Subtarget.hasStdExtZfh()) {
+ SVT = MVT::i16;
+ SplatVT = ContainerVT.changeVectorElementType(SVT);
+ }
+
// Otherwise use a scalar load and splat. This will give the best
// opportunity to fold a splat into the operation. ISel can turn it into
// the x0 strided load if we aren't able to fold away the select.
@@ -5055,10 +5063,11 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
Ld->getMemOperand()->getFlags());
DAG.makeEquivalentMemoryOrdering(Ld, V);
- unsigned Opc =
- VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
+ unsigned Opc = SplatVT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
+ : RISCVISD::VMV_V_X_VL;
SDValue Splat =
- DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
+ DAG.getNode(Opc, DL, SplatVT, DAG.getUNDEF(ContainerVT), V, VL);
+ Splat = DAG.getBitcast(ContainerVT, Splat);
return convertFromScalableVector(VT, Splat, DAG, Subtarget);
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index a7945f2ee6c1b..cc294bf9254e8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2117,7 +2117,8 @@ multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> {
multiclass VPatSlideVL_VX_VI<SDNode vop, string instruction_name> {
foreach vti = AllVectors in {
- let Predicates = GetVTypePredicates<vti>.Predicates in {
+ defvar ivti = GetIntVTypeInfo<vti>.Vti;
+ let Predicates = GetVTypePredicates<ivti>.Predicates in {
def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
(vti.Vector vti.RegClass:$rs1),
uimm5:$rs2, (vti.Mask V0),
@@ -3001,8 +3002,7 @@ foreach vti = AllFloatVectors in {
(vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
}
defvar ivti = GetIntVTypeInfo<vti>.Vti;
- let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
- GetVTypePredicates<ivti>.Predicates) in {
+ let Predicates = GetVTypePredicates<ivti>.Predicates in {
def : Pat<(vti.Vector
(riscv_vrgather_vv_vl vti.RegClass:$rs2,
(ivti.Vector vti.RegClass:$rs1),
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index 45c0a22b1939f..6408402ef787f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) {
; CHECK-LABEL: shuffle_v4f16:
@@ -262,6 +264,51 @@ define <8 x double> @splice_binary2(<8 x double> %x, <8 x double> %y) {
%s = shufflevector <8 x double> %x, <8 x double> %y, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
ret <8 x double> %s
}
+
+define <4 x half> @vrgather_permute_shuffle_vu_v4f16(<4 x half> %x) {
+; CHECK-LABEL: vrgather_permute_shuffle_vu_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 4096
+; CHECK-NEXT: addi a0, a0, 513
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %s = shufflevector <4 x half> %x, <4 x half> poison, <4 x i32> <i32 1, i32 2, i32 0, i32 1>
+ ret <4 x half> %s
+}
+
+define <4 x half> @vrgather_shuffle_vv_v4f16(<4 x half> %x, <4 x half> %y) {
+; CHECK-LABEL: vrgather_shuffle_vv_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI21_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI21_0)
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vle16.v v11, (a0)
+; CHECK-NEXT: vmv.v.i v0, 8
+; CHECK-NEXT: vrgather.vv v10, v8, v11
+; CHECK-NEXT: vrgather.vi v10, v9, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+ %s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> <i32 1, i32 2, i32 0, i32 5>
+ ret <4 x half> %s
+}
+
+define <4 x half> @vrgather_shuffle_vx_v4f16_load(ptr %p) {
+; CHECK-LABEL: vrgather_shuffle_vx_v4f16_load:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, 2
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vlse16.v v8, (a0), zero
+; CHECK-NEXT: ret
+ %v = load <4 x half>, ptr %p
+ %s = shufflevector <4 x half> %v, <4 x half> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x half> %s
+}
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}
>From 899fe2cf318f6e3c7a183125ff17ef53944b2989 Mon Sep 17 00:00:00 2001
From: goldsteinn <35538541+goldsteinn at users.noreply.github.com>
Date: Thu, 4 Jul 2024 15:14:39 +0800
Subject: [PATCH 214/246] [CVP][LVI] Fix incorrect scalar type when getting
constant folded vec (#97682)
Fixes #97674
After #97428 added support for vectors, our constant ranges can now
come from splat vectors, so when a range reduces to a single constant
value we need to return the original type rather than a plain integer.
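In the LLVM C++ API the distinction is which ConstantInt::get overload
is used: the Type*-based overload produces a splat for vector types,
while the LLVMContext overload always builds a scalar integer. A sketch
of the fixed shape, assuming V may be vector-typed (e.g. <4 x i64>):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Value.h"
using namespace llvm;

Constant *foldSingleElement(Value *V, const APInt &SingleVal) {
  // Old code used ConstantInt::get(V->getContext(), SingleVal), which
  // always yields a scalar iN and mismatches a vector-typed V. The
  // Type*-based overload returns a splat constant for vector types.
  return ConstantInt::get(V->getType(), SingleVal);
}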
---
llvm/lib/Analysis/LazyValueInfo.cpp | 5 +++--
.../CorrelatedValuePropagation/vectors.ll | 21 +++++++++++++++++++
2 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index b30e6a6a367c5..4209ee4a77953 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1370,6 +1370,7 @@ LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
// If V is the condition of the branch itself, then we know exactly what
// it is.
+ // NB: The condition on a `br` can't be a vector type.
if (Condition == Val)
return ValueLatticeElement::get(ConstantInt::get(
Type::getInt1Ty(Val->getContext()), isTrueDest));
@@ -1723,7 +1724,7 @@ Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
if (Result.isConstantRange()) {
const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
- return ConstantInt::get(V->getContext(), *SingleVal);
+ return ConstantInt::get(V->getType(), *SingleVal);
}
return nullptr;
}
@@ -1758,7 +1759,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (Result.isConstantRange()) {
const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
- return ConstantInt::get(V->getContext(), *SingleVal);
+ return ConstantInt::get(V->getType(), *SingleVal);
}
return nullptr;
}
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll b/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
index 0024b0a5c75c9..caaed628ed43e 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/vectors.ll
@@ -220,3 +220,24 @@ define <2 x i16> @and_with_poison(<2 x i8> %a) {
%res = and <2 x i16> %zext, <i16 u0xff, i16 poison>
ret <2 x i16> %res
}
+
+
+
+define <4 x i64> @issue_97674_getConstantOnEdge(i1 %cond) {
+entry:
+ br i1 %cond, label %if.then, label %if.end
+
+if.then:
+ %folds = add <4 x i64> zeroinitializer, <i64 1, i64 1, i64 1, i64 1>
+ br label %if.end
+
+if.end:
+ %r = phi <4 x i64> [ %folds, %if.then ], [ zeroinitializer, %entry ]
+ ret <4 x i64> %r
+}
+
+define <4 x i64> @issue_97674_getConstant() {
+entry:
+ %folds = add <4 x i64> zeroinitializer, zeroinitializer
+ ret <4 x i64> %folds
+}
>From a2ed21648c5faa148037f9e022fccdcb5178cbc3 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 3 Jul 2024 18:25:06 +0200
Subject: [PATCH 215/246] [LVI] Simplify the getPredicateResult()
implementation (NFC)
By using ConstantRange::icmp().
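For reference, ConstantRange::icmp(Pred, Other) returns true only when
Pred provably holds between the two ranges (false does not imply the
inverse holds), so the tri-state answer needs a second query with the
inverse predicate. A sketch of the shape of the new code:

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Returns 1 (provably true), 0 (provably false), or -1 (unknown).
int evaluatePredicate(const ConstantRange &CR, const APInt &C,
                      CmpInst::Predicate Pred) {
  ConstantRange RHS(C);
  if (CR.icmp(Pred, RHS))
    return 1;
  if (CR.icmp(CmpInst::getInversePredicate(Pred), RHS))
    return 0;
  return -1;
}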
---
llvm/lib/Analysis/LazyValueInfo.cpp | 26 +++++---------------------
1 file changed, 5 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 4209ee4a77953..caa5b2559690a 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1792,27 +1792,11 @@ getPredicateResult(CmpInst::Predicate Pred, Constant *C,
if (!CI) return LazyValueInfo::Unknown;
const ConstantRange &CR = Val.getConstantRange();
- if (Pred == ICmpInst::ICMP_EQ) {
- if (!CR.contains(CI->getValue()))
- return LazyValueInfo::False;
-
- if (CR.isSingleElement())
- return LazyValueInfo::True;
- } else if (Pred == ICmpInst::ICMP_NE) {
- if (!CR.contains(CI->getValue()))
- return LazyValueInfo::True;
-
- if (CR.isSingleElement())
- return LazyValueInfo::False;
- } else {
- // Handle more complex predicates.
- ConstantRange TrueValues =
- ConstantRange::makeExactICmpRegion(Pred, CI->getValue());
- if (TrueValues.contains(CR))
- return LazyValueInfo::True;
- if (TrueValues.inverse().contains(CR))
- return LazyValueInfo::False;
- }
+ ConstantRange RHS(CI->getValue());
+ if (CR.icmp(Pred, RHS))
+ return LazyValueInfo::True;
+ if (CR.icmp(CmpInst::getInversePredicate(Pred), RHS))
+ return LazyValueInfo::False;
return LazyValueInfo::Unknown;
}
>From 97a2bd8415dc6792b99ec0f091ad7570673c3f37 Mon Sep 17 00:00:00 2001
From: Alexander Belyaev <pifon at google.com>
Date: Thu, 4 Jul 2024 09:24:23 +0200
Subject: [PATCH 216/246] Revert "[mlir][loops] Reland Refactor
LoopFuseSiblingOp and support parallel fusion #94391 (#97607)"
This reverts commit edbc0e30a9e587cee1189be023b9385adc2f239a.
Reason for rollback: ASAN complains about this PR:
==4320==ERROR: AddressSanitizer: heap-use-after-free on address 0x502000006cd8 at pc 0x55e2978d63cf bp 0x7ffe6431c2b0 sp 0x7ffe6431c2a8
READ of size 8 at 0x502000006cd8 thread T0
#0 0x55e2978d63ce in map<llvm::MutableArrayRef<mlir::BlockArgument> &, llvm::MutableArrayRef<mlir::BlockArgument>, nullptr> mlir/include/mlir/IR/IRMapping.h:40:11
#1 0x55e2978d63ce in mlir::createFused(mlir::LoopLikeOpInterface, mlir::LoopLikeOpInterface, mlir::RewriterBase&, std::__u::function<llvm::SmallVector<mlir::Value, 6u> (mlir::OpBuilder&, mlir::Location, llvm::ArrayRef<mlir::BlockArgument>)>, llvm::function_ref<void (mlir::RewriterBase&, mlir::LoopLikeOpInterface, mlir::LoopLikeOpInterface&, mlir::IRMapping)>) mlir/lib/Interfaces/LoopLikeInterface.cpp:156:11
#2 0x55e2952a614b in mlir::fuseIndependentSiblingForLoops(mlir::scf::ForOp, mlir::scf::ForOp, mlir::RewriterBase&) mlir/lib/Dialect/SCF/Utils/Utils.cpp:1398:43
#3 0x55e291480c6f in mlir::transform::LoopFuseSiblingOp::apply(mlir::transform::TransformRewriter&, mlir::transform::TransformResults&, mlir::transform::TransformState&) mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp:482:17
#4 0x55e29149ed5e in mlir::transform::detail::TransformOpInterfaceInterfaceTraits::Model<mlir::transform::LoopFuseSiblingOp>::apply(mlir::transform::detail::TransformOpInterfaceInterfaceTraits::Concept const*, mlir::Operation*, mlir::transform::TransformRewriter&, mlir::transform::TransformResults&, mlir::transform::TransformState&) blaze-out/k8-opt-asan/bin/mlir/include/mlir/Dialect/Transform/Interfaces/TransformInterfaces.h.inc:477:56
#5 0x55e297494a60 in apply blaze-out/k8-opt-asan/bin/mlir/include/mlir/Dialect/Transform/Interfaces/TransformInterfaces.cpp.inc:61:14
#6 0x55e297494a60 in mlir::transform::TransformState::applyTransform(mlir::transform::TransformOpInterface) mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp:953:48
#7 0x55e294646a8d in applySequenceBlock(mlir::Block&, mlir::transform::FailurePropagationMode, mlir::transform::TransformState&, mlir::transform::TransformResults&) mlir/lib/Dialect/Transform/IR/TransformOps.cpp:1788:15
#8 0x55e29464f927 in mlir::transform::NamedSequenceOp::apply(mlir::transform::TransformRewriter&, mlir::transform::TransformResults&, mlir::transform::TransformState&) mlir/lib/Dialect/Transform/IR/TransformOps.cpp:2155:10
#9 0x55e2945d28ee in mlir::transform::detail::TransformOpInterfaceInterfaceTraits::Model<mlir::transform::NamedSequenceOp>::apply(mlir::transform::detail::TransformOpInterfaceInterfaceTraits::Concept const*, mlir::Operation*, mlir::transform::TransformRewriter&, mlir::transform::TransformResults&, mlir::transform::TransformState&) blaze-out/k8-opt-asan/bin/mlir/include/mlir/Dialect/Transform/Interfaces/TransformInterfaces.h.inc:477:56
#10 0x55e297494a60 in apply blaze-out/k8-opt-asan/bin/mlir/include/mlir/Dialect/Transform/Interfaces/TransformInterfaces.cpp.inc:61:14
#11 0x55e297494a60 in mlir::transform::TransformState::applyTransform(mlir::transform::TransformOpInterface) mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp:953:48
#12 0x55e2974a5fe2 in mlir::transform::applyTransforms(mlir::Operation*, mlir::transform::TransformOpInterface, mlir::RaggedArray<llvm::PointerUnion<mlir::Operation*, mlir::Attribute, mlir::Value>> const&, mlir::transform::TransformOptions const&, bool) mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp:2016:16
#13 0x55e2945888d7 in mlir::transform::applyTransformNamedSequence(mlir::RaggedArray<llvm::PointerUnion<mlir::Operation*, mlir::Attribute, mlir::Value>>, mlir::transform::TransformOpInterface, mlir::ModuleOp, mlir::transform::TransformOptions const&) mlir/lib/Dialect/Transform/Transforms/TransformInterpreterUtils.cpp:234:10
#14 0x55e294582446 in (anonymous namespace)::InterpreterPass::runOnOperation() mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp:147:16
#15 0x55e2978e93c6 in operator() mlir/lib/Pass/Pass.cpp:527:17
#16 0x55e2978e93c6 in void llvm::function_ref<void ()>::callback_fn<mlir::detail::OpToOpPassAdaptor::run(mlir::Pass*, mlir::Operation*, mlir::AnalysisManager, bool, unsigned int)::$_1>(long) llvm/include/llvm/ADT/STLFunctionalExtras.h:45:12
#17 0x55e2978e207a in operator() llvm/include/llvm/ADT/STLFunctionalExtras.h:68:12
#18 0x55e2978e207a in executeAction<mlir::PassExecutionAction, mlir::Pass &> mlir/include/mlir/IR/MLIRContext.h:275:7
#19 0x55e2978e207a in mlir::detail::OpToOpPassAdaptor::run(mlir::Pass*, mlir::Operation*, mlir::AnalysisManager, bool, unsigned int) mlir/lib/Pass/Pass.cpp:521:21
#20 0x55e2978e5fbf in runPipeline mlir/lib/Pass/Pass.cpp:593:16
#21 0x55e2978e5fbf in mlir::PassManager::runPasses(mlir::Operation*, mlir::AnalysisManager) mlir/lib/Pass/Pass.cpp:904:10
#22 0x55e2978e5b65 in mlir::PassManager::run(mlir::Operation*) mlir/lib/Pass/Pass.cpp:884:60
#23 0x55e291ebb460 in performActions(llvm::raw_ostream&, std::__u::shared_ptr<llvm::SourceMgr> const&, mlir::MLIRContext*, mlir::MlirOptMainConfig const&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:408:17
#24 0x55e291ebabd9 in processBuffer mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:481:9
#25 0x55e291ebabd9 in operator() mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:548:12
#26 0x55e291ebabd9 in llvm::LogicalResult llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>::callback_fn<mlir::MlirOptMain(llvm::raw_ostream&, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, mlir::DialectRegistry&, mlir::MlirOptMainConfig const&)::$_0>(long, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&) llvm/include/llvm/ADT/STLFunctionalExtras.h:45:12
#27 0x55e297b1cffe in operator() llvm/include/llvm/ADT/STLFunctionalExtras.h:68:12
#28 0x55e297b1cffe in mlir::splitAndProcessBuffer(std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>, llvm::raw_ostream&, llvm::StringRef, llvm::StringRef)::$_0::operator()(llvm::StringRef) const mlir/lib/Support/ToolUtilities.cpp:86:16
#29 0x55e297b1c9c5 in interleave<const llvm::StringRef *, (lambda at mlir/lib/Support/ToolUtilities.cpp:79:23), (lambda at llvm/include/llvm/ADT/STLExtras.h:2147:49), void> llvm/include/llvm/ADT/STLExtras.h:2125:3
#30 0x55e297b1c9c5 in interleave<llvm::SmallVector<llvm::StringRef, 8U>, (lambda at mlir/lib/Support/ToolUtilities.cpp:79:23), llvm::raw_ostream, llvm::StringRef> llvm/include/llvm/ADT/STLExtras.h:2147:3
#31 0x55e297b1c9c5 in mlir::splitAndProcessBuffer(std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>, llvm::raw_ostream&, llvm::StringRef, llvm::StringRef) mlir/lib/Support/ToolUtilities.cpp:89:3
#32 0x55e291eb0cf0 in mlir::MlirOptMain(llvm::raw_ostream&, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, mlir::DialectRegistry&, mlir::MlirOptMainConfig const&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:551:10
#33 0x55e291eb115c in mlir::MlirOptMain(int, char**, llvm::StringRef, llvm::StringRef, mlir::DialectRegistry&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:589:14
#34 0x55e291eb15f8 in mlir::MlirOptMain(int, char**, llvm::StringRef, mlir::DialectRegistry&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:605:10
#35 0x55e29130d1be in main mlir/tools/mlir-opt/mlir-opt.cpp:311:33
#36 0x7fbcf3fff3d3 in __libc_start_main (/usr/grte/v5/lib64/libc.so.6+0x613d3) (BuildId: 9a996398ce14a94560b0c642eb4f6e94)
#37 0x55e2912365a9 in _start /usr/grte/v5/debug-src/src/csu/../sysdeps/x86_64/start.S:120
0x502000006cd8 is located 8 bytes inside of 16-byte region [0x502000006cd0,0x502000006ce0)
freed by thread T0 here:
#0 0x55e29130b7e2 in operator delete(void*, unsigned long) compiler-rt/lib/asan/asan_new_delete.cpp:155:3
#1 0x55e2979eb657 in __libcpp_operator_delete<void *, unsigned long>
#2 0x55e2979eb657 in __do_deallocate_handle_size<>
#3 0x55e2979eb657 in __libcpp_deallocate
#4 0x55e2979eb657 in deallocate
#5 0x55e2979eb657 in deallocate
#6 0x55e2979eb657 in operator()
#7 0x55e2979eb657 in ~vector
#8 0x55e2979eb657 in mlir::Block::~Block() mlir/lib/IR/Block.cpp:24:1
#9 0x55e2979ebc17 in deleteNode llvm/include/llvm/ADT/ilist.h:42:39
#10 0x55e2979ebc17 in erase llvm/include/llvm/ADT/ilist.h:205:5
#11 0x55e2979ebc17 in erase llvm/include/llvm/ADT/ilist.h:209:39
#12 0x55e2979ebc17 in mlir::Block::erase() mlir/lib/IR/Block.cpp:67:28
#13 0x55e297aef978 in mlir::RewriterBase::eraseBlock(mlir::Block*) mlir/lib/IR/PatternMatch.cpp:245:10
#14 0x55e297af0563 in mlir::RewriterBase::inlineBlockBefore(mlir::Block*, mlir::Block*, llvm::ilist_iterator<llvm::ilist_detail::node_options<mlir::Operation, false, false, void, false, void>, false, false>, mlir::ValueRange) mlir/lib/IR/PatternMatch.cpp:331:3
#15 0x55e297af06d8 in mlir::RewriterBase::mergeBlocks(mlir::Block*, mlir::Block*, mlir::ValueRange) mlir/lib/IR/PatternMatch.cpp:341:3
#16 0x55e297036608 in mlir::scf::ForOp::replaceWithAdditionalYields(mlir::RewriterBase&, mlir::ValueRange, bool, std::__u::function<llvm::SmallVector<mlir::Value, 6u> (mlir::OpBuilder&, mlir::Location, llvm::ArrayRef<mlir::BlockArgument>)> const&) mlir/lib/Dialect/SCF/IR/SCF.cpp:575:12
#17 0x55e2970673ca in mlir::detail::LoopLikeOpInterfaceInterfaceTraits::Model<mlir::scf::ForOp>::replaceWithAdditionalYields(mlir::detail::LoopLikeOpInterfaceInterfaceTraits::Concept const*, mlir::Operation*, mlir::RewriterBase&, mlir::ValueRange, bool, std::__u::function<llvm::SmallVector<mlir::Value, 6u> (mlir::OpBuilder&, mlir::Location, llvm::ArrayRef<mlir::BlockArgument>)> const&) blaze-out/k8-opt-asan/bin/mlir/include/mlir/Interfaces/LoopLikeInterface.h.inc:658:56
#18 0x55e2978d5feb in replaceWithAdditionalYields blaze-out/k8-opt-asan/bin/mlir/include/mlir/Interfaces/LoopLikeInterface.cpp.inc:105:14
#19 0x55e2978d5feb in mlir::createFused(mlir::LoopLikeOpInterface, mlir::LoopLikeOpInterface, mlir::RewriterBase&, std::__u::function<llvm::SmallVector<mlir::Value, 6u> (mlir::OpBuilder&, mlir::Location, llvm::ArrayRef<mlir::BlockArgument>)>, llvm::function_ref<void (mlir::RewriterBase&, mlir::LoopLikeOpInterface, mlir::LoopLikeOpInterface&, mlir::IRMapping)>) mlir/lib/Interfaces/LoopLikeInterface.cpp:135:14
#20 0x55e2952a614b in mlir::fuseIndependentSiblingForLoops(mlir::scf::ForOp, mlir::scf::ForOp, mlir::RewriterBase&) mlir/lib/Dialect/SCF/Utils/Utils.cpp:1398:43
#21 0x55e291480c6f in mlir::transform::LoopFuseSiblingOp::apply(mlir::transform::TransformRewriter&, mlir::transform::TransformResults&, mlir::transform::TransformState&) mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp:482:17
#22 0x55e29149ed5e in mlir::transform::detail::TransformOpInterfaceInterfaceTraits::Model<mlir::transform::LoopFuseSiblingOp>::apply(mlir::transform::detail::TransformOpInterfaceInterfaceTraits::Concept const*, mlir::Operation*, mlir::transform::TransformRewriter&, mlir::transform::TransformResults&, mlir::transform::TransformState&) blaze-out/k8-opt-asan/bin/mlir/include/mlir/Dialect/Transform/Interfaces/TransformInterfaces.h.inc:477:56
#23 0x55e297494a60 in apply blaze-out/k8-opt-asan/bin/mlir/include/mlir/Dialect/Transform/Interfaces/TransformInterfaces.cpp.inc:61:14
#24 0x55e297494a60 in mlir::transform::TransformState::applyTransform(mlir::transform::TransformOpInterface) mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp:953:48
#25 0x55e294646a8d in applySequenceBlock(mlir::Block&, mlir::transform::FailurePropagationMode, mlir::transform::TransformState&, mlir::transform::TransformResults&) mlir/lib/Dialect/Transform/IR/TransformOps.cpp:1788:15
#26 0x55e29464f927 in mlir::transform::NamedSequenceOp::apply(mlir::transform::TransformRewriter&, mlir::transform::TransformResults&, mlir::transform::TransformState&) mlir/lib/Dialect/Transform/IR/TransformOps.cpp:2155:10
#27 0x55e2945d28ee in mlir::transform::detail::TransformOpInterfaceInterfaceTraits::Model<mlir::transform::NamedSequenceOp>::apply(mlir::transform::detail::TransformOpInterfaceInterfaceTraits::Concept const*, mlir::Operation*, mlir::transform::TransformRewriter&, mlir::transform::TransformResults&, mlir::transform::TransformState&) blaze-out/k8-opt-asan/bin/mlir/include/mlir/Dialect/Transform/Interfaces/TransformInterfaces.h.inc:477:56
#28 0x55e297494a60 in apply blaze-out/k8-opt-asan/bin/mlir/include/mlir/Dialect/Transform/Interfaces/TransformInterfaces.cpp.inc:61:14
#29 0x55e297494a60 in mlir::transform::TransformState::applyTransform(mlir::transform::TransformOpInterface) mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp:953:48
#30 0x55e2974a5fe2 in mlir::transform::applyTransforms(mlir::Operation*, mlir::transform::TransformOpInterface, mlir::RaggedArray<llvm::PointerUnion<mlir::Operation*, mlir::Attribute, mlir::Value>> const&, mlir::transform::TransformOptions const&, bool) mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp:2016:16
#31 0x55e2945888d7 in mlir::transform::applyTransformNamedSequence(mlir::RaggedArray<llvm::PointerUnion<mlir::Operation*, mlir::Attribute, mlir::Value>>, mlir::transform::TransformOpInterface, mlir::ModuleOp, mlir::transform::TransformOptions const&) mlir/lib/Dialect/Transform/Transforms/TransformInterpreterUtils.cpp:234:10
#32 0x55e294582446 in (anonymous namespace)::InterpreterPass::runOnOperation() mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp:147:16
#33 0x55e2978e93c6 in operator() mlir/lib/Pass/Pass.cpp:527:17
#34 0x55e2978e93c6 in void llvm::function_ref<void ()>::callback_fn<mlir::detail::OpToOpPassAdaptor::run(mlir::Pass*, mlir::Operation*, mlir::AnalysisManager, bool, unsigned int)::$_1>(long) llvm/include/llvm/ADT/STLFunctionalExtras.h:45:12
#35 0x55e2978e207a in operator() llvm/include/llvm/ADT/STLFunctionalExtras.h:68:12
#36 0x55e2978e207a in executeAction<mlir::PassExecutionAction, mlir::Pass &> mlir/include/mlir/IR/MLIRContext.h:275:7
#37 0x55e2978e207a in mlir::detail::OpToOpPassAdaptor::run(mlir::Pass*, mlir::Operation*, mlir::AnalysisManager, bool, unsigned int) mlir/lib/Pass/Pass.cpp:521:21
#38 0x55e2978e5fbf in runPipeline mlir/lib/Pass/Pass.cpp:593:16
#39 0x55e2978e5fbf in mlir::PassManager::runPasses(mlir::Operation*, mlir::AnalysisManager) mlir/lib/Pass/Pass.cpp:904:10
#40 0x55e2978e5b65 in mlir::PassManager::run(mlir::Operation*) mlir/lib/Pass/Pass.cpp:884:60
#41 0x55e291ebb460 in performActions(llvm::raw_ostream&, std::__u::shared_ptr<llvm::SourceMgr> const&, mlir::MLIRContext*, mlir::MlirOptMainConfig const&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:408:17
#42 0x55e291ebabd9 in processBuffer mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:481:9
#43 0x55e291ebabd9 in operator() mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:548:12
#44 0x55e291ebabd9 in llvm::LogicalResult llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>::callback_fn<mlir::MlirOptMain(llvm::raw_ostream&, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, mlir::DialectRegistry&, mlir::MlirOptMainConfig const&)::$_0>(long, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&) llvm/include/llvm/ADT/STLFunctionalExtras.h:45:12
#45 0x55e297b1cffe in operator() llvm/include/llvm/ADT/STLFunctionalExtras.h:68:12
#46 0x55e297b1cffe in mlir::splitAndProcessBuffer(std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>, llvm::raw_ostream&, llvm::StringRef, llvm::StringRef)::$_0::operator()(llvm::StringRef) const mlir/lib/Support/ToolUtilities.cpp:86:16
#47 0x55e297b1c9c5 in interleave<const llvm::StringRef *, (lambda at mlir/lib/Support/ToolUtilities.cpp:79:23), (lambda at llvm/include/llvm/ADT/STLExtras.h:2147:49), void> llvm/include/llvm/ADT/STLExtras.h:2125:3
#48 0x55e297b1c9c5 in interleave<llvm::SmallVector<llvm::StringRef, 8U>, (lambda at mlir/lib/Support/ToolUtilities.cpp:79:23), llvm::raw_ostream, llvm::StringRef> llvm/include/llvm/ADT/STLExtras.h:2147:3
#49 0x55e297b1c9c5 in mlir::splitAndProcessBuffer(std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>, llvm::raw_ostream&, llvm::StringRef, llvm::StringRef) mlir/lib/Support/ToolUtilities.cpp:89:3
#50 0x55e291eb0cf0 in mlir::MlirOptMain(llvm::raw_ostream&, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, mlir::DialectRegistry&, mlir::MlirOptMainConfig const&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:551:10
#51 0x55e291eb115c in mlir::MlirOptMain(int, char**, llvm::StringRef, llvm::StringRef, mlir::DialectRegistry&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:589:14
previously allocated by thread T0 here:
#0 0x55e29130ab5d in operator new(unsigned long) compiler-rt/lib/asan/asan_new_delete.cpp:86:3
#1 0x55e2979ed5d4 in __libcpp_operator_new<unsigned long>
#2 0x55e2979ed5d4 in __libcpp_allocate
#3 0x55e2979ed5d4 in allocate
#4 0x55e2979ed5d4 in __allocate_at_least<std::__u::allocator<mlir::BlockArgument> >
#5 0x55e2979ed5d4 in __split_buffer
#6 0x55e2979ed5d4 in mlir::BlockArgument* std::__u::vector<mlir::BlockArgument, std::__u::allocator<mlir::BlockArgument>>::__push_back_slow_path<mlir::BlockArgument const&>(mlir::BlockArgument const&)
#7 0x55e2979ec0f2 in push_back
#8 0x55e2979ec0f2 in mlir::Block::addArgument(mlir::Type, mlir::Location) mlir/lib/IR/Block.cpp:154:13
#9 0x55e29796e457 in parseRegionBody mlir/lib/AsmParser/Parser.cpp:2172:34
#10 0x55e29796e457 in (anonymous namespace)::OperationParser::parseRegion(mlir::Region&, llvm::ArrayRef<mlir::OpAsmParser::Argument>, bool) mlir/lib/AsmParser/Parser.cpp:2121:7
#11 0x55e29796b25e in (anonymous namespace)::CustomOpAsmParser::parseRegion(mlir::Region&, llvm::ArrayRef<mlir::OpAsmParser::Argument>, bool) mlir/lib/AsmParser/Parser.cpp:1785:16
#12 0x55e297035742 in mlir::scf::ForOp::parse(mlir::OpAsmParser&, mlir::OperationState&) mlir/lib/Dialect/SCF/IR/SCF.cpp:521:14
#13 0x55e291322c18 in llvm::ParseResult llvm::detail::UniqueFunctionBase<llvm::ParseResult, mlir::OpAsmParser&, mlir::OperationState&>::CallImpl<llvm::ParseResult (*)(mlir::OpAsmParser&, mlir::OperationState&)>(void*, mlir::OpAsmParser&, mlir::OperationState&) llvm/include/llvm/ADT/FunctionExtras.h:220:12
#14 0x55e29795bea3 in operator() llvm/include/llvm/ADT/FunctionExtras.h:384:12
#15 0x55e29795bea3 in callback_fn<llvm::unique_function<llvm::ParseResult (mlir::OpAsmParser &, mlir::OperationState &)> > llvm/include/llvm/ADT/STLFunctionalExtras.h:45:12
#16 0x55e29795bea3 in operator() llvm/include/llvm/ADT/STLFunctionalExtras.h:68:12
#17 0x55e29795bea3 in parseOperation mlir/lib/AsmParser/Parser.cpp:1521:9
#18 0x55e29795bea3 in parseCustomOperation mlir/lib/AsmParser/Parser.cpp:2017:19
#19 0x55e29795bea3 in (anonymous namespace)::OperationParser::parseOperation() mlir/lib/AsmParser/Parser.cpp:1174:10
#20 0x55e297971d20 in parseBlockBody mlir/lib/AsmParser/Parser.cpp:2296:9
#21 0x55e297971d20 in (anonymous namespace)::OperationParser::parseBlock(mlir::Block*&) mlir/lib/AsmParser/Parser.cpp:2226:12
#22 0x55e29796e4f5 in parseRegionBody mlir/lib/AsmParser/Parser.cpp:2184:7
#23 0x55e29796e4f5 in (anonymous namespace)::OperationParser::parseRegion(mlir::Region&, llvm::ArrayRef<mlir::OpAsmParser::Argument>, bool) mlir/lib/AsmParser/Parser.cpp:2121:7
#24 0x55e29796b25e in (anonymous namespace)::CustomOpAsmParser::parseRegion(mlir::Region&, llvm::ArrayRef<mlir::OpAsmParser::Argument>, bool) mlir/lib/AsmParser/Parser.cpp:1785:16
#25 0x55e29796b2cf in (anonymous namespace)::CustomOpAsmParser::parseOptionalRegion(mlir::Region&, llvm::ArrayRef<mlir::OpAsmParser::Argument>, bool) mlir/lib/AsmParser/Parser.cpp:1796:12
#26 0x55e2978d89ff in mlir::function_interface_impl::parseFunctionOp(mlir::OpAsmParser&, mlir::OperationState&, bool, mlir::StringAttr, llvm::function_ref<mlir::Type (mlir::Builder&, llvm::ArrayRef<mlir::Type>, llvm::ArrayRef<mlir::Type>, mlir::function_interface_impl::VariadicFlag, std::__u::basic_string<char, std::__u::char_traits<char>, std::__u::allocator<char>>&)>, mlir::StringAttr, mlir::StringAttr) mlir/lib/Interfaces/FunctionImplementation.cpp:232:14
#27 0x55e2969ba41d in mlir::func::FuncOp::parse(mlir::OpAsmParser&, mlir::OperationState&) mlir/lib/Dialect/Func/IR/FuncOps.cpp:203:10
#28 0x55e291322c18 in llvm::ParseResult llvm::detail::UniqueFunctionBase<llvm::ParseResult, mlir::OpAsmParser&, mlir::OperationState&>::CallImpl<llvm::ParseResult (*)(mlir::OpAsmParser&, mlir::OperationState&)>(void*, mlir::OpAsmParser&, mlir::OperationState&) llvm/include/llvm/ADT/FunctionExtras.h:220:12
#29 0x55e29795bea3 in operator() llvm/include/llvm/ADT/FunctionExtras.h:384:12
#30 0x55e29795bea3 in callback_fn<llvm::unique_function<llvm::ParseResult (mlir::OpAsmParser &, mlir::OperationState &)> > llvm/include/llvm/ADT/STLFunctionalExtras.h:45:12
#31 0x55e29795bea3 in operator() llvm/include/llvm/ADT/STLFunctionalExtras.h:68:12
#32 0x55e29795bea3 in parseOperation mlir/lib/AsmParser/Parser.cpp:1521:9
#33 0x55e29795bea3 in parseCustomOperation mlir/lib/AsmParser/Parser.cpp:2017:19
#34 0x55e29795bea3 in (anonymous namespace)::OperationParser::parseOperation() mlir/lib/AsmParser/Parser.cpp:1174:10
#35 0x55e297959b78 in parse mlir/lib/AsmParser/Parser.cpp:2725:20
#36 0x55e297959b78 in mlir::parseAsmSourceFile(llvm::SourceMgr const&, mlir::Block*, mlir::ParserConfig const&, mlir::AsmParserState*, mlir::AsmParserCodeCompleteContext*) mlir/lib/AsmParser/Parser.cpp:2785:41
#37 0x55e29790d5c2 in mlir::parseSourceFile(std::__u::shared_ptr<llvm::SourceMgr> const&, mlir::Block*, mlir::ParserConfig const&, mlir::LocationAttr*) mlir/lib/Parser/Parser.cpp:46:10
#38 0x55e291ebbfe2 in parseSourceFile<mlir::ModuleOp, const std::__u::shared_ptr<llvm::SourceMgr> &> mlir/include/mlir/Parser/Parser.h:159:14
#39 0x55e291ebbfe2 in parseSourceFile<mlir::ModuleOp> mlir/include/mlir/Parser/Parser.h:189:10
#40 0x55e291ebbfe2 in mlir::parseSourceFileForTool(std::__u::shared_ptr<llvm::SourceMgr> const&, mlir::ParserConfig const&, bool) mlir/include/mlir/Tools/ParseUtilities.h:31:12
#41 0x55e291ebb263 in performActions(llvm::raw_ostream&, std::__u::shared_ptr<llvm::SourceMgr> const&, mlir::MLIRContext*, mlir::MlirOptMainConfig const&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:383:33
#42 0x55e291ebabd9 in processBuffer mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:481:9
#43 0x55e291ebabd9 in operator() mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:548:12
#44 0x55e291ebabd9 in llvm::LogicalResult llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>::callback_fn<mlir::MlirOptMain(llvm::raw_ostream&, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, mlir::DialectRegistry&, mlir::MlirOptMainConfig const&)::$_0>(long, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&) llvm/include/llvm/ADT/STLFunctionalExtras.h:45:12
#45 0x55e297b1cffe in operator() llvm/include/llvm/ADT/STLFunctionalExtras.h:68:12
#46 0x55e297b1cffe in mlir::splitAndProcessBuffer(std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>, llvm::raw_ostream&, llvm::StringRef, llvm::StringRef)::$_0::operator()(llvm::StringRef) const mlir/lib/Support/ToolUtilities.cpp:86:16
#47 0x55e297b1c9c5 in interleave<const llvm::StringRef *, (lambda at mlir/lib/Support/ToolUtilities.cpp:79:23), (lambda at llvm/include/llvm/ADT/STLExtras.h:2147:49), void> llvm/include/llvm/ADT/STLExtras.h:2125:3
#48 0x55e297b1c9c5 in interleave<llvm::SmallVector<llvm::StringRef, 8U>, (lambda at mlir/lib/Support/ToolUtilities.cpp:79:23), llvm::raw_ostream, llvm::StringRef> llvm/include/llvm/ADT/STLExtras.h:2147:3
#49 0x55e297b1c9c5 in mlir::splitAndProcessBuffer(std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::function_ref<llvm::LogicalResult (std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>, llvm::raw_ostream&, llvm::StringRef, llvm::StringRef) mlir/lib/Support/ToolUtilities.cpp:89:3
#50 0x55e291eb0cf0 in mlir::MlirOptMain(llvm::raw_ostream&, std::__u::unique_ptr<llvm::MemoryBuffer, std::__u::default_delete<llvm::MemoryBuffer>>, mlir::DialectRegistry&, mlir::MlirOptMainConfig const&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:551:10
#51 0x55e291eb115c in mlir::MlirOptMain(int, char**, llvm::StringRef, llvm::StringRef, mlir::DialectRegistry&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:589:14
#52 0x55e291eb15f8 in mlir::MlirOptMain(int, char**, llvm::StringRef, mlir::DialectRegistry&) mlir/lib/Tools/mlir-opt/MlirOptMain.cpp:605:10
#53 0x55e29130d1be in main mlir/tools/mlir-opt/mlir-opt.cpp:311:33
#54 0x7fbcf3fff3d3 in __libc_start_main (/usr/grte/v5/lib64/libc.so.6+0x613d3) (BuildId: 9a996398ce14a94560b0c642eb4f6e94)
#55 0x55e2912365a9 in _start /usr/grte/v5/debug-src/src/csu/../sysdeps/x86_64/start.S:120
SUMMARY: AddressSanitizer: heap-use-after-free mlir/include/mlir/IR/IRMapping.h:40:11 in map<llvm::MutableArrayRef<mlir::BlockArgument> &, llvm::MutableArrayRef<mlir::BlockArgument>, nullptr>
Shadow bytes around the buggy address:
0x502000006a00: fa fa 00 fa fa fa 00 00 fa fa 00 fa fa fa 00 fa
0x502000006a80: fa fa 00 fa fa fa 00 00 fa fa 00 00 fa fa 00 00
0x502000006b00: fa fa 00 00 fa fa 00 00 fa fa 00 fa fa fa 00 fa
0x502000006b80: fa fa 00 fa fa fa 00 fa fa fa 00 00 fa fa 00 00
0x502000006c00: fa fa 00 00 fa fa 00 00 fa fa 00 00 fa fa fd fa
=>0x502000006c80: fa fa fd fa fa fa fd fd fa fa fd[fd]fa fa fd fd
0x502000006d00: fa fa 00 fa fa fa 00 fa fa fa 00 fa fa fa 00 fa
0x502000006d80: fa fa 00 fa fa fa 00 fa fa fa 00 fa fa fa 00 fa
0x502000006e00: fa fa 00 fa fa fa 00 fa fa fa 00 00 fa fa 00 fa
0x502000006e80: fa fa 00 fa fa fa 00 00 fa fa 00 fa fa fa 00 fa
0x502000006f00: fa fa 00 fa fa fa 00 fa fa fa 00 fa fa fa 00 fa
Shadow byte legend (one shadow byte represents 8 application bytes):
Addressable: 00
Partially addressable: 01 02 03 04 05 06 07
Heap left redzone: fa
Freed heap region: fd
Stack left redzone: f1
Stack mid redzone: f2
Stack right redzone: f3
Stack after return: f5
Stack use after scope: f8
Global redzone: f9
Global init order: f6
Poisoned by user: f7
Container overflow: fc
Array cookie: ac
Intra object redzone: bb
ASan internal: fe
Left alloca redzone: ca
Right alloca redzone: cb
==4320==ABORTING
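
The failure mode is easy to lose in the trace, so here is a minimal sketch of it, reconstructed from the `createFused` implementation that the diff removes below (rewriter setup and the yield/terminator callbacks are elided):

    // getRegionIterArgs() returns a MutableArrayRef<BlockArgument> that
    // aliases the BlockArgument vector owned by the loop's entry block.
    auto targetIterArgs = target.getRegionIterArgs();

    // replaceWithAdditionalYields() builds a replacement loop and erases
    // `target`, destroying the block argument storage that `targetIterArgs`
    // still points into.
    FailureOr<LoopLikeOpInterface> maybeFused =
        target.replaceWithAdditionalYields(
            rewriter, source.getInits(),
            /*replaceInitOperandUsesInLoop=*/false, newYieldValuesFn);

    // Heap-use-after-free: map() walks the freed BlockArgument range; this
    // is the IRMapping.h:40 frame in the report above.
    IRMapping mapping;
    mapping.map(targetIterArgs,
                maybeFused->getRegionIterArgs().take_front(targetIterArgs.size()));
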
---
mlir/include/mlir/Dialect/SCF/IR/SCFOps.td | 3 +-
mlir/include/mlir/Dialect/SCF/Utils/Utils.h | 20 --
.../mlir/Interfaces/LoopLikeInterface.h | 20 --
mlir/lib/Dialect/SCF/IR/SCF.cpp | 38 ---
.../SCF/TransformOps/SCFTransformOps.cpp | 140 +++++++--
.../SCF/Transforms/ParallelLoopFusion.cpp | 80 ++++-
mlir/lib/Dialect/SCF/Utils/Utils.cpp | 279 ++++++-----------
mlir/lib/Interfaces/LoopLikeInterface.cpp | 59 ----
.../SCF/transform-loop-fuse-sibling.mlir | 290 +-----------------
9 files changed, 283 insertions(+), 646 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
index bf95fbe6721cf..f35ea962bea16 100644
--- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
@@ -303,8 +303,7 @@ def ForallOp : SCF_Op<"forall", [
DeclareOpInterfaceMethods<LoopLikeOpInterface,
["getInitsMutable", "getRegionIterArgs", "getLoopInductionVars",
"getLoopLowerBounds", "getLoopUpperBounds", "getLoopSteps",
- "replaceWithAdditionalYields", "promoteIfSingleIteration",
- "yieldTiledValuesAndReplace"]>,
+ "promoteIfSingleIteration", "yieldTiledValuesAndReplace"]>,
RecursiveMemoryEffects,
SingleBlockImplicitTerminator<"scf::InParallelOp">,
DeclareOpInterfaceMethods<RegionBranchOpInterface>,
diff --git a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
index 6a40304e2eeba..de807c3e4e1f8 100644
--- a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
@@ -181,16 +181,6 @@ Loops tilePerfectlyNested(scf::ForOp rootForOp, ArrayRef<Value> sizes);
void getPerfectlyNestedLoops(SmallVectorImpl<scf::ForOp> &nestedLoops,
scf::ForOp root);
-//===----------------------------------------------------------------------===//
-// Fusion related helpers
-//===----------------------------------------------------------------------===//
-
-/// Check structural compatibility between two loops such as iteration space
-/// and dominance.
-bool checkFusionStructuralLegality(LoopLikeOpInterface target,
- LoopLikeOpInterface source,
- Diagnostic &diag);
-
/// Given two scf.forall loops, `target` and `source`, fuses `target` into
/// `source`. Assumes that the given loops are siblings and are independent of
/// each other.
@@ -212,16 +202,6 @@ scf::ForallOp fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForOp fuseIndependentSiblingForLoops(scf::ForOp target, scf::ForOp source,
RewriterBase &rewriter);
-/// Given two scf.parallel loops, `target` and `source`, fuses `target` into
-/// `source`. Assumes that the given loops are siblings and are independent of
-/// each other.
-///
-/// This function does not perform any legality checks and simply fuses the
-/// loops. The caller is responsible for ensuring that the loops are legal to
-/// fuse.
-scf::ParallelOp fuseIndependentSiblingParallelLoops(scf::ParallelOp target,
- scf::ParallelOp source,
- RewriterBase &rewriter);
} // namespace mlir
#endif // MLIR_DIALECT_SCF_UTILS_UTILS_H_
diff --git a/mlir/include/mlir/Interfaces/LoopLikeInterface.h b/mlir/include/mlir/Interfaces/LoopLikeInterface.h
index d08e097a9b4af..9925fc6ce6ca9 100644
--- a/mlir/include/mlir/Interfaces/LoopLikeInterface.h
+++ b/mlir/include/mlir/Interfaces/LoopLikeInterface.h
@@ -90,24 +90,4 @@ struct JamBlockGatherer {
/// Include the generated interface declarations.
#include "mlir/Interfaces/LoopLikeInterface.h.inc"
-namespace mlir {
-/// A function that rewrites `target`'s terminator as a teminator obtained by
-/// fusing `source` into `target`.
-using FuseTerminatorFn =
- function_ref<void(RewriterBase &rewriter, LoopLikeOpInterface source,
- LoopLikeOpInterface &target, IRMapping mapping)>;
-
-/// Returns a fused `LoopLikeOpInterface` created by fusing `source` to
-/// `target`. The `NewYieldValuesFn` callback is used to pass to the
-/// `replaceWithAdditionalYields` interface method to replace the loop with a
-/// new loop with (possibly) additional yields, while the `FuseTerminatorFn`
-/// callback is repsonsible for updating the fused loop terminator.
-LoopLikeOpInterface createFused(LoopLikeOpInterface target,
- LoopLikeOpInterface source,
- RewriterBase &rewriter,
- NewYieldValuesFn newYieldValuesFn,
- FuseTerminatorFn fuseTerminatorFn);
-
-} // namespace mlir
-
#endif // MLIR_INTERFACES_LOOPLIKEINTERFACE_H_
diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index cb15e0ecebf05..907d7f794593d 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -618,44 +618,6 @@ void ForOp::getSuccessorRegions(RegionBranchPoint point,
SmallVector<Region *> ForallOp::getLoopRegions() { return {&getRegion()}; }
-FailureOr<LoopLikeOpInterface> ForallOp::replaceWithAdditionalYields(
- RewriterBase &rewriter, ValueRange newInitOperands,
- bool replaceInitOperandUsesInLoop,
- const NewYieldValuesFn &newYieldValuesFn) {
- // Create a new loop before the existing one, with the extra operands.
- OpBuilder::InsertionGuard g(rewriter);
- rewriter.setInsertionPoint(getOperation());
- SmallVector<Value> inits(getOutputs());
- llvm::append_range(inits, newInitOperands);
- scf::ForallOp newLoop = rewriter.create<scf::ForallOp>(
- getLoc(), getMixedLowerBound(), getMixedUpperBound(), getMixedStep(),
- inits, getMapping(),
- /*bodyBuilderFn =*/[](OpBuilder &, Location, ValueRange) {});
-
- // Move the loop body to the new op.
- rewriter.mergeBlocks(getBody(), newLoop.getBody(),
- newLoop.getBody()->getArguments().take_front(
- getBody()->getNumArguments()));
-
- if (replaceInitOperandUsesInLoop) {
- // Replace all uses of `newInitOperands` with the corresponding basic block
- // arguments.
- for (auto &&[newOperand, oldOperand] :
- llvm::zip(newInitOperands, newLoop.getBody()->getArguments().take_back(
- newInitOperands.size()))) {
- rewriter.replaceUsesWithIf(newOperand, oldOperand, [&](OpOperand &use) {
- Operation *user = use.getOwner();
- return newLoop->isProperAncestor(user);
- });
- }
- }
-
- // Replace the old loop.
- rewriter.replaceOp(getOperation(),
- newLoop->getResults().take_front(getNumResults()));
- return cast<LoopLikeOpInterface>(newLoop.getOperation());
-}
-
/// Promotes the loop body of a forallOp to its containing block if it can be
/// determined that the loop has a single iteration.
LogicalResult scf::ForallOp::promoteIfSingleIteration(RewriterBase &rewriter) {
diff --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
index 41834fea3bb84..56ff2709a589e 100644
--- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
+++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
@@ -261,10 +261,8 @@ loopScheduling(scf::ForOp forOp,
return 1;
};
- std::optional<int64_t> ubConstant =
- getConstantIntValue(forOp.getUpperBound());
- std::optional<int64_t> lbConstant =
- getConstantIntValue(forOp.getLowerBound());
+ std::optional<int64_t> ubConstant = getConstantIntValue(forOp.getUpperBound());
+ std::optional<int64_t> lbConstant = getConstantIntValue(forOp.getLowerBound());
DenseMap<Operation *, unsigned> opCycles;
std::map<unsigned, std::vector<Operation *>> wrappedSchedule;
for (Operation &op : forOp.getBody()->getOperations()) {
@@ -449,6 +447,113 @@ void transform::TakeAssumedBranchOp::getEffects(
// LoopFuseSiblingOp
//===----------------------------------------------------------------------===//
+/// Check if `target` and `source` are siblings, in the context that `target`
+/// is being fused into `source`.
+///
+/// This is a simple structural check: it verifies that both operations are
+/// in the same block and that the IR resulting from fusion does not violate
+/// dominance.
+static DiagnosedSilenceableFailure isOpSibling(Operation *target,
+ Operation *source) {
+ // Check if both operations are the same.
+ if (target == source)
+ return emitSilenceableFailure(source)
+ << "target and source need to be different loops";
+
+ // Check if both operations are in the same block.
+ if (target->getBlock() != source->getBlock())
+ return emitSilenceableFailure(source)
+ << "target and source are not in the same block";
+
+ // Check if fusion will violate dominance.
+ DominanceInfo domInfo(source);
+ if (target->isBeforeInBlock(source)) {
+ // Since `target` is before `source`, all users of results of `target`
+ // need to be dominated by `source`.
+ for (Operation *user : target->getUsers()) {
+ if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
+ return emitSilenceableFailure(target)
+ << "user of results of target should be properly dominated by "
+ "source";
+ }
+ }
+ } else {
+ // Since `target` is after `source`, all values used by `target` need
+ // to dominate `source`.
+
+ // Check if operands of `target` are dominated by `source`.
+ for (Value operand : target->getOperands()) {
+ Operation *operandOp = operand.getDefiningOp();
+ // Operands without defining operations are block arguments. When `target`
+ // and `source` occur in the same block, these operands dominate `source`.
+ if (!operandOp)
+ continue;
+
+ // Operand's defining operation should properly dominate `source`.
+ if (!domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false))
+ return emitSilenceableFailure(target)
+ << "operands of target should be properly dominated by source";
+ }
+
+ // Check if values used by `target` are dominated by `source`.
+ bool failed = false;
+ OpOperand *failedValue = nullptr;
+ visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
+ Operation *operandOp = operand->get().getDefiningOp();
+ if (operandOp && !domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false)) {
+ // `operand` is not an argument of an enclosing block and the defining
+ // op of `operand` is outside `target` but does not dominate `source`.
+ failed = true;
+ failedValue = operand;
+ }
+ });
+
+ if (failed)
+ return emitSilenceableFailure(failedValue->getOwner())
+ << "values used inside regions of target should be properly "
+ "dominated by source";
+ }
+
+ return DiagnosedSilenceableFailure::success();
+}
+
+/// Check if `target` scf.forall can be fused into `source` scf.forall.
+///
+/// This simply checks if both loops have the same bounds, steps and mapping.
+/// No attempt is made to check that the side effects of `target` and
+/// `source` are independent of each other.
+static bool isForallWithIdenticalConfiguration(Operation *target,
+ Operation *source) {
+ auto targetOp = dyn_cast<scf::ForallOp>(target);
+ auto sourceOp = dyn_cast<scf::ForallOp>(source);
+ if (!targetOp || !sourceOp)
+ return false;
+
+ return targetOp.getMixedLowerBound() == sourceOp.getMixedLowerBound() &&
+ targetOp.getMixedUpperBound() == sourceOp.getMixedUpperBound() &&
+ targetOp.getMixedStep() == sourceOp.getMixedStep() &&
+ targetOp.getMapping() == sourceOp.getMapping();
+}
+
+/// Check if `target` scf.for can be fused into `source` scf.for.
+///
+/// This simply checks if both loops have the same bounds and steps. No attempt
+/// is made to check that the side effects of `target` and `source` are
+/// independent of each other.
+static bool isForWithIdenticalConfiguration(Operation *target,
+ Operation *source) {
+ auto targetOp = dyn_cast<scf::ForOp>(target);
+ auto sourceOp = dyn_cast<scf::ForOp>(source);
+ if (!targetOp || !sourceOp)
+ return false;
+
+ return targetOp.getLowerBound() == sourceOp.getLowerBound() &&
+ targetOp.getUpperBound() == sourceOp.getUpperBound() &&
+ targetOp.getStep() == sourceOp.getStep();
+}
+
DiagnosedSilenceableFailure
transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
transform::TransformResults &results,
@@ -464,32 +569,25 @@ transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
<< "source handle (got " << llvm::range_size(sourceOps) << ")";
}
- auto target = dyn_cast<LoopLikeOpInterface>(*targetOps.begin());
- auto source = dyn_cast<LoopLikeOpInterface>(*sourceOps.begin());
- if (!target || !source)
- return emitSilenceableFailure(target->getLoc())
- << "target or source is not a loop op";
+ Operation *target = *targetOps.begin();
+ Operation *source = *sourceOps.begin();
- // Check if loops can be fused
- Diagnostic diag(target.getLoc(), DiagnosticSeverity::Error);
- if (!mlir::checkFusionStructuralLegality(target, source, diag))
- return DiagnosedSilenceableFailure::silenceableFailure(std::move(diag));
+ // Check if the target and source are siblings.
+ DiagnosedSilenceableFailure diag = isOpSibling(target, source);
+ if (!diag.succeeded())
+ return diag;
Operation *fusedLoop;
- // TODO: Support fusion for loop-like ops besides scf.for, scf.forall
- // and scf.parallel.
- if (isa<scf::ForOp>(target) && isa<scf::ForOp>(source)) {
+ // TODO: Support fusion for loop-like ops besides scf.for and scf.forall.
+ if (isForWithIdenticalConfiguration(target, source)) {
fusedLoop = fuseIndependentSiblingForLoops(
cast<scf::ForOp>(target), cast<scf::ForOp>(source), rewriter);
- } else if (isa<scf::ForallOp>(target) && isa<scf::ForallOp>(source)) {
+ } else if (isForallWithIdenticalConfiguration(target, source)) {
fusedLoop = fuseIndependentSiblingForallLoops(
cast<scf::ForallOp>(target), cast<scf::ForallOp>(source), rewriter);
- } else if (isa<scf::ParallelOp>(target) && isa<scf::ParallelOp>(source)) {
- fusedLoop = fuseIndependentSiblingParallelLoops(
- cast<scf::ParallelOp>(target), cast<scf::ParallelOp>(source), rewriter);
} else
return emitSilenceableFailure(target->getLoc())
- << "unsupported loop type for fusion";
+ << "operations cannot be fused";
assert(fusedLoop && "failed to fuse operations");
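
A note on the asymmetry in `isOpSibling` above: fusion places the combined body at `source`'s position, so the direction in which dominance must hold flips with the relative order of the two loops. Condensed (same `DominanceInfo` queries as above; `failure()` stands in for the silenceable-failure diagnostics):

    DominanceInfo domInfo(source);
    if (target->isBeforeInBlock(source)) {
      // target's results will now be produced at source's (later) position,
      // so every existing user of them must already sit below source.
      for (Operation *user : target->getUsers())
        if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false))
          return failure();
    } else {
      // target's body will now execute at source's (earlier) position, so
      // every value it reads must already be defined above source.
      for (Value operand : target->getOperands())
        if (Operation *def = operand.getDefiningOp())
          if (!domInfo.properlyDominates(def, source, /*enclosingOpOk=*/false))
            return failure();
    }

The real code additionally walks values captured from above `target`'s regions via visitUsedValuesDefinedAbove, as shown in the hunk above.
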
diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
index b775f988576e3..5934d85373b03 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
@@ -16,7 +16,6 @@
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SCF/Transforms/Transforms.h"
-#include "mlir/Dialect/SCF/Utils/Utils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
@@ -38,6 +37,24 @@ static bool hasNestedParallelOp(ParallelOp ploop) {
return walkResult.wasInterrupted();
}
+/// Returns true if the two parallel loops have equal iteration spaces.
+static bool equalIterationSpaces(ParallelOp firstPloop,
+ ParallelOp secondPloop) {
+ if (firstPloop.getNumLoops() != secondPloop.getNumLoops())
+ return false;
+
+ auto matchOperands = [&](const OperandRange &lhs,
+ const OperandRange &rhs) -> bool {
+ // TODO: Extend this to support aliases and equal constants.
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin());
+ };
+ return matchOperands(firstPloop.getLowerBound(),
+ secondPloop.getLowerBound()) &&
+ matchOperands(firstPloop.getUpperBound(),
+ secondPloop.getUpperBound()) &&
+ matchOperands(firstPloop.getStep(), secondPloop.getStep());
+}
+
/// Checks if the parallel loops have mixed access to the same buffers. Returns
/// `true` if the first parallel loop writes to the same indices that the second
/// loop reads.
@@ -136,10 +153,9 @@ verifyDependencies(ParallelOp firstPloop, ParallelOp secondPloop,
static bool isFusionLegal(ParallelOp firstPloop, ParallelOp secondPloop,
const IRMapping &firstToSecondPloopIndices,
llvm::function_ref<bool(Value, Value)> mayAlias) {
- Diagnostic diag(firstPloop.getLoc(), DiagnosticSeverity::Remark);
return !hasNestedParallelOp(firstPloop) &&
!hasNestedParallelOp(secondPloop) &&
- checkFusionStructuralLegality(firstPloop, secondPloop, diag) &&
+ equalIterationSpaces(firstPloop, secondPloop) &&
succeeded(verifyDependencies(firstPloop, secondPloop,
firstToSecondPloopIndices, mayAlias));
}
@@ -158,9 +174,61 @@ static void fuseIfLegal(ParallelOp firstPloop, ParallelOp &secondPloop,
mayAlias))
return;
- IRRewriter rewriter(builder);
- secondPloop = mlir::fuseIndependentSiblingParallelLoops(
- firstPloop, secondPloop, rewriter);
+ DominanceInfo dom;
+ // We are fusing the first loop into the second; make sure there are no
+ // users of the first loop's results between the two loops.
+ for (Operation *user : firstPloop->getUsers())
+ if (!dom.properlyDominates(secondPloop, user, /*enclosingOpOk*/ false))
+ return;
+
+ ValueRange inits1 = firstPloop.getInitVals();
+ ValueRange inits2 = secondPloop.getInitVals();
+
+ SmallVector<Value> newInitVars(inits1.begin(), inits1.end());
+ newInitVars.append(inits2.begin(), inits2.end());
+
+ IRRewriter b(builder);
+ b.setInsertionPoint(secondPloop);
+ auto newSecondPloop = b.create<ParallelOp>(
+ secondPloop.getLoc(), secondPloop.getLowerBound(),
+ secondPloop.getUpperBound(), secondPloop.getStep(), newInitVars);
+
+ Block *newBlock = newSecondPloop.getBody();
+ auto term1 = cast<ReduceOp>(block1->getTerminator());
+ auto term2 = cast<ReduceOp>(block2->getTerminator());
+
+ b.inlineBlockBefore(block2, newBlock, newBlock->begin(),
+ newBlock->getArguments());
+ b.inlineBlockBefore(block1, newBlock, newBlock->begin(),
+ newBlock->getArguments());
+
+ ValueRange results = newSecondPloop.getResults();
+ if (!results.empty()) {
+ b.setInsertionPointToEnd(newBlock);
+
+ ValueRange reduceArgs1 = term1.getOperands();
+ ValueRange reduceArgs2 = term2.getOperands();
+ SmallVector<Value> newReduceArgs(reduceArgs1.begin(), reduceArgs1.end());
+ newReduceArgs.append(reduceArgs2.begin(), reduceArgs2.end());
+
+ auto newReduceOp = b.create<scf::ReduceOp>(term2.getLoc(), newReduceArgs);
+
+ for (auto &&[i, reg] : llvm::enumerate(llvm::concat<Region>(
+ term1.getReductions(), term2.getReductions()))) {
+ Block &oldRedBlock = reg.front();
+ Block &newRedBlock = newReduceOp.getReductions()[i].front();
+ b.inlineBlockBefore(&oldRedBlock, &newRedBlock, newRedBlock.begin(),
+ newRedBlock.getArguments());
+ }
+
+ firstPloop.replaceAllUsesWith(results.take_front(inits1.size()));
+ secondPloop.replaceAllUsesWith(results.take_back(inits2.size()));
+ }
+ term1->erase();
+ term2->erase();
+ firstPloop.erase();
+ secondPloop.erase();
+ secondPloop = newSecondPloop;
}
void mlir::scf::naivelyFuseParallelOps(
diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index abfc9a1b4d444..c0ee9d2afe91c 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -17,7 +17,6 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/BuiltinOps.h"
-#include "mlir/IR/Dominance.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/PatternMatch.h"
@@ -1263,131 +1262,54 @@ TileLoops mlir::extractFixedOuterLoops(scf::ForOp rootForOp,
return tileLoops;
}
-//===----------------------------------------------------------------------===//
-// Fusion related helpers
-//===----------------------------------------------------------------------===//
-
-/// Check if `target` and `source` are siblings, in the context that `target`
-/// is being fused into `source`.
-///
-/// This is a simple check that just checks if both operations are in the same
-/// block and some checks to ensure that the fused IR does not violate
-/// dominance.
-static bool isOpSibling(Operation *target, Operation *source,
- Diagnostic &diag) {
- // Check if both operations are same.
- if (target == source) {
- diag << "target and source need to be different loops";
- return false;
- }
-
- // Check if both operations are in the same block.
- if (target->getBlock() != source->getBlock()) {
- diag << "target and source are not in the same block";
- return false;
- }
-
- // Check if fusion will violate dominance.
- DominanceInfo domInfo(source);
- if (target->isBeforeInBlock(source)) {
- // Since `target` is before `source`, all users of results of `target`
- // need to be dominated by `source`.
- for (Operation *user : target->getUsers()) {
- if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
- diag << "user of results of target should "
- "be properly dominated by source";
- return false;
- }
- }
- } else {
- // Since `target` is after `source`, all values used by `target` need
- // to dominate `source`.
-
- // Check if operands of `target` are dominated by `source`.
- for (Value operand : target->getOperands()) {
- Operation *operandOp = operand.getDefiningOp();
- // Operands without defining operations are block arguments. When `target`
- // and `source` occur in the same block, these operands dominate `source`.
- if (!operandOp)
- continue;
-
- // Operand's defining operation should properly dominate `source`.
- if (!domInfo.properlyDominates(operandOp, source,
- /*enclosingOpOk=*/false)) {
- diag << "operands of target should be properly dominated by source";
- return false;
- }
- }
-
- // Check if values used by `target` are dominated by `source`.
- bool failed = false;
- OpOperand *failedValue = nullptr;
- visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
- Operation *operandOp = operand->get().getDefiningOp();
- if (operandOp && !domInfo.properlyDominates(operandOp, source,
- /*enclosingOpOk=*/false)) {
- // `operand` is not an argument of an enclosing block and the defining
- // op of `operand` is outside `target` but does not dominate `source`.
- failed = true;
- failedValue = operand;
- }
- });
-
- if (failed) {
- diag << "values used inside regions of target should be properly "
- "dominated by source";
- diag.attachNote(failedValue->getOwner()->getLoc()) << "see operation";
- return false;
- }
- }
-
- return true;
-}
-
-bool mlir::checkFusionStructuralLegality(LoopLikeOpInterface target,
- LoopLikeOpInterface source,
- Diagnostic &diag) {
- if (target->getName() != source->getName()) {
- diag << "target and source must be same loop type";
- return false;
- }
-
- bool iterSpaceEq =
- target.getLoopLowerBounds() == source.getLoopLowerBounds() &&
- target.getLoopUpperBounds() == source.getLoopUpperBounds() &&
- target.getLoopSteps() == source.getLoopSteps();
- // TODO: Decouple checks on concrete loop types and move this function
- // somewhere for general utility for `LoopLikeOpInterface`
- if (auto forAllTarget = dyn_cast<scf::ForallOp>(*target))
- iterSpaceEq = iterSpaceEq && forAllTarget.getMapping() ==
- cast<scf::ForallOp>(*source).getMapping();
- if (!iterSpaceEq) {
- diag << "target and source iteration spaces must be equal";
- return false;
- }
- return isOpSibling(target, source, diag);
-}
-
scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForallOp source,
RewriterBase &rewriter) {
- scf::ForallOp fusedLoop = cast<scf::ForallOp>(createFused(
- target, source, rewriter,
- [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
- // `ForallOp` does not have yields, rather an `InParallelOp` terminator.
- return ValueRange{};
- },
- [&](RewriterBase &b, LoopLikeOpInterface source,
- LoopLikeOpInterface &target, IRMapping mapping) {
- auto sourceForall = cast<scf::ForallOp>(source);
- auto targetForall = cast<scf::ForallOp>(target);
- scf::InParallelOp fusedTerm = targetForall.getTerminator();
- b.setInsertionPointToEnd(fusedTerm.getBody());
- for (Operation &op : sourceForall.getTerminator().getYieldingOps())
- b.clone(op, mapping);
- }));
- rewriter.replaceOp(source,
- fusedLoop.getResults().take_back(source.getNumResults()));
+ unsigned numTargetOuts = target.getNumResults();
+ unsigned numSourceOuts = source.getNumResults();
+
+ // Create fused shared_outs.
+ SmallVector<Value> fusedOuts;
+ llvm::append_range(fusedOuts, target.getOutputs());
+ llvm::append_range(fusedOuts, source.getOutputs());
+
+ // Create a new scf.forall op after the source loop.
+ rewriter.setInsertionPointAfter(source);
+ scf::ForallOp fusedLoop = rewriter.create<scf::ForallOp>(
+ source.getLoc(), source.getMixedLowerBound(), source.getMixedUpperBound(),
+ source.getMixedStep(), fusedOuts, source.getMapping());
+
+ // Map control operands.
+ IRMapping mapping;
+ mapping.map(target.getInductionVars(), fusedLoop.getInductionVars());
+ mapping.map(source.getInductionVars(), fusedLoop.getInductionVars());
+
+ // Map shared outs.
+ mapping.map(target.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
+ mapping.map(source.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
+
+ // Clone everything except the terminators into the fused operation.
+ rewriter.setInsertionPointToStart(fusedLoop.getBody());
+ for (Operation &op : target.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+ for (Operation &op : source.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+
+ // Fuse the old terminator in_parallel ops into the new one.
+ scf::InParallelOp targetTerm = target.getTerminator();
+ scf::InParallelOp sourceTerm = source.getTerminator();
+ scf::InParallelOp fusedTerm = fusedLoop.getTerminator();
+ rewriter.setInsertionPointToStart(fusedTerm.getBody());
+ for (Operation &op : targetTerm.getYieldingOps())
+ rewriter.clone(op, mapping);
+ for (Operation &op : sourceTerm.getYieldingOps())
+ rewriter.clone(op, mapping);
+
+ // Replace the old loops by substituting their uses with the fused loop's results.
+ rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
+ rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
return fusedLoop;
}
@@ -1395,74 +1317,49 @@ scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForOp mlir::fuseIndependentSiblingForLoops(scf::ForOp target,
scf::ForOp source,
RewriterBase &rewriter) {
- scf::ForOp fusedLoop = cast<scf::ForOp>(createFused(
- target, source, rewriter,
- [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
- return source.getYieldedValues();
- },
- [&](RewriterBase &b, LoopLikeOpInterface source,
- LoopLikeOpInterface &target, IRMapping mapping) {
- auto targetFor = cast<scf::ForOp>(target);
- auto newTerm = b.clone(*targetFor.getBody()->getTerminator(), mapping);
- b.replaceOp(targetFor.getBody()->getTerminator(), newTerm);
- }));
- rewriter.replaceOp(source,
- fusedLoop.getResults().take_back(source.getNumResults()));
- return fusedLoop;
-}
-
-// TODO: Finish refactoring this a la the above, but likely requires additional
-// interface methods.
-scf::ParallelOp mlir::fuseIndependentSiblingParallelLoops(
- scf::ParallelOp target, scf::ParallelOp source, RewriterBase &rewriter) {
- OpBuilder::InsertionGuard guard(rewriter);
- Block *block1 = target.getBody();
- Block *block2 = source.getBody();
- auto term1 = cast<scf::ReduceOp>(block1->getTerminator());
- auto term2 = cast<scf::ReduceOp>(block2->getTerminator());
-
- ValueRange inits1 = target.getInitVals();
- ValueRange inits2 = source.getInitVals();
-
- SmallVector<Value> newInitVars(inits1.begin(), inits1.end());
- newInitVars.append(inits2.begin(), inits2.end());
-
- rewriter.setInsertionPoint(source);
- auto fusedLoop = rewriter.create<scf::ParallelOp>(
- rewriter.getFusedLoc(target.getLoc(), source.getLoc()),
- source.getLowerBound(), source.getUpperBound(), source.getStep(),
- newInitVars);
- Block *newBlock = fusedLoop.getBody();
- rewriter.inlineBlockBefore(block2, newBlock, newBlock->begin(),
- newBlock->getArguments());
- rewriter.inlineBlockBefore(block1, newBlock, newBlock->begin(),
- newBlock->getArguments());
-
- ValueRange results = fusedLoop.getResults();
- if (!results.empty()) {
- rewriter.setInsertionPointToEnd(newBlock);
-
- ValueRange reduceArgs1 = term1.getOperands();
- ValueRange reduceArgs2 = term2.getOperands();
- SmallVector<Value> newReduceArgs(reduceArgs1.begin(), reduceArgs1.end());
- newReduceArgs.append(reduceArgs2.begin(), reduceArgs2.end());
-
- auto newReduceOp = rewriter.create<scf::ReduceOp>(
- rewriter.getFusedLoc(term1.getLoc(), term2.getLoc()), newReduceArgs);
-
- for (auto &&[i, reg] : llvm::enumerate(llvm::concat<Region>(
- term1.getReductions(), term2.getReductions()))) {
- Block &oldRedBlock = reg.front();
- Block &newRedBlock = newReduceOp.getReductions()[i].front();
- rewriter.inlineBlockBefore(&oldRedBlock, &newRedBlock,
- newRedBlock.begin(),
- newRedBlock.getArguments());
- }
- }
- rewriter.replaceOp(target, results.take_front(inits1.size()));
- rewriter.replaceOp(source, results.take_back(inits2.size()));
- rewriter.eraseOp(term1);
- rewriter.eraseOp(term2);
+ unsigned numTargetOuts = target.getNumResults();
+ unsigned numSourceOuts = source.getNumResults();
+
+ // Create fused init_args, with target's init_args before source's init_args.
+ SmallVector<Value> fusedInitArgs;
+ llvm::append_range(fusedInitArgs, target.getInitArgs());
+ llvm::append_range(fusedInitArgs, source.getInitArgs());
+
+ // Create a new scf.for op after the source loop. Its builder only adds an
+ // argument-free scf.yield terminator when the fused init_args list is empty.
+ rewriter.setInsertionPointAfter(source);
+ scf::ForOp fusedLoop = rewriter.create<scf::ForOp>(
+ source.getLoc(), source.getLowerBound(), source.getUpperBound(),
+ source.getStep(), fusedInitArgs);
+
+ // Map the original induction variables and iter args to those of the fused loop.
+ IRMapping mapping;
+ mapping.map(target.getInductionVar(), fusedLoop.getInductionVar());
+ mapping.map(target.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
+ mapping.map(source.getInductionVar(), fusedLoop.getInductionVar());
+ mapping.map(source.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
+
+ // Merge target's body into the new (fused) for loop and then source's body.
+ rewriter.setInsertionPointToStart(fusedLoop.getBody());
+ for (Operation &op : target.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+ for (Operation &op : source.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+
+ // Build the fused yield results by remapping the original yield operands.
+ SmallVector<Value> yieldResults;
+ for (Value operand : target.getBody()->getTerminator()->getOperands())
+ yieldResults.push_back(mapping.lookupOrDefault(operand));
+ for (Value operand : source.getBody()->getTerminator()->getOperands())
+ yieldResults.push_back(mapping.lookupOrDefault(operand));
+ if (!yieldResults.empty())
+ rewriter.create<scf::YieldOp>(source.getLoc(), yieldResults);
+
+ // Replace the old loops by substituting their uses with the fused loop's results.
+ rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
+ rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
return fusedLoop;
}
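
Both restored fusion routines above follow the same clone-based shape, which is also what avoids the stale MutableArrayRef from the ASan report: the original loops stay intact while their bodies are cloned through an IRMapping, and they are only erased (via replaceOp) at the very end. Condensed from the scf.for variant above (scf.forall is analogous; error handling elided):

    // 1. Build the fused loop after `source`, carrying both loops' inits.
    SmallVector<Value> fusedInitArgs;
    llvm::append_range(fusedInitArgs, target.getInitArgs());
    llvm::append_range(fusedInitArgs, source.getInitArgs());
    rewriter.setInsertionPointAfter(source);
    auto fusedLoop = rewriter.create<scf::ForOp>(
        source.getLoc(), source.getLowerBound(), source.getUpperBound(),
        source.getStep(), fusedInitArgs);

    // 2. Remap IVs and iter args. Both original loops are still alive, so
    //    the BlockArgument ranges read by map() are valid here.
    IRMapping mapping;
    mapping.map(target.getInductionVar(), fusedLoop.getInductionVar());
    mapping.map(source.getInductionVar(), fusedLoop.getInductionVar());

    // 3. Clone both bodies minus terminators, rebuild a single scf.yield,
    //    and only then replaceOp (i.e., erase) the original loops.
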
diff --git a/mlir/lib/Interfaces/LoopLikeInterface.cpp b/mlir/lib/Interfaces/LoopLikeInterface.cpp
index 5a119a7cf2659..1e0e87b64e811 100644
--- a/mlir/lib/Interfaces/LoopLikeInterface.cpp
+++ b/mlir/lib/Interfaces/LoopLikeInterface.cpp
@@ -8,8 +8,6 @@
#include "mlir/Interfaces/LoopLikeInterface.h"
-#include "mlir/IR/IRMapping.h"
-#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/FunctionInterfaces.h"
#include "llvm/ADT/DenseSet.h"
@@ -115,60 +113,3 @@ LogicalResult detail::verifyLoopLikeOpInterface(Operation *op) {
return success();
}
-
-LoopLikeOpInterface mlir::createFused(LoopLikeOpInterface target,
- LoopLikeOpInterface source,
- RewriterBase &rewriter,
- NewYieldValuesFn newYieldValuesFn,
- FuseTerminatorFn fuseTerminatorFn) {
- auto targetIterArgs = target.getRegionIterArgs();
- std::optional<SmallVector<Value>> targetInductionVar =
- target.getLoopInductionVars();
- SmallVector<Value> targetYieldOperands(target.getYieldedValues());
- auto sourceIterArgs = source.getRegionIterArgs();
- std::optional<SmallVector<Value>> sourceInductionVar =
- *source.getLoopInductionVars();
- SmallVector<Value> sourceYieldOperands(source.getYieldedValues());
- auto sourceRegion = source.getLoopRegions().front();
-
- FailureOr<LoopLikeOpInterface> maybeFusedLoop =
- target.replaceWithAdditionalYields(rewriter, source.getInits(),
- /*replaceInitOperandUsesInLoop=*/false,
- newYieldValuesFn);
- if (failed(maybeFusedLoop))
- llvm_unreachable("failed to replace loop");
- LoopLikeOpInterface fusedLoop = *maybeFusedLoop;
- // Since the target op is rewritten at the original's location, we move it to
- // the soure op's location.
- rewriter.moveOpBefore(fusedLoop, source);
-
- // Map control operands.
- IRMapping mapping;
- std::optional<SmallVector<Value>> fusedInductionVar =
- fusedLoop.getLoopInductionVars();
- if (fusedInductionVar) {
- if (!targetInductionVar || !sourceInductionVar)
- llvm_unreachable(
- "expected target and source loops to have induction vars");
- mapping.map(*targetInductionVar, *fusedInductionVar);
- mapping.map(*sourceInductionVar, *fusedInductionVar);
- }
- mapping.map(targetIterArgs,
- fusedLoop.getRegionIterArgs().take_front(targetIterArgs.size()));
- mapping.map(targetYieldOperands,
- fusedLoop.getYieldedValues().take_front(targetIterArgs.size()));
- mapping.map(sourceIterArgs,
- fusedLoop.getRegionIterArgs().take_back(sourceIterArgs.size()));
- mapping.map(sourceYieldOperands,
- fusedLoop.getYieldedValues().take_back(sourceIterArgs.size()));
- // Append everything except the terminator into the fused operation.
- rewriter.setInsertionPoint(
- fusedLoop.getLoopRegions().front()->front().getTerminator());
- for (Operation &op : sourceRegion->front().without_terminator())
- rewriter.clone(op, mapping);
-
- // TODO: Replace with corresponding interface method if added
- fuseTerminatorFn(rewriter, source, fusedLoop, mapping);
-
- return fusedLoop;
-}
diff --git a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
index f8246b74a5744..54dd2bdf953ca 100644
--- a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
+++ b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
@@ -47,169 +47,6 @@ module attributes {transform.with_named_sequence} {
// -----
-// CHECK-LABEL: func @fuse_two_parallel
-// CHECK-SAME: ([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}) {
-func.func @fuse_two_parallel(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
-// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
-// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
-// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
-// CHECK-DAG: [[C1FP:%.*]] = arith.constant 1.
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c1fp = arith.constant 1.0 : f32
-// CHECK: [[SUM:%.*]] = memref.alloc()
- %sum = memref.alloc() : memref<2x2xf32>
-// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]])
-// CHECK-SAME: to ([[C2]], [[C2]]) step ([[C1]], [[C1]]) {
-// CHECK: [[B_ELEM:%.*]] = memref.load [[B]]{{\[}}[[I]], [[J]]]
-// CHECK: [[SUM_ELEM:%.*]] = arith.addf [[B_ELEM]], [[C1FP]]
-// CHECK: memref.store [[SUM_ELEM]], [[SUM]]{{\[}}[[I]], [[J]]]
-// CHECK-NOT: scf.parallel
-// CHECK: [[SUM_ELEM_:%.*]] = memref.load [[SUM]]{{\[}}[[I]], [[J]]]
-// CHECK: [[A_ELEM:%.*]] = memref.load [[A]]{{\[}}[[I]], [[J]]]
-// CHECK: [[PRODUCT_ELEM:%.*]] = arith.mulf [[SUM_ELEM_]], [[A_ELEM]]
-// CHECK: memref.store [[PRODUCT_ELEM]], [[B]]{{\[}}[[I]], [[J]]]
-// CHECK: scf.reduce
-// CHECK: }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
- %sum_elem = arith.addf %B_elem, %c1fp : f32
- memref.store %sum_elem, %sum[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
- %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
- %product_elem = arith.mulf %sum_elem, %A_elem : f32
- memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
-// CHECK: memref.dealloc [[SUM]]
- memref.dealloc %sum : memref<2x2xf32>
- return
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-// CHECK-LABEL: func @fuse_two_parallel_reverse
-// CHECK-SAME: ([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}) {
-func.func @fuse_two_parallel_reverse(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
-// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
-// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
-// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
-// CHECK-DAG: [[C1FP:%.*]] = arith.constant 1.
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c1fp = arith.constant 1.0 : f32
-// CHECK: [[SUM:%.*]] = memref.alloc()
- %sum = memref.alloc() : memref<2x2xf32>
-// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]])
-// CHECK-SAME: to ([[C2]], [[C2]]) step ([[C1]], [[C1]]) {
-// CHECK: [[SUM_ELEM_:%.*]] = memref.load [[SUM]]{{\[}}[[I]], [[J]]]
-// CHECK: [[A_ELEM:%.*]] = memref.load [[A]]{{\[}}[[I]], [[J]]]
-// CHECK: [[PRODUCT_ELEM:%.*]] = arith.mulf [[SUM_ELEM_]], [[A_ELEM]]
-// CHECK: memref.store [[PRODUCT_ELEM]], [[B]]{{\[}}[[I]], [[J]]]
-// CHECK-NOT: scf.parallel
-// CHECK: [[B_ELEM:%.*]] = memref.load [[B]]{{\[}}[[I]], [[J]]]
-// CHECK: [[SUM_ELEM:%.*]] = arith.addf [[B_ELEM]], [[C1FP]]
-// CHECK: memref.store [[SUM_ELEM]], [[SUM]]{{\[}}[[I]], [[J]]]
-// CHECK: scf.reduce
-// CHECK: }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
- %sum_elem = arith.addf %B_elem, %c1fp : f32
- memref.store %sum_elem, %sum[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
- %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
- %product_elem = arith.mulf %sum_elem, %A_elem : f32
- memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
-// CHECK: memref.dealloc [[SUM]]
- memref.dealloc %sum : memref<2x2xf32>
- return
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused = transform.loop.fuse_sibling %parallel#1 into %parallel#0 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-// CHECK-LABEL: func @fuse_reductions_two
-// CHECK-SAME: (%[[A:.*]]: memref<2x2xf32>, %[[B:.*]]: memref<2x2xf32>) -> (f32, f32)
-func.func @fuse_reductions_two(%A: memref<2x2xf32>, %B: memref<2x2xf32>) -> (f32, f32) {
-// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: %[[INIT1:.*]] = arith.constant 1.000000e+00 : f32
-// CHECK-DAG: %[[INIT2:.*]] = arith.constant 2.000000e+00 : f32
-// CHECK: %[[RES:.*]]:2 = scf.parallel (%[[I:.*]], %[[J:.*]]) = (%[[C0]], %[[C0]])
-// CHECK-SAME: to (%[[C2]], %[[C2]]) step (%[[C1]], %[[C1]])
-// CHECK-SAME: init (%[[INIT1]], %[[INIT2]]) -> (f32, f32)
-// CHECK: %[[VAL_A:.*]] = memref.load %[[A]][%[[I]], %[[J]]]
-// CHECK: %[[VAL_B:.*]] = memref.load %[[B]][%[[I]], %[[J]]]
-// CHECK: scf.reduce(%[[VAL_A]], %[[VAL_B]] : f32, f32) {
-// CHECK: ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32):
-// CHECK: %[[R:.*]] = arith.addf %[[LHS]], %[[RHS]] : f32
-// CHECK: scf.reduce.return %[[R]] : f32
-// CHECK: }
-// CHECK: ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32):
-// CHECK: %[[R:.*]] = arith.mulf %[[LHS]], %[[RHS]] : f32
-// CHECK: scf.reduce.return %[[R]] : f32
-// CHECK: }
-// CHECK: return %[[RES]]#0, %[[RES]]#1 : f32, f32
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %init1 = arith.constant 1.0 : f32
- %init2 = arith.constant 2.0 : f32
- %res1 = scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) init(%init1) -> f32 {
- %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
- scf.reduce(%A_elem : f32) {
- ^bb0(%lhs: f32, %rhs: f32):
- %1 = arith.addf %lhs, %rhs : f32
- scf.reduce.return %1 : f32
- }
- }
- %res2 = scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) init(%init2) -> f32 {
- %B_elem = memref.load %B[%i, %j] : memref<2x2xf32>
- scf.reduce(%B_elem : f32) {
- ^bb0(%lhs: f32, %rhs: f32):
- %1 = arith.mulf %lhs, %rhs : f32
- scf.reduce.return %1 : f32
- }
- }
- return %res1, %res2 : f32, f32
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
// CHECK: func.func @fuse_2nd_for_into_1st([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
func.func @fuse_2nd_for_into_1st(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
@@ -371,62 +208,6 @@ module attributes {transform.with_named_sequence} {
}
}
-
-// -----
-
-// CHECK: #[[$MAP:.+]] = affine_map<(d0) -> (d0 * 32)
-#map = affine_map<(d0) -> (d0 * 32)>
-#map1 = affine_map<(d0, d1) -> (d0, d1)>
-module {
- // CHECK: func.func @loop_sibling_fusion(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}
- func.func @loop_sibling_fusion(%arg0: tensor<128xf32>, %arg1: tensor<128x128xf16>, %arg2: tensor<128x64xf32>, %arg3: tensor<128x128xf32>) -> (tensor<128xf32>, tensor<128x128xf16>) {
- // CHECK: %[[EMPTY:.*]] = tensor.empty() : tensor<128x128xf16>
- // CHECK-NEXT: %[[RESULTS:.*]]:2 = scf.forall (%[[I:.*]]) in (4) shared_outs(%[[S1:.*]] = %[[ARG0]], %[[S2:.*]] = %[[ARG1]]) -> (tensor<128xf32>, tensor<128x128xf16>) {
- // CHECK-NEXT: %[[IDX:.*]] = affine.apply #[[$MAP]](%[[I]])
- // CHECK-NEXT: %[[SLICE0:.*]] = tensor.extract_slice %[[ARG3]][%[[IDX]], 0] [32, 1] [1, 1] : tensor<128x128xf32> to tensor<32xf32>
- // CHECK-NEXT: %[[SLICE1:.*]] = tensor.extract_slice %[[ARG3]][%[[IDX]], 0] [32, 128] [1, 1] : tensor<128x128xf32> to tensor<32x128xf32>
- // CHECK-NEXT: %[[SLICE2:.*]] = tensor.extract_slice %[[EMPTY]][%[[IDX]], 0] [32, 128] [1, 1] : tensor<128x128xf16> to tensor<32x128xf16>
- // CHECK-NEXT: %[[GENERIC:.*]] = linalg.generic {{.*}} ins(%[[SLICE1]] : {{.*}}) outs(%[[SLICE2]] : {{.*}})
- // CHECK: scf.forall.in_parallel {
- // CHECK-NEXT: tensor.parallel_insert_slice %[[SLICE0]] into %[[S1]][%[[IDX]]] [32] [1] : tensor<32xf32> into tensor<128xf32>
- // CHECK-NEXT: tensor.parallel_insert_slice %[[GENERIC]] into %[[S2]][%[[IDX]], 0] [32, 128] [1, 1] : tensor<32x128xf16> into tensor<128x128xf16>
- // CHECK-NEXT: }
- // CHECK-NEXT: } {mapping = [#gpu.warp<linear_dim_0>]}
- // CHECK-NEXT: return %[[RESULTS]]#0, %[[RESULTS]]#1
- %0 = scf.forall (%arg4) in (4) shared_outs(%arg5 = %arg0) -> (tensor<128xf32>) {
- %3 = affine.apply #map(%arg4)
- %extracted_slice = tensor.extract_slice %arg3[%3, 0] [32, 1] [1, 1] : tensor<128x128xf32> to tensor<32xf32>
- scf.forall.in_parallel {
- tensor.parallel_insert_slice %extracted_slice into %arg5[%3] [32] [1] : tensor<32xf32> into tensor<128xf32>
- }
- } {mapping = [#gpu.warp<linear_dim_0>]}
- %1 = tensor.empty() : tensor<128x128xf16>
- %2 = scf.forall (%arg4) in (4) shared_outs(%arg5 = %arg1) -> (tensor<128x128xf16>) {
- %3 = affine.apply #map(%arg4)
- %extracted_slice = tensor.extract_slice %arg3[%3, 0] [32, 128] [1, 1] : tensor<128x128xf32> to tensor<32x128xf32>
- %extracted_slice_0 = tensor.extract_slice %1[%3, 0] [32, 128] [1, 1] : tensor<128x128xf16> to tensor<32x128xf16>
- %4 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%extracted_slice : tensor<32x128xf32>) outs(%extracted_slice_0 : tensor<32x128xf16>) {
- ^bb0(%in: f32, %out: f16):
- %5 = arith.truncf %in : f32 to f16
- linalg.yield %5 : f16
- } -> tensor<32x128xf16>
- scf.forall.in_parallel {
- tensor.parallel_insert_slice %4 into %arg5[%3, 0] [32, 128] [1, 1] : tensor<32x128xf16> into tensor<128x128xf16>
- }
- } {mapping = [#gpu.warp<linear_dim_0>]}
- return %0, %2 : tensor<128xf32>, tensor<128x128xf16>
- }
-}
-
-module attributes { transform.with_named_sequence } {
- transform.named_sequence @__transform_main(%root: !transform.any_op) {
- %loops = transform.structured.match ops{["scf.forall"]} in %root : (!transform.any_op) -> !transform.any_op
- %loop1, %loop2 = transform.split_handle %loops : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %loop3 = transform.loop.fuse_sibling %loop1 into %loop2 : (!transform.any_op, !transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
// -----
func.func @source_for_uses_result_of_target_for_err(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
@@ -501,9 +282,8 @@ func.func @target_for_region_uses_result_of_source_for_err(%A: tensor<128xf32>,
%6 = vector.transfer_write %5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
scf.yield %6 : tensor<128xf32>
}
- // expected-error @below {{values used inside regions of target should be properly dominated by source}}
%dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
- // expected-note @below {{see operation}}
+ // expected-error @below {{values used inside regions of target should be properly dominated by source}}
%dup2 = vector.transfer_read %1[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
%dup3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
%dup5 = arith.addf %dup3, %dup2 : vector<16xf32>
@@ -548,74 +328,6 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
-
-// -----
-
-func.func @non_matching_iteration_spaces_err(%A: memref<2x2xf32>, %B: memref<2x2xf32>) {
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c1fp = arith.constant 1.0 : f32
- %sum = memref.alloc() : memref<2x2xf32>
- // expected-error @below {{target and source iteration spaces must be equal}}
- scf.parallel (%i) = (%c0) to (%c2) step (%c1) {
- %B_elem = memref.load %B[%i, %c0] : memref<2x2xf32>
- %sum_elem = arith.addf %B_elem, %c1fp : f32
- memref.store %sum_elem, %sum[%i, %c0] : memref<2x2xf32>
- scf.reduce
- }
- scf.parallel (%i, %j) = (%c0, %c0) to (%c2, %c2) step (%c1, %c1) {
- %sum_elem = memref.load %sum[%i, %j] : memref<2x2xf32>
- %A_elem = memref.load %A[%i, %j] : memref<2x2xf32>
- %product_elem = arith.mulf %sum_elem, %A_elem : f32
- memref.store %product_elem, %B[%i, %j] : memref<2x2xf32>
- scf.reduce
- }
- memref.dealloc %sum : memref<2x2xf32>
- return
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %parallel:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused = transform.loop.fuse_sibling %parallel#0 into %parallel#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-func.func @non_matching_loop_types_err(%A: memref<2xf32>, %B: memref<2xf32>) {
- %c2 = arith.constant 2 : index
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c1fp = arith.constant 1.0 : f32
- %sum = memref.alloc() : memref<2xf32>
- // expected-error @below {{target and source must be same loop type}}
- scf.for %i = %c0 to %c2 step %c1 {
- %B_elem = memref.load %B[%i] : memref<2xf32>
- %sum_elem = arith.addf %B_elem, %c1fp : f32
- memref.store %sum_elem, %sum[%i] : memref<2xf32>
- }
- scf.parallel (%i) = (%c0) to (%c2) step (%c1) {
- %sum_elem = memref.load %sum[%i] : memref<2xf32>
- %A_elem = memref.load %A[%i] : memref<2xf32>
- %product_elem = arith.mulf %sum_elem, %A_elem : f32
- memref.store %product_elem, %B[%i] : memref<2xf32>
- scf.reduce
- }
- memref.dealloc %sum : memref<2xf32>
- return
-}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["scf.parallel"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %fused = transform.loop.fuse_sibling %0 into %1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
// -----
// CHECK: func.func @foreach_loop_pair_fuse([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
From ebab105670a409e426ddcb0278578711a622b1b2 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Thu, 4 Jul 2024 09:43:16 +0200
Subject: [PATCH 217/246] [IR] Don't strip through pointer to vector of pointer
bitcasts
When using stripPointerCasts() and getUnderlyingObject(), don't
strip through a bitcast from ptr to <1 x ptr>, which is not a
no-op pointer cast. Calling code is generally not prepared to
handle that situation, resulting, for example, in incorrect alias
analysis results.
Fixes https://github.com/llvm/llvm-project/issues/97600.
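(For illustration only: a minimal standalone C++ sketch of the guarded
strip step this patch introduces, mirroring the hunks below; the helper
name is hypothetical.)

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Only step through a bitcast when its source operand is still a plain
// pointer; a <1 x ptr> -> ptr bitcast stops the walk instead of being
// treated as a no-op pointer cast.
static const Value *stripNoOpPointerBitcasts(const Value *V) {
  while (Operator::getOpcode(V) == Instruction::BitCast) {
    const Value *Src = cast<Operator>(V)->getOperand(0);
    if (!Src->getType()->isPointerTy())
      return V; // e.g. operand of type <1 x ptr>: keep V as-is.
    V = Src;
  }
  return V;
}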
---
llvm/lib/Analysis/ValueTracking.cpp | 5 +++--
llvm/lib/IR/Value.cpp | 5 +++--
llvm/test/Analysis/BasicAA/ptr-vector.ll | 12 ++++++++++++
3 files changed, 18 insertions(+), 4 deletions(-)
create mode 100644 llvm/test/Analysis/BasicAA/ptr-vector.ll
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 5476dc5d85182..258576f0cdff8 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -6403,9 +6403,10 @@ const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast ||
Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
- V = cast<Operator>(V)->getOperand(0);
- if (!V->getType()->isPointerTy())
+ Value *NewV = cast<Operator>(V)->getOperand(0);
+ if (!NewV->getType()->isPointerTy())
return V;
+ V = NewV;
} else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
if (GA->isInterposable())
return V;
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index 8522747ccf128..b2ee75811fbb7 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -652,9 +652,10 @@ static const Value *stripPointerCastsAndOffsets(
}
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
- V = cast<Operator>(V)->getOperand(0);
- if (!V->getType()->isPointerTy())
+ Value *NewV = cast<Operator>(V)->getOperand(0);
+ if (!NewV->getType()->isPointerTy())
return V;
+ V = NewV;
} else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
// TODO: If we know an address space cast will not change the
diff --git a/llvm/test/Analysis/BasicAA/ptr-vector.ll b/llvm/test/Analysis/BasicAA/ptr-vector.ll
new file mode 100644
index 0000000000000..7dea24fb5aba7
--- /dev/null
+++ b/llvm/test/Analysis/BasicAA/ptr-vector.ll
@@ -0,0 +1,12 @@
+; RUN: opt -print-all-alias-modref-info -passes=aa-eval -disable-output < %s 2>&1 | FileCheck %s
+
+; CHECK: MayAlias: i8* %b, i8* %p
+; CHECK: Just Ref: Ptr: i8* %p <-> %v1p = call <1 x ptr> @llvm.masked.load.v1p0.p0(ptr %a, i32 8, <1 x i1> %c, <1 x ptr> poison)
+; CHECK: Just Ref: Ptr: i8* %b <-> %v1p = call <1 x ptr> @llvm.masked.load.v1p0.p0(ptr %a, i32 8, <1 x i1> %c, <1 x ptr> poison)
+define void @test(ptr %a, ptr %b, <1 x i1> %c) {
+ %v1p = call <1 x ptr> @llvm.masked.load.v1p0.p0(ptr %a, i32 8, <1 x i1> %c, <1 x ptr> poison)
+ %p = bitcast <1 x ptr> %v1p to ptr
+ load i8, ptr %p
+ store i8 0, ptr %b
+ ret void
+}
From 927def49728371d746476e79a6570cd93a4d335c Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Thu, 4 Jul 2024 07:49:36 +0000
Subject: [PATCH 218/246] Revert "[lldb] Print empty enums as if they were
unrecognised normal enums (#97553)"
This reverts commit 41fddc4ec3302f125a5b84ae86c8027dedc89984.
Reverted due to build errors with GCC caused by passing signed ints to
unsigned int parameters.
---
.../TypeSystem/Clang/TypeSystemClang.cpp | 33 ++++++++-----------
.../x86/debug-types-missing-signature.test | 4 +--
.../DumpValueObjectOptionsTests.cpp | 28 ++++------------
3 files changed, 22 insertions(+), 43 deletions(-)
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index f70efe5ed57e4..9c77a5d6e66ee 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -8656,25 +8656,20 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
// every enumerator is either a one bit value or a superset of the previous
// enumerators. Also 0 doesn't make sense when the enumerators are used as
// flags.
- clang::EnumDecl::enumerator_range enumerators = enum_decl->enumerators();
- if (enumerators.empty())
- can_be_bitfield = false;
- else {
- for (auto *enumerator : enumerators) {
- llvm::APSInt init_val = enumerator->getInitVal();
- uint64_t val = qual_type_is_signed ? init_val.getSExtValue()
- : init_val.getZExtValue();
- if (qual_type_is_signed)
- val = llvm::SignExtend64(val, 8 * byte_size);
- if (llvm::popcount(val) != 1 && (val & ~covered_bits) != 0)
- can_be_bitfield = false;
- covered_bits |= val;
- ++num_enumerators;
- if (val == enum_svalue) {
- // Found an exact match, that's all we need to do.
- s.PutCString(enumerator->getNameAsString());
- return true;
- }
+ for (auto *enumerator : enum_decl->enumerators()) {
+ llvm::APSInt init_val = enumerator->getInitVal();
+ uint64_t val =
+ qual_type_is_signed ? init_val.getSExtValue() : init_val.getZExtValue();
+ if (qual_type_is_signed)
+ val = llvm::SignExtend64(val, 8 * byte_size);
+ if (llvm::popcount(val) != 1 && (val & ~covered_bits) != 0)
+ can_be_bitfield = false;
+ covered_bits |= val;
+ ++num_enumerators;
+ if (val == enum_svalue) {
+ // Found an exact match, that's all we need to do.
+ s.PutCString(enumerator->getNameAsString());
+ return true;
}
}
diff --git a/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test b/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
index b2c792ed6003e..548dd6cdbc275 100644
--- a/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
+++ b/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
@@ -22,5 +22,5 @@ PRINTEC: use of undeclared identifier 'EC'
RUN: %lldb %t -b -o "target variable a e ec" | FileCheck --check-prefix=VARS %s
VARS: (const (unnamed struct)) a = <incomplete type "const (unnamed struct)">
-VARS: (const (unnamed enum)) e = 1
-VARS: (const (unnamed enum)) ec = 1
+VARS: (const (unnamed enum)) e = 0x1
+VARS: (const (unnamed enum)) ec = 0x1
diff --git a/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp b/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
index af6fa55bab171..a7ccd74721f66 100644
--- a/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
+++ b/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
@@ -71,13 +71,12 @@ class ValueObjectMockProcessTest : public ::testing::Test {
}
CompilerType
- MakeEnumType(const std::vector<std::pair<const char *, int>> enumerators,
- bool is_signed) {
- CompilerType int_type = m_type_system->GetBuiltinTypeForEncodingAndBitSize(
- is_signed ? lldb::eEncodingSint : lldb::eEncodingUint, 32);
+ MakeEnumType(const std::vector<std::pair<const char *, int>> enumerators) {
+ CompilerType uint_type = m_type_system->GetBuiltinTypeForEncodingAndBitSize(
+ lldb::eEncodingUint, 32);
CompilerType enum_type = m_type_system->CreateEnumerationType(
"TestEnum", m_type_system->GetTranslationUnitDecl(),
- OptionalClangModuleID(), Declaration(), int_type, false);
+ OptionalClangModuleID(), Declaration(), uint_type, false);
m_type_system->StartTagDeclarationDefinition(enum_type);
Declaration decl;
@@ -124,27 +123,12 @@ class ValueObjectMockProcessTest : public ::testing::Test {
lldb::ProcessSP m_process_sp;
};
-TEST_F(ValueObjectMockProcessTest, EmptyEnum) {
- // All values of an empty enum should be shown as plain numbers.
- TestDumpValueObject(MakeEnumType({}, false),
- {{0, {}, "(TestEnum) test_var = 0\n"},
- {1, {}, "(TestEnum) test_var = 1\n"},
- {2, {}, "(TestEnum) test_var = 2\n"}});
-
- TestDumpValueObject(MakeEnumType({}, true),
- {{-2, {}, "(TestEnum) test_var = -2\n"},
- {-1, {}, "(TestEnum) test_var = -1\n"},
- {0, {}, "(TestEnum) test_var = 0\n"},
- {1, {}, "(TestEnum) test_var = 1\n"},
- {2, {}, "(TestEnum) test_var = 2\n"}});
-}
-
TEST_F(ValueObjectMockProcessTest, Enum) {
// This is not a bitfield-like enum, so values are printed as decimal by
// default. Also we only show the enumerator name if the value is an
// exact match.
TestDumpValueObject(
- MakeEnumType({{"test_2", 2}, {"test_3", 3}}, false),
+ MakeEnumType({{"test_2", 2}, {"test_3", 3}}),
{{0, {}, "(TestEnum) test_var = 0\n"},
{1, {}, "(TestEnum) test_var = 1\n"},
{2, {}, "(TestEnum) test_var = test_2\n"},
@@ -168,7 +152,7 @@ TEST_F(ValueObjectMockProcessTest, BitFieldLikeEnum) {
// as hex, and values without exact matches are shown as a combination of
// enumerators and any remaining value left over.
TestDumpValueObject(
- MakeEnumType({{"test_2", 2}, {"test_4", 4}}, false),
+ MakeEnumType({{"test_2", 2}, {"test_4", 4}}),
{
{0, {}, "(TestEnum) test_var = 0x0\n"},
{1, {}, "(TestEnum) test_var = 0x1\n"},
From 67b302c52f79db2ab5c46e5e8c600f1c2af57a83 Mon Sep 17 00:00:00 2001
From: Cullen Rhodes <cullen.rhodes at arm.com>
Date: Thu, 4 Jul 2024 08:57:02 +0100
Subject: [PATCH 219/246] [mlir][vector] Add vector.step operation (#96776)
This patch adds a new vector.step operation to the Vector dialect. It
produces a linear sequence of index values from 0 to N-1, where N is the
number of elements in the result vector, and can be used to create
vectors of indices.
It supports both fixed-width and scalable vectors. For fixed-width vectors
the canonical representation is `arith.constant dense<[0, .., N-1]>`. A
scalable step cannot be represented as a constant and is lowered to the
`llvm.experimental.stepvector` intrinsic [1].
This op enables scalable vectorization of linalg.index ops, see #96778. It can
also be used in the SparseVectorizer in place of the lower-level stepvector
intrinsic; see [2] (patch to follow).
[1] https://llvm.org/docs/LangRef.html#llvm-experimental-stepvector-intrinsic
[2] https://github.com/llvm/llvm-project/blob/acf675b63f9426e61aac2155e29280f7d21f9421/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp#L385-L388
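(Illustrative only, not part of the patch: a rough C++ sketch of how the
op might be created programmatically. buildScalableStep is a hypothetical
helper; the builder calls are assumed from standard MLIR conventions.)

#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Build a vector.step producing vector<[4]xindex>. Marking the dimension
// scalable means the constant fold does not apply, so the op survives to
// the LLVM lowering and becomes llvm.intr.experimental.stepvector.
static Value buildScalableStep(OpBuilder &b, Location loc) {
  auto vecTy = VectorType::get(/*shape=*/{4}, b.getIndexType(),
                               /*scalableDims=*/{true});
  return b.create<vector::StepOp>(loc, vecTy);
}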
---
.../mlir/Dialect/Vector/IR/VectorOps.td | 25 +++++++++++++++++++
.../VectorToLLVM/ConvertVectorToLLVM.cpp | 17 +++++++++++--
mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 14 +++++++++++
.../VectorToLLVM/vector-to-llvm.mlir | 11 ++++++++
mlir/test/Dialect/Vector/canonicalize.mlir | 10 ++++++++
mlir/test/Dialect/Vector/invalid.mlir | 16 ++++++++++++
mlir/test/Dialect/Vector/ops.mlir | 11 +++++++-
7 files changed, 101 insertions(+), 3 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 097e5e6fb0d61..44efb7f8bd3d4 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -3017,6 +3017,31 @@ def Vector_ScanOp :
let hasVerifier = 1;
}
+//===----------------------------------------------------------------------===//
+// VectorStepOp
+//===----------------------------------------------------------------------===//
+
+def Vector_StepOp : Vector_Op<"step", [Pure]> {
+ let summary = "A linear sequence of values from 0 to N";
+ let description = [{
+ A `step` operation produces an index vector, i.e. a 1-D vector of values of
+ index type that represents a linear sequence from 0 to N-1, where N is the
+ number of elements in the `result` vector.
+
+ Supports fixed-width and scalable vectors.
+
+ Examples:
+
+ ```mlir
+ %0 = vector.step : vector<4xindex> ; [0, 1, 2, 3]
+ %1 = vector.step : vector<[4]xindex> ; [0, 1, .., <vscale * 4 - 1>]
+ ```
+ }];
+ let hasFolder = 1;
+ let results = (outs VectorOfRankAndType<[1], [Index]>:$result);
+ let assemblyFormat = "attr-dict `:` type($result)";
+}
+
def Vector_YieldOp : Vector_Op<"yield", [
Pure, ReturnLike, Terminator]> {
let summary = "Terminates and yields values from vector regions.";
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 0eac55255b133..6a8a9d818aad2 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -1860,6 +1860,19 @@ struct VectorFromElementsLowering
}
};
+/// Conversion pattern for vector.step.
+struct VectorStepOpLowering : public ConvertOpToLLVMPattern<vector::StepOp> {
+ using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;
+
+ LogicalResult
+ matchAndRewrite(vector::StepOp stepOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ Type llvmType = typeConverter->convertType(stepOp.getType());
+ rewriter.replaceOpWithNewOp<LLVM::StepVectorOp>(stepOp, llvmType);
+ return success();
+ }
+};
+
} // namespace
/// Populate the given list with patterns that convert from Vector to LLVM.
@@ -1885,8 +1898,8 @@ void mlir::populateVectorToLLVMConversionPatterns(
VectorSplatOpLowering, VectorSplatNdOpLowering,
VectorScalableInsertOpLowering, VectorScalableExtractOpLowering,
MaskedReductionOpConversion, VectorInterleaveOpLowering,
- VectorDeinterleaveOpLowering, VectorFromElementsLowering>(
- converter);
+ VectorDeinterleaveOpLowering, VectorFromElementsLowering,
+ VectorStepOpLowering>(converter);
// Transfer ops with rank > 1 are handled by VectorToSCF.
populateVectorTransferLoweringPatterns(patterns, /*maxTransferRank=*/1);
}
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 149723f51cc12..53a6648de014c 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -6312,6 +6312,20 @@ OpFoldResult SplatOp::fold(FoldAdaptor adaptor) {
return SplatElementsAttr::get(getType(), {constOperand});
}
+//===----------------------------------------------------------------------===//
+// StepOp
+//===----------------------------------------------------------------------===//
+
+OpFoldResult StepOp::fold(FoldAdaptor adaptor) {
+ auto resultType = cast<VectorType>(getType());
+ if (resultType.isScalable())
+ return nullptr;
+ SmallVector<APInt> indices;
+ for (unsigned i = 0; i < resultType.getNumElements(); i++)
+ indices.push_back(APInt(/*width=*/64, i));
+ return DenseElementsAttr::get(resultType, indices);
+}
+
//===----------------------------------------------------------------------===//
// WarpExecuteOnLane0Op
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 09b79708a9ab2..5f2d2809a0fe8 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -2621,3 +2621,14 @@ func.func @vector_from_elements_0d(%a: f32) -> vector<f32> {
%0 = vector.from_elements %a : vector<f32>
return %0 : vector<f32>
}
+
+// -----
+
+// CHECK-LABEL: @vector_step_scalable
+// CHECK: %[[STEPVECTOR:.*]] = llvm.intr.experimental.stepvector : vector<[4]xi64>
+// CHECK: %[[CAST:.*]] = builtin.unrealized_conversion_cast %[[STEPVECTOR]] : vector<[4]xi64> to vector<[4]xindex>
+// CHECK: return %[[CAST]] : vector<[4]xindex>
+func.func @vector_step_scalable() -> vector<[4]xindex> {
+ %0 = vector.step : vector<[4]xindex>
+ return %0 : vector<[4]xindex>
+}
diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir
index fc5651f5bb02f..1a674d715ca61 100644
--- a/mlir/test/Dialect/Vector/canonicalize.mlir
+++ b/mlir/test/Dialect/Vector/canonicalize.mlir
@@ -2719,3 +2719,13 @@ func.func @from_elements_to_splat(%a: f32, %b: f32) -> (vector<2x3xf32>, vector<
// CHECK: return %[[splat]], %[[from_el]], %[[splat2]]
return %0, %1, %2 : vector<2x3xf32>, vector<2x3xf32>, vector<f32>
}
+
+// -----
+
+// CHECK-LABEL: @fold_vector_step_to_constant
+// CHECK: %[[CONSTANT:.*]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex>
+// CHECK: return %[[CONSTANT]] : vector<4xindex>
+func.func @fold_vector_step_to_constant() -> vector<4xindex> {
+ %0 = vector.step : vector<4xindex>
+ return %0 : vector<4xindex>
+}
diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index d0eaed8f98cc5..db169a6c1f8ae 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1871,3 +1871,19 @@ func.func @invalid_from_elements(%a: f32, %b: i32) {
vector.from_elements %a, %b : vector<2xf32>
return
}
+
+// -----
+
+func.func @invalid_step_0d() {
+ // expected-error @+1 {{vector.step' op result #0 must be vector of index values of ranks 1, but got 'vector<f32>'}}
+ vector.step : vector<f32>
+ return
+}
+
+// -----
+
+func.func @invalid_step_2d() {
+ // expected-error @+1 {{vector.step' op result #0 must be vector of index values of ranks 1, but got 'vector<2x4xf32>'}}
+ vector.step : vector<2x4xf32>
+ return
+}
diff --git a/mlir/test/Dialect/Vector/ops.mlir b/mlir/test/Dialect/Vector/ops.mlir
index 4da09584db88b..531e2db636431 100644
--- a/mlir/test/Dialect/Vector/ops.mlir
+++ b/mlir/test/Dialect/Vector/ops.mlir
@@ -1171,4 +1171,13 @@ func.func @from_elements(%a: f32, %b: f32) -> (vector<f32>, vector<1xf32>, vecto
// CHECK: vector.from_elements %[[b]], %[[b]], %[[a]], %[[a]] : vector<2x2xf32>
%3 = vector.from_elements %b, %b, %a, %a : vector<2x2xf32>
return %0, %1, %2, %3 : vector<f32>, vector<1xf32>, vector<1x2xf32>, vector<2x2xf32>
-}
\ No newline at end of file
+}
+
+// CHECK-LABEL: @step
+func.func @step() {
+ // CHECK: vector.step : vector<2xindex>
+ %0 = vector.step : vector<2xindex>
+ // CHECK: vector.step : vector<[4]xindex>
+ %1 = vector.step : vector<[4]xindex>
+ return
+}
From 7b34cad1b8505dd1d5593ab8fd196398bf605ac5 Mon Sep 17 00:00:00 2001
From: Tianyi Guan <tguan at nvidia.com>
Date: Mon, 1 Jul 2024 15:04:12 +0100
Subject: [PATCH 220/246] [AArch64] Extract bf16 cases from `itofp.ll` to
`itofp-bf16.ll`, NFC.
---
llvm/test/CodeGen/AArch64/itofp-bf16.ll | 1832 +++++++++++++++++++++++
llvm/test/CodeGen/AArch64/itofp.ll | 1820 ----------------------
2 files changed, 1832 insertions(+), 1820 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/itofp-bf16.ll
diff --git a/llvm/test/CodeGen/AArch64/itofp-bf16.ll b/llvm/test/CodeGen/AArch64/itofp-bf16.ll
new file mode 100644
index 0000000000000..978fe0b5ba3b3
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/itofp-bf16.ll
@@ -0,0 +1,1832 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+
+define bfloat @stofp_i64_bf16(i64 %a) {
+; CHECK-LABEL: stofp_i64_bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmp x0, #0
+; CHECK-NEXT: and x11, x0, #0x8000000000000000
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: cneg x9, x0, mi
+; CHECK-NEXT: lsr x10, x9, #53
+; CHECK-NEXT: cmp x10, #0
+; CHECK-NEXT: and x10, x9, #0xfffffffffffff000
+; CHECK-NEXT: csel x10, x10, x9, ne
+; CHECK-NEXT: scvtf d0, x10
+; CHECK-NEXT: cset w10, ne
+; CHECK-NEXT: tst x9, #0xfff
+; CHECK-NEXT: csel w10, wzr, w10, eq
+; CHECK-NEXT: fmov x9, d0
+; CHECK-NEXT: orr x9, x9, x11
+; CHECK-NEXT: orr x9, x9, x10
+; CHECK-NEXT: fmov d0, x9
+; CHECK-NEXT: fcvtxn s0, d0
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w10, w9, #16, #1
+; CHECK-NEXT: add w8, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i64 %a to bfloat
+ ret bfloat %c
+}
+
+define bfloat @utofp_i64_bf16(i64 %a) {
+; CHECK-LABEL: utofp_i64_bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: lsr x9, x0, #53
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: cmp x9, #0
+; CHECK-NEXT: and x9, x0, #0xfffffffffffff000
+; CHECK-NEXT: csel x9, x9, x0, ne
+; CHECK-NEXT: ucvtf d0, x9
+; CHECK-NEXT: cset w9, ne
+; CHECK-NEXT: tst x0, #0xfff
+; CHECK-NEXT: csel w9, wzr, w9, eq
+; CHECK-NEXT: fmov x10, d0
+; CHECK-NEXT: orr x9, x10, x9
+; CHECK-NEXT: fmov d0, x9
+; CHECK-NEXT: fcvtxn s0, d0
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w10, w9, #16, #1
+; CHECK-NEXT: add w8, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i64 %a to bfloat
+ ret bfloat %c
+}
+
+define bfloat @stofp_i32_bf16(i32 %a) {
+; CHECK-LABEL: stofp_i32_bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf d0, w0
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: fcvtxn s0, d0
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w10, w9, #16, #1
+; CHECK-NEXT: add w8, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i32 %a to bfloat
+ ret bfloat %c
+}
+
+define bfloat @utofp_i32_bf16(i32 %a) {
+; CHECK-LABEL: utofp_i32_bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf d0, w0
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: fcvtxn s0, d0
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w10, w9, #16, #1
+; CHECK-NEXT: add w8, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i32 %a to bfloat
+ ret bfloat %c
+}
+
+define bfloat @stofp_i16_bf16(i16 %a) {
+; CHECK-LABEL: stofp_i16_bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sxth w9, w0
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: scvtf s0, w9
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w10, w9, #16, #1
+; CHECK-NEXT: add w8, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i16 %a to bfloat
+ ret bfloat %c
+}
+
+define bfloat @utofp_i16_bf16(i16 %a) {
+; CHECK-LABEL: utofp_i16_bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and w9, w0, #0xffff
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: ucvtf s0, w9
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w10, w9, #16, #1
+; CHECK-NEXT: add w8, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i16 %a to bfloat
+ ret bfloat %c
+}
+
+define bfloat @stofp_i8_bf16(i8 %a) {
+; CHECK-LABEL: stofp_i8_bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sxtb w9, w0
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: scvtf s0, w9
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w10, w9, #16, #1
+; CHECK-NEXT: add w8, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i8 %a to bfloat
+ ret bfloat %c
+}
+
+define bfloat @utofp_i8_bf16(i8 %a) {
+; CHECK-LABEL: utofp_i8_bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and w9, w0, #0xff
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: ucvtf s0, w9
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w10, w9, #16, #1
+; CHECK-NEXT: add w8, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i8 %a to bfloat
+ ret bfloat %c
+}
+
+define <2 x bfloat> @stofp_v2i64_v2bf16(<2 x i64> %a) {
+; CHECK-LABEL: stofp_v2i64_v2bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x9, v0.d[1]
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: cmp x9, #0
+; CHECK-NEXT: cneg x10, x9, mi
+; CHECK-NEXT: and x9, x9, #0x8000000000000000
+; CHECK-NEXT: lsr x11, x10, #53
+; CHECK-NEXT: and x12, x10, #0xfffffffffffff000
+; CHECK-NEXT: cmp x11, #0
+; CHECK-NEXT: csel x11, x12, x10, ne
+; CHECK-NEXT: cset w12, ne
+; CHECK-NEXT: tst x10, #0xfff
+; CHECK-NEXT: fmov x10, d0
+; CHECK-NEXT: csel w12, wzr, w12, eq
+; CHECK-NEXT: scvtf d0, x11
+; CHECK-NEXT: cmp x10, #0
+; CHECK-NEXT: cneg x13, x10, mi
+; CHECK-NEXT: and x10, x10, #0x8000000000000000
+; CHECK-NEXT: lsr x14, x13, #53
+; CHECK-NEXT: cmp x14, #0
+; CHECK-NEXT: and x14, x13, #0xfffffffffffff000
+; CHECK-NEXT: csel x11, x14, x13, ne
+; CHECK-NEXT: cset w14, ne
+; CHECK-NEXT: tst x13, #0xfff
+; CHECK-NEXT: scvtf d1, x11
+; CHECK-NEXT: fmov x11, d0
+; CHECK-NEXT: orr x9, x11, x9
+; CHECK-NEXT: csel w11, wzr, w14, eq
+; CHECK-NEXT: fmov x13, d1
+; CHECK-NEXT: orr x9, x9, x12
+; CHECK-NEXT: fmov d0, x9
+; CHECK-NEXT: orr x10, x13, x10
+; CHECK-NEXT: orr x10, x10, x11
+; CHECK-NEXT: fcvtxn s0, d0
+; CHECK-NEXT: fmov d1, x10
+; CHECK-NEXT: fcvtxn s1, d1
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w11, w9, #16, #1
+; CHECK-NEXT: add w9, w9, w8
+; CHECK-NEXT: fmov w10, s1
+; CHECK-NEXT: add w9, w11, w9
+; CHECK-NEXT: lsr w9, w9, #16
+; CHECK-NEXT: ubfx w12, w10, #16, #1
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: fmov s1, w9
+; CHECK-NEXT: add w8, w12, w8
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i64> %a to <2 x bfloat>
+ ret <2 x bfloat> %c
+}
+
+define <2 x bfloat> @utofp_v2i64_v2bf16(<2 x i64> %a) {
+; CHECK-LABEL: utofp_v2i64_v2bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x9, v0.d[1]
+; CHECK-NEXT: fmov x11, d0
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: lsr x10, x9, #53
+; CHECK-NEXT: and x12, x9, #0xfffffffffffff000
+; CHECK-NEXT: cmp x10, #0
+; CHECK-NEXT: lsr x10, x11, #53
+; CHECK-NEXT: csel x12, x12, x9, ne
+; CHECK-NEXT: cset w13, ne
+; CHECK-NEXT: tst x9, #0xfff
+; CHECK-NEXT: csel w9, wzr, w13, eq
+; CHECK-NEXT: cmp x10, #0
+; CHECK-NEXT: and x10, x11, #0xfffffffffffff000
+; CHECK-NEXT: csel x10, x10, x11, ne
+; CHECK-NEXT: ucvtf d0, x12
+; CHECK-NEXT: ucvtf d1, x10
+; CHECK-NEXT: cset w10, ne
+; CHECK-NEXT: tst x11, #0xfff
+; CHECK-NEXT: csel w10, wzr, w10, eq
+; CHECK-NEXT: fmov x11, d0
+; CHECK-NEXT: fmov x12, d1
+; CHECK-NEXT: orr x9, x11, x9
+; CHECK-NEXT: orr x10, x12, x10
+; CHECK-NEXT: fmov d0, x9
+; CHECK-NEXT: fmov d1, x10
+; CHECK-NEXT: fcvtxn s0, d0
+; CHECK-NEXT: fcvtxn s1, d1
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: fmov w10, s1
+; CHECK-NEXT: ubfx w11, w9, #16, #1
+; CHECK-NEXT: add w9, w9, w8
+; CHECK-NEXT: ubfx w12, w10, #16, #1
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: add w9, w11, w9
+; CHECK-NEXT: add w8, w12, w8
+; CHECK-NEXT: lsr w9, w9, #16
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: fmov s1, w9
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i64> %a to <2 x bfloat>
+ ret <2 x bfloat> %c
+}
+
+define <3 x bfloat> @stofp_v3i64_v3bf16(<3 x i64> %a) {
+; CHECK-LABEL: stofp_v3i64_v3bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: scvtf v1.2d, v2.2d
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: scvtf v0.2d, v0.2d
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushr v3.4s, v0.4s, #16
+; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
+; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
+; CHECK-NEXT: fcmeq v3.4s, v0.4s, v0.4s
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NEXT: bit v0.16b, v1.16b, v3.16b
+; CHECK-NEXT: shrn v0.4h, v0.4s, #16
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i64> %a to <3 x bfloat>
+ ret <3 x bfloat> %c
+}
+
+define <3 x bfloat> @utofp_v3i64_v3bf16(<3 x i64> %a) {
+; CHECK-LABEL: utofp_v3i64_v3bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: ucvtf v1.2d, v2.2d
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushr v3.4s, v0.4s, #16
+; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
+; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
+; CHECK-NEXT: fcmeq v3.4s, v0.4s, v0.4s
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NEXT: bit v0.16b, v1.16b, v3.16b
+; CHECK-NEXT: shrn v0.4h, v0.4s, #16
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i64> %a to <3 x bfloat>
+ ret <3 x bfloat> %c
+}
+
+define <4 x bfloat> @stofp_v4i64_v4bf16(<4 x i64> %a) {
+; CHECK-LABEL: stofp_v4i64_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v0.2d, v0.2d
+; CHECK-NEXT: scvtf v1.2d, v1.2d
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushr v3.4s, v0.4s, #16
+; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
+; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
+; CHECK-NEXT: fcmeq v3.4s, v0.4s, v0.4s
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NEXT: bit v0.16b, v1.16b, v3.16b
+; CHECK-NEXT: shrn v0.4h, v0.4s, #16
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <4 x i64> %a to <4 x bfloat>
+ ret <4 x bfloat> %c
+}
+
+define <4 x bfloat> @utofp_v4i64_v4bf16(<4 x i64> %a) {
+; CHECK-LABEL: utofp_v4i64_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-NEXT: ucvtf v1.2d, v1.2d
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushr v3.4s, v0.4s, #16
+; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
+; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
+; CHECK-NEXT: fcmeq v3.4s, v0.4s, v0.4s
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NEXT: bit v0.16b, v1.16b, v3.16b
+; CHECK-NEXT: shrn v0.4h, v0.4s, #16
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <4 x i64> %a to <4 x bfloat>
+ ret <4 x bfloat> %c
+}
+
+define <8 x bfloat> @stofp_v8i64_v8bf16(<8 x i64> %a) {
+; CHECK-LABEL: stofp_v8i64_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v2.2d, v2.2d
+; CHECK-NEXT: scvtf v0.2d, v0.2d
+; CHECK-NEXT: scvtf v3.2d, v3.2d
+; CHECK-NEXT: scvtf v1.2d, v1.2d
+; CHECK-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v3.4s, #127, msl #8
+; CHECK-NEXT: ushr v4.4s, v2.4s, #16
+; CHECK-NEXT: ushr v5.4s, v0.4s, #16
+; CHECK-NEXT: add v6.4s, v2.4s, v3.4s
+; CHECK-NEXT: add v3.4s, v0.4s, v3.4s
+; CHECK-NEXT: and v4.16b, v4.16b, v1.16b
+; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT: fcmeq v5.4s, v2.4s, v2.4s
+; CHECK-NEXT: orr v2.4s, #64, lsl #16
+; CHECK-NEXT: add v4.4s, v4.4s, v6.4s
+; CHECK-NEXT: fcmeq v6.4s, v0.4s, v0.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: bit v2.16b, v4.16b, v5.16b
+; CHECK-NEXT: bit v0.16b, v1.16b, v6.16b
+; CHECK-NEXT: uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <8 x i64> %a to <8 x bfloat>
+ ret <8 x bfloat> %c
+}
+
+define <8 x bfloat> @utofp_v8i64_v8bf16(<8 x i64> %a) {
+; CHECK-LABEL: utofp_v8i64_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-NEXT: ucvtf v3.2d, v3.2d
+; CHECK-NEXT: ucvtf v1.2d, v1.2d
+; CHECK-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v3.4s, #127, msl #8
+; CHECK-NEXT: ushr v4.4s, v2.4s, #16
+; CHECK-NEXT: ushr v5.4s, v0.4s, #16
+; CHECK-NEXT: add v6.4s, v2.4s, v3.4s
+; CHECK-NEXT: add v3.4s, v0.4s, v3.4s
+; CHECK-NEXT: and v4.16b, v4.16b, v1.16b
+; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT: fcmeq v5.4s, v2.4s, v2.4s
+; CHECK-NEXT: orr v2.4s, #64, lsl #16
+; CHECK-NEXT: add v4.4s, v4.4s, v6.4s
+; CHECK-NEXT: fcmeq v6.4s, v0.4s, v0.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: bit v2.16b, v4.16b, v5.16b
+; CHECK-NEXT: bit v0.16b, v1.16b, v6.16b
+; CHECK-NEXT: uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <8 x i64> %a to <8 x bfloat>
+ ret <8 x bfloat> %c
+}
+
+define <16 x bfloat> @stofp_v16i64_v16bf16(<16 x i64> %a) {
+; CHECK-LABEL: stofp_v16i64_v16bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v0.2d, v0.2d
+; CHECK-NEXT: scvtf v2.2d, v2.2d
+; CHECK-NEXT: scvtf v6.2d, v6.2d
+; CHECK-NEXT: scvtf v4.2d, v4.2d
+; CHECK-NEXT: scvtf v1.2d, v1.2d
+; CHECK-NEXT: scvtf v3.2d, v3.2d
+; CHECK-NEXT: scvtf v7.2d, v7.2d
+; CHECK-NEXT: scvtf v5.2d, v5.2d
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-NEXT: fcvtn v6.2s, v6.2d
+; CHECK-NEXT: fcvtn v4.2s, v4.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-NEXT: fcvtn2 v6.4s, v7.2d
+; CHECK-NEXT: fcvtn2 v4.4s, v5.2d
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v3.4s, #127, msl #8
+; CHECK-NEXT: ushr v7.4s, v0.4s, #16
+; CHECK-NEXT: ushr v5.4s, v2.4s, #16
+; CHECK-NEXT: ushr v16.4s, v6.4s, #16
+; CHECK-NEXT: ushr v17.4s, v4.4s, #16
+; CHECK-NEXT: add v19.4s, v0.4s, v3.4s
+; CHECK-NEXT: add v18.4s, v2.4s, v3.4s
+; CHECK-NEXT: add v20.4s, v6.4s, v3.4s
+; CHECK-NEXT: add v3.4s, v4.4s, v3.4s
+; CHECK-NEXT: and v7.16b, v7.16b, v1.16b
+; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
+; CHECK-NEXT: and v16.16b, v16.16b, v1.16b
+; CHECK-NEXT: and v1.16b, v17.16b, v1.16b
+; CHECK-NEXT: fcmeq v17.4s, v2.4s, v2.4s
+; CHECK-NEXT: orr v2.4s, #64, lsl #16
+; CHECK-NEXT: add v7.4s, v7.4s, v19.4s
+; CHECK-NEXT: fcmeq v19.4s, v6.4s, v6.4s
+; CHECK-NEXT: add v5.4s, v5.4s, v18.4s
+; CHECK-NEXT: fcmeq v18.4s, v0.4s, v0.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
+; CHECK-NEXT: fcmeq v3.4s, v4.4s, v4.4s
+; CHECK-NEXT: add v16.4s, v16.4s, v20.4s
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: orr v6.4s, #64, lsl #16
+; CHECK-NEXT: orr v4.4s, #64, lsl #16
+; CHECK-NEXT: bit v2.16b, v5.16b, v17.16b
+; CHECK-NEXT: mov v5.16b, v19.16b
+; CHECK-NEXT: bit v0.16b, v7.16b, v18.16b
+; CHECK-NEXT: bif v1.16b, v4.16b, v3.16b
+; CHECK-NEXT: bsl v5.16b, v16.16b, v6.16b
+; CHECK-NEXT: uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT: uzp2 v1.8h, v1.8h, v5.8h
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <16 x i64> %a to <16 x bfloat>
+ ret <16 x bfloat> %c
+}
+
+define <16 x bfloat> @utofp_v16i64_v16bf16(<16 x i64> %a) {
+; CHECK-LABEL: utofp_v16i64_v16bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-NEXT: ucvtf v6.2d, v6.2d
+; CHECK-NEXT: ucvtf v4.2d, v4.2d
+; CHECK-NEXT: ucvtf v1.2d, v1.2d
+; CHECK-NEXT: ucvtf v3.2d, v3.2d
+; CHECK-NEXT: ucvtf v7.2d, v7.2d
+; CHECK-NEXT: ucvtf v5.2d, v5.2d
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-NEXT: fcvtn v6.2s, v6.2d
+; CHECK-NEXT: fcvtn v4.2s, v4.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-NEXT: fcvtn2 v6.4s, v7.2d
+; CHECK-NEXT: fcvtn2 v4.4s, v5.2d
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v3.4s, #127, msl #8
+; CHECK-NEXT: ushr v7.4s, v0.4s, #16
+; CHECK-NEXT: ushr v5.4s, v2.4s, #16
+; CHECK-NEXT: ushr v16.4s, v6.4s, #16
+; CHECK-NEXT: ushr v17.4s, v4.4s, #16
+; CHECK-NEXT: add v19.4s, v0.4s, v3.4s
+; CHECK-NEXT: add v18.4s, v2.4s, v3.4s
+; CHECK-NEXT: add v20.4s, v6.4s, v3.4s
+; CHECK-NEXT: add v3.4s, v4.4s, v3.4s
+; CHECK-NEXT: and v7.16b, v7.16b, v1.16b
+; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
+; CHECK-NEXT: and v16.16b, v16.16b, v1.16b
+; CHECK-NEXT: and v1.16b, v17.16b, v1.16b
+; CHECK-NEXT: fcmeq v17.4s, v2.4s, v2.4s
+; CHECK-NEXT: orr v2.4s, #64, lsl #16
+; CHECK-NEXT: add v7.4s, v7.4s, v19.4s
+; CHECK-NEXT: fcmeq v19.4s, v6.4s, v6.4s
+; CHECK-NEXT: add v5.4s, v5.4s, v18.4s
+; CHECK-NEXT: fcmeq v18.4s, v0.4s, v0.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
+; CHECK-NEXT: fcmeq v3.4s, v4.4s, v4.4s
+; CHECK-NEXT: add v16.4s, v16.4s, v20.4s
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: orr v6.4s, #64, lsl #16
+; CHECK-NEXT: orr v4.4s, #64, lsl #16
+; CHECK-NEXT: bit v2.16b, v5.16b, v17.16b
+; CHECK-NEXT: mov v5.16b, v19.16b
+; CHECK-NEXT: bit v0.16b, v7.16b, v18.16b
+; CHECK-NEXT: bif v1.16b, v4.16b, v3.16b
+; CHECK-NEXT: bsl v5.16b, v16.16b, v6.16b
+; CHECK-NEXT: uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT: uzp2 v1.8h, v1.8h, v5.8h
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <16 x i64> %a to <16 x bfloat>
+ ret <16 x bfloat> %c
+}
+
+define <32 x bfloat> @stofp_v32i64_v32bf16(<32 x i64> %a) {
+; CHECK-LABEL: stofp_v32i64_v32bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v17.2d, v2.2d
+; CHECK-NEXT: scvtf v18.2d, v0.2d
+; CHECK-NEXT: scvtf v19.2d, v3.2d
+; CHECK-NEXT: scvtf v3.2d, v6.2d
+; CHECK-NEXT: ldp q21, q20, [sp, #32]
+; CHECK-NEXT: scvtf v4.2d, v4.2d
+; CHECK-NEXT: scvtf v6.2d, v7.2d
+; CHECK-NEXT: scvtf v5.2d, v5.2d
+; CHECK-NEXT: ldp q24, q23, [sp, #64]
+; CHECK-NEXT: movi v16.4s, #1
+; CHECK-NEXT: fcvtn v0.2s, v17.2d
+; CHECK-NEXT: scvtf v17.2d, v1.2d
+; CHECK-NEXT: fcvtn v1.2s, v18.2d
+; CHECK-NEXT: fcvtn v3.2s, v3.2d
+; CHECK-NEXT: ldp q18, q7, [sp]
+; CHECK-NEXT: scvtf v21.2d, v21.2d
+; CHECK-NEXT: fcvtn v4.2s, v4.2d
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: scvtf v20.2d, v20.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v19.2d
+; CHECK-NEXT: ldp q22, q19, [sp, #96]
+; CHECK-NEXT: fcvtn2 v1.4s, v17.2d
+; CHECK-NEXT: fcvtn2 v3.4s, v6.2d
+; CHECK-NEXT: scvtf v18.2d, v18.2d
+; CHECK-NEXT: scvtf v17.2d, v24.2d
+; CHECK-NEXT: fcvtn v6.2s, v21.2d
+; CHECK-NEXT: fcvtn2 v4.4s, v5.2d
+; CHECK-NEXT: scvtf v22.2d, v22.2d
+; CHECK-NEXT: scvtf v21.2d, v23.2d
+; CHECK-NEXT: scvtf v7.2d, v7.2d
+; CHECK-NEXT: ushr v24.4s, v0.4s, #16
+; CHECK-NEXT: add v5.4s, v0.4s, v2.4s
+; CHECK-NEXT: scvtf v19.2d, v19.2d
+; CHECK-NEXT: ushr v23.4s, v1.4s, #16
+; CHECK-NEXT: ushr v25.4s, v3.4s, #16
+; CHECK-NEXT: fcvtn v18.2s, v18.2d
+; CHECK-NEXT: fcvtn2 v6.4s, v20.2d
+; CHECK-NEXT: add v26.4s, v1.4s, v2.4s
+; CHECK-NEXT: fcvtn v17.2s, v17.2d
+; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
+; CHECK-NEXT: fcvtn v22.2s, v22.2d
+; CHECK-NEXT: fcmeq v20.4s, v0.4s, v0.4s
+; CHECK-NEXT: and v23.16b, v23.16b, v16.16b
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: fcmeq v27.4s, v3.4s, v3.4s
+; CHECK-NEXT: fcvtn2 v18.4s, v7.2d
+; CHECK-NEXT: add v7.4s, v3.4s, v2.4s
+; CHECK-NEXT: orr v3.4s, #64, lsl #16
+; CHECK-NEXT: add v5.4s, v24.4s, v5.4s
+; CHECK-NEXT: and v24.16b, v25.16b, v16.16b
+; CHECK-NEXT: ushr v25.4s, v4.4s, #16
+; CHECK-NEXT: fcvtn2 v22.4s, v19.2d
+; CHECK-NEXT: add v19.4s, v23.4s, v26.4s
+; CHECK-NEXT: ushr v26.4s, v6.4s, #16
+; CHECK-NEXT: fcvtn2 v17.4s, v21.2d
+; CHECK-NEXT: fcmeq v21.4s, v1.4s, v1.4s
+; CHECK-NEXT: orr v1.4s, #64, lsl #16
+; CHECK-NEXT: and v23.16b, v25.16b, v16.16b
+; CHECK-NEXT: add v25.4s, v4.4s, v2.4s
+; CHECK-NEXT: add v7.4s, v24.4s, v7.4s
+; CHECK-NEXT: ushr v24.4s, v18.4s, #16
+; CHECK-NEXT: add v30.4s, v18.4s, v2.4s
+; CHECK-NEXT: bit v0.16b, v5.16b, v20.16b
+; CHECK-NEXT: ushr v28.4s, v22.4s, #16
+; CHECK-NEXT: add v31.4s, v22.4s, v2.4s
+; CHECK-NEXT: add v23.4s, v23.4s, v25.4s
+; CHECK-NEXT: and v25.16b, v26.16b, v16.16b
+; CHECK-NEXT: add v26.4s, v6.4s, v2.4s
+; CHECK-NEXT: ushr v29.4s, v17.4s, #16
+; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
+; CHECK-NEXT: add v2.4s, v17.4s, v2.4s
+; CHECK-NEXT: and v28.16b, v28.16b, v16.16b
+; CHECK-NEXT: bit v3.16b, v7.16b, v27.16b
+; CHECK-NEXT: bit v1.16b, v19.16b, v21.16b
+; CHECK-NEXT: add v25.4s, v25.4s, v26.4s
+; CHECK-NEXT: fcmeq v26.4s, v6.4s, v6.4s
+; CHECK-NEXT: orr v6.4s, #64, lsl #16
+; CHECK-NEXT: and v16.16b, v29.16b, v16.16b
+; CHECK-NEXT: add v24.4s, v24.4s, v30.4s
+; CHECK-NEXT: fcmeq v30.4s, v18.4s, v18.4s
+; CHECK-NEXT: add v28.4s, v28.4s, v31.4s
+; CHECK-NEXT: fcmeq v31.4s, v22.4s, v22.4s
+; CHECK-NEXT: fcmeq v29.4s, v4.4s, v4.4s
+; CHECK-NEXT: orr v4.4s, #64, lsl #16
+; CHECK-NEXT: orr v18.4s, #64, lsl #16
+; CHECK-NEXT: orr v22.4s, #64, lsl #16
+; CHECK-NEXT: mov v5.16b, v26.16b
+; CHECK-NEXT: add v2.4s, v16.4s, v2.4s
+; CHECK-NEXT: fcmeq v16.4s, v17.4s, v17.4s
+; CHECK-NEXT: orr v17.4s, #64, lsl #16
+; CHECK-NEXT: uzp2 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: mov v7.16b, v31.16b
+; CHECK-NEXT: bit v4.16b, v23.16b, v29.16b
+; CHECK-NEXT: bsl v5.16b, v25.16b, v6.16b
+; CHECK-NEXT: mov v6.16b, v30.16b
+; CHECK-NEXT: bsl v16.16b, v2.16b, v17.16b
+; CHECK-NEXT: bsl v7.16b, v28.16b, v22.16b
+; CHECK-NEXT: bsl v6.16b, v24.16b, v18.16b
+; CHECK-NEXT: uzp2 v1.8h, v4.8h, v3.8h
+; CHECK-NEXT: uzp2 v3.8h, v16.8h, v7.8h
+; CHECK-NEXT: uzp2 v2.8h, v6.8h, v5.8h
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <32 x i64> %a to <32 x bfloat>
+ ret <32 x bfloat> %c
+}
+
+define <32 x bfloat> @utofp_v32i64_v32bf16(<32 x i64> %a) {
+; CHECK-LABEL: utofp_v32i64_v32bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v17.2d, v2.2d
+; CHECK-NEXT: ucvtf v18.2d, v0.2d
+; CHECK-NEXT: ucvtf v19.2d, v3.2d
+; CHECK-NEXT: ucvtf v3.2d, v6.2d
+; CHECK-NEXT: ldp q21, q20, [sp, #32]
+; CHECK-NEXT: ucvtf v4.2d, v4.2d
+; CHECK-NEXT: ucvtf v6.2d, v7.2d
+; CHECK-NEXT: ucvtf v5.2d, v5.2d
+; CHECK-NEXT: ldp q24, q23, [sp, #64]
+; CHECK-NEXT: movi v16.4s, #1
+; CHECK-NEXT: fcvtn v0.2s, v17.2d
+; CHECK-NEXT: ucvtf v17.2d, v1.2d
+; CHECK-NEXT: fcvtn v1.2s, v18.2d
+; CHECK-NEXT: fcvtn v3.2s, v3.2d
+; CHECK-NEXT: ldp q18, q7, [sp]
+; CHECK-NEXT: ucvtf v21.2d, v21.2d
+; CHECK-NEXT: fcvtn v4.2s, v4.2d
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: ucvtf v20.2d, v20.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v19.2d
+; CHECK-NEXT: ldp q22, q19, [sp, #96]
+; CHECK-NEXT: fcvtn2 v1.4s, v17.2d
+; CHECK-NEXT: fcvtn2 v3.4s, v6.2d
+; CHECK-NEXT: ucvtf v18.2d, v18.2d
+; CHECK-NEXT: ucvtf v17.2d, v24.2d
+; CHECK-NEXT: fcvtn v6.2s, v21.2d
+; CHECK-NEXT: fcvtn2 v4.4s, v5.2d
+; CHECK-NEXT: ucvtf v22.2d, v22.2d
+; CHECK-NEXT: ucvtf v21.2d, v23.2d
+; CHECK-NEXT: ucvtf v7.2d, v7.2d
+; CHECK-NEXT: ushr v24.4s, v0.4s, #16
+; CHECK-NEXT: add v5.4s, v0.4s, v2.4s
+; CHECK-NEXT: ucvtf v19.2d, v19.2d
+; CHECK-NEXT: ushr v23.4s, v1.4s, #16
+; CHECK-NEXT: ushr v25.4s, v3.4s, #16
+; CHECK-NEXT: fcvtn v18.2s, v18.2d
+; CHECK-NEXT: fcvtn2 v6.4s, v20.2d
+; CHECK-NEXT: add v26.4s, v1.4s, v2.4s
+; CHECK-NEXT: fcvtn v17.2s, v17.2d
+; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
+; CHECK-NEXT: fcvtn v22.2s, v22.2d
+; CHECK-NEXT: fcmeq v20.4s, v0.4s, v0.4s
+; CHECK-NEXT: and v23.16b, v23.16b, v16.16b
+; CHECK-NEXT: orr v0.4s, #64, lsl #16
+; CHECK-NEXT: fcmeq v27.4s, v3.4s, v3.4s
+; CHECK-NEXT: fcvtn2 v18.4s, v7.2d
+; CHECK-NEXT: add v7.4s, v3.4s, v2.4s
+; CHECK-NEXT: orr v3.4s, #64, lsl #16
+; CHECK-NEXT: add v5.4s, v24.4s, v5.4s
+; CHECK-NEXT: and v24.16b, v25.16b, v16.16b
+; CHECK-NEXT: ushr v25.4s, v4.4s, #16
+; CHECK-NEXT: fcvtn2 v22.4s, v19.2d
+; CHECK-NEXT: add v19.4s, v23.4s, v26.4s
+; CHECK-NEXT: ushr v26.4s, v6.4s, #16
+; CHECK-NEXT: fcvtn2 v17.4s, v21.2d
+; CHECK-NEXT: fcmeq v21.4s, v1.4s, v1.4s
+; CHECK-NEXT: orr v1.4s, #64, lsl #16
+; CHECK-NEXT: and v23.16b, v25.16b, v16.16b
+; CHECK-NEXT: add v25.4s, v4.4s, v2.4s
+; CHECK-NEXT: add v7.4s, v24.4s, v7.4s
+; CHECK-NEXT: ushr v24.4s, v18.4s, #16
+; CHECK-NEXT: add v30.4s, v18.4s, v2.4s
+; CHECK-NEXT: bit v0.16b, v5.16b, v20.16b
+; CHECK-NEXT: ushr v28.4s, v22.4s, #16
+; CHECK-NEXT: add v31.4s, v22.4s, v2.4s
+; CHECK-NEXT: add v23.4s, v23.4s, v25.4s
+; CHECK-NEXT: and v25.16b, v26.16b, v16.16b
+; CHECK-NEXT: add v26.4s, v6.4s, v2.4s
+; CHECK-NEXT: ushr v29.4s, v17.4s, #16
+; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
+; CHECK-NEXT: add v2.4s, v17.4s, v2.4s
+; CHECK-NEXT: and v28.16b, v28.16b, v16.16b
+; CHECK-NEXT: bit v3.16b, v7.16b, v27.16b
+; CHECK-NEXT: bit v1.16b, v19.16b, v21.16b
+; CHECK-NEXT: add v25.4s, v25.4s, v26.4s
+; CHECK-NEXT: fcmeq v26.4s, v6.4s, v6.4s
+; CHECK-NEXT: orr v6.4s, #64, lsl #16
+; CHECK-NEXT: and v16.16b, v29.16b, v16.16b
+; CHECK-NEXT: add v24.4s, v24.4s, v30.4s
+; CHECK-NEXT: fcmeq v30.4s, v18.4s, v18.4s
+; CHECK-NEXT: add v28.4s, v28.4s, v31.4s
+; CHECK-NEXT: fcmeq v31.4s, v22.4s, v22.4s
+; CHECK-NEXT: fcmeq v29.4s, v4.4s, v4.4s
+; CHECK-NEXT: orr v4.4s, #64, lsl #16
+; CHECK-NEXT: orr v18.4s, #64, lsl #16
+; CHECK-NEXT: orr v22.4s, #64, lsl #16
+; CHECK-NEXT: mov v5.16b, v26.16b
+; CHECK-NEXT: add v2.4s, v16.4s, v2.4s
+; CHECK-NEXT: fcmeq v16.4s, v17.4s, v17.4s
+; CHECK-NEXT: orr v17.4s, #64, lsl #16
+; CHECK-NEXT: uzp2 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: mov v7.16b, v31.16b
+; CHECK-NEXT: bit v4.16b, v23.16b, v29.16b
+; CHECK-NEXT: bsl v5.16b, v25.16b, v6.16b
+; CHECK-NEXT: mov v6.16b, v30.16b
+; CHECK-NEXT: bsl v16.16b, v2.16b, v17.16b
+; CHECK-NEXT: bsl v7.16b, v28.16b, v22.16b
+; CHECK-NEXT: bsl v6.16b, v24.16b, v18.16b
+; CHECK-NEXT: uzp2 v1.8h, v4.8h, v3.8h
+; CHECK-NEXT: uzp2 v3.8h, v16.8h, v7.8h
+; CHECK-NEXT: uzp2 v2.8h, v6.8h, v5.8h
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <32 x i64> %a to <32 x bfloat>
+ ret <32 x bfloat> %c
+}
+
+define <2 x bfloat> @stofp_v2i32_v2bf16(<2 x i32> %a) {
+; CHECK-LABEL: stofp_v2i32_v2bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i32> %a to <2 x bfloat>
+ ret <2 x bfloat> %c
+}
+
+define <2 x bfloat> @utofp_v2i32_v2bf16(<2 x i32> %a) {
+; CHECK-LABEL: utofp_v2i32_v2bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i32> %a to <2 x bfloat>
+ ret <2 x bfloat> %c
+}
+
+define <3 x bfloat> @stofp_v3i32_v3bf16(<3 x i32> %a) {
+; CHECK-LABEL: stofp_v3i32_v3bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i32> %a to <3 x bfloat>
+ ret <3 x bfloat> %c
+}
+
+define <3 x bfloat> @utofp_v3i32_v3bf16(<3 x i32> %a) {
+; CHECK-LABEL: utofp_v3i32_v3bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i32> %a to <3 x bfloat>
+ ret <3 x bfloat> %c
+}
+
+define <4 x bfloat> @stofp_v4i32_v4bf16(<4 x i32> %a) {
+; CHECK-LABEL: stofp_v4i32_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <4 x i32> %a to <4 x bfloat>
+ ret <4 x bfloat> %c
+}
+
+define <4 x bfloat> @utofp_v4i32_v4bf16(<4 x i32> %a) {
+; CHECK-LABEL: utofp_v4i32_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <4 x i32> %a to <4 x bfloat>
+ ret <4 x bfloat> %c
+}
+
+define <8 x bfloat> @stofp_v8i32_v8bf16(<8 x i32> %a) {
+; CHECK-LABEL: stofp_v8i32_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: scvtf v1.4s, v1.4s
+; CHECK-NEXT: movi v5.4s, #127, msl #8
+; CHECK-NEXT: ushr v3.4s, v0.4s, #16
+; CHECK-NEXT: ushr v4.4s, v1.4s, #16
+; CHECK-NEXT: and v3.16b, v3.16b, v2.16b
+; CHECK-NEXT: and v2.16b, v4.16b, v2.16b
+; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT: add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v5.4s
+; CHECK-NEXT: addhn2 v0.8h, v1.4s, v5.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <8 x i32> %a to <8 x bfloat>
+ ret <8 x bfloat> %c
+}
+
+define <8 x bfloat> @utofp_v8i32_v8bf16(<8 x i32> %a) {
+; CHECK-LABEL: utofp_v8i32_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: ucvtf v1.4s, v1.4s
+; CHECK-NEXT: movi v5.4s, #127, msl #8
+; CHECK-NEXT: ushr v3.4s, v0.4s, #16
+; CHECK-NEXT: ushr v4.4s, v1.4s, #16
+; CHECK-NEXT: and v3.16b, v3.16b, v2.16b
+; CHECK-NEXT: and v2.16b, v4.16b, v2.16b
+; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT: add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v5.4s
+; CHECK-NEXT: addhn2 v0.8h, v1.4s, v5.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <8 x i32> %a to <8 x bfloat>
+ ret <8 x bfloat> %c
+}
+
+define <16 x bfloat> @stofp_v16i32_v16bf16(<16 x i32> %a) {
+; CHECK-LABEL: stofp_v16i32_v16bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v2.4s, v2.4s
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: scvtf v4.4s, v1.4s
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: scvtf v3.4s, v3.4s
+; CHECK-NEXT: movi v17.4s, #127, msl #8
+; CHECK-NEXT: ushr v5.4s, v0.4s, #16
+; CHECK-NEXT: ushr v6.4s, v2.4s, #16
+; CHECK-NEXT: ushr v7.4s, v4.4s, #16
+; CHECK-NEXT: ushr v16.4s, v3.4s, #16
+; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
+; CHECK-NEXT: and v6.16b, v6.16b, v1.16b
+; CHECK-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-NEXT: add v2.4s, v6.4s, v2.4s
+; CHECK-NEXT: and v5.16b, v7.16b, v1.16b
+; CHECK-NEXT: and v6.16b, v16.16b, v1.16b
+; CHECK-NEXT: addhn v0.4h, v0.4s, v17.4s
+; CHECK-NEXT: addhn v1.4h, v2.4s, v17.4s
+; CHECK-NEXT: add v2.4s, v5.4s, v4.4s
+; CHECK-NEXT: add v3.4s, v6.4s, v3.4s
+; CHECK-NEXT: addhn2 v0.8h, v2.4s, v17.4s
+; CHECK-NEXT: addhn2 v1.8h, v3.4s, v17.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <16 x i32> %a to <16 x bfloat>
+ ret <16 x bfloat> %c
+}
+
+define <16 x bfloat> @utofp_v16i32_v16bf16(<16 x i32> %a) {
+; CHECK-LABEL: utofp_v16i32_v16bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v2.4s, v2.4s
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: ucvtf v4.4s, v1.4s
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ucvtf v3.4s, v3.4s
+; CHECK-NEXT: movi v17.4s, #127, msl #8
+; CHECK-NEXT: ushr v5.4s, v0.4s, #16
+; CHECK-NEXT: ushr v6.4s, v2.4s, #16
+; CHECK-NEXT: ushr v7.4s, v4.4s, #16
+; CHECK-NEXT: ushr v16.4s, v3.4s, #16
+; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
+; CHECK-NEXT: and v6.16b, v6.16b, v1.16b
+; CHECK-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-NEXT: add v2.4s, v6.4s, v2.4s
+; CHECK-NEXT: and v5.16b, v7.16b, v1.16b
+; CHECK-NEXT: and v6.16b, v16.16b, v1.16b
+; CHECK-NEXT: addhn v0.4h, v0.4s, v17.4s
+; CHECK-NEXT: addhn v1.4h, v2.4s, v17.4s
+; CHECK-NEXT: add v2.4s, v5.4s, v4.4s
+; CHECK-NEXT: add v3.4s, v6.4s, v3.4s
+; CHECK-NEXT: addhn2 v0.8h, v2.4s, v17.4s
+; CHECK-NEXT: addhn2 v1.8h, v3.4s, v17.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <16 x i32> %a to <16 x bfloat>
+ ret <16 x bfloat> %c
+}
+
+define <32 x bfloat> @stofp_v32i32_v32bf16(<32 x i32> %a) {
+; CHECK-LABEL: stofp_v32i32_v32bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: scvtf v2.4s, v2.4s
+; CHECK-NEXT: scvtf v4.4s, v4.4s
+; CHECK-NEXT: scvtf v6.4s, v6.4s
+; CHECK-NEXT: movi v16.4s, #1
+; CHECK-NEXT: scvtf v1.4s, v1.4s
+; CHECK-NEXT: scvtf v17.4s, v3.4s
+; CHECK-NEXT: scvtf v5.4s, v5.4s
+; CHECK-NEXT: scvtf v7.4s, v7.4s
+; CHECK-NEXT: movi v21.4s, #127, msl #8
+; CHECK-NEXT: ushr v3.4s, v0.4s, #16
+; CHECK-NEXT: ushr v18.4s, v2.4s, #16
+; CHECK-NEXT: ushr v19.4s, v4.4s, #16
+; CHECK-NEXT: ushr v20.4s, v6.4s, #16
+; CHECK-NEXT: ushr v22.4s, v1.4s, #16
+; CHECK-NEXT: ushr v23.4s, v17.4s, #16
+; CHECK-NEXT: ushr v24.4s, v5.4s, #16
+; CHECK-NEXT: ushr v25.4s, v7.4s, #16
+; CHECK-NEXT: and v3.16b, v3.16b, v16.16b
+; CHECK-NEXT: and v18.16b, v18.16b, v16.16b
+; CHECK-NEXT: and v19.16b, v19.16b, v16.16b
+; CHECK-NEXT: and v20.16b, v20.16b, v16.16b
+; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT: and v3.16b, v22.16b, v16.16b
+; CHECK-NEXT: add v2.4s, v18.4s, v2.4s
+; CHECK-NEXT: add v4.4s, v19.4s, v4.4s
+; CHECK-NEXT: add v6.4s, v20.4s, v6.4s
+; CHECK-NEXT: and v18.16b, v23.16b, v16.16b
+; CHECK-NEXT: and v19.16b, v24.16b, v16.16b
+; CHECK-NEXT: and v16.16b, v25.16b, v16.16b
+; CHECK-NEXT: add v20.4s, v3.4s, v1.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v21.4s
+; CHECK-NEXT: addhn v1.4h, v2.4s, v21.4s
+; CHECK-NEXT: addhn v2.4h, v4.4s, v21.4s
+; CHECK-NEXT: addhn v3.4h, v6.4s, v21.4s
+; CHECK-NEXT: add v4.4s, v18.4s, v17.4s
+; CHECK-NEXT: add v5.4s, v19.4s, v5.4s
+; CHECK-NEXT: add v6.4s, v16.4s, v7.4s
+; CHECK-NEXT: addhn2 v0.8h, v20.4s, v21.4s
+; CHECK-NEXT: addhn2 v1.8h, v4.4s, v21.4s
+; CHECK-NEXT: addhn2 v2.8h, v5.4s, v21.4s
+; CHECK-NEXT: addhn2 v3.8h, v6.4s, v21.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <32 x i32> %a to <32 x bfloat>
+ ret <32 x bfloat> %c
+}
+
+define <32 x bfloat> @utofp_v32i32_v32bf16(<32 x i32> %a) {
+; CHECK-LABEL: utofp_v32i32_v32bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: ucvtf v2.4s, v2.4s
+; CHECK-NEXT: ucvtf v4.4s, v4.4s
+; CHECK-NEXT: ucvtf v6.4s, v6.4s
+; CHECK-NEXT: movi v16.4s, #1
+; CHECK-NEXT: ucvtf v1.4s, v1.4s
+; CHECK-NEXT: ucvtf v17.4s, v3.4s
+; CHECK-NEXT: ucvtf v5.4s, v5.4s
+; CHECK-NEXT: ucvtf v7.4s, v7.4s
+; CHECK-NEXT: movi v21.4s, #127, msl #8
+; CHECK-NEXT: ushr v3.4s, v0.4s, #16
+; CHECK-NEXT: ushr v18.4s, v2.4s, #16
+; CHECK-NEXT: ushr v19.4s, v4.4s, #16
+; CHECK-NEXT: ushr v20.4s, v6.4s, #16
+; CHECK-NEXT: ushr v22.4s, v1.4s, #16
+; CHECK-NEXT: ushr v23.4s, v17.4s, #16
+; CHECK-NEXT: ushr v24.4s, v5.4s, #16
+; CHECK-NEXT: ushr v25.4s, v7.4s, #16
+; CHECK-NEXT: and v3.16b, v3.16b, v16.16b
+; CHECK-NEXT: and v18.16b, v18.16b, v16.16b
+; CHECK-NEXT: and v19.16b, v19.16b, v16.16b
+; CHECK-NEXT: and v20.16b, v20.16b, v16.16b
+; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT: and v3.16b, v22.16b, v16.16b
+; CHECK-NEXT: add v2.4s, v18.4s, v2.4s
+; CHECK-NEXT: add v4.4s, v19.4s, v4.4s
+; CHECK-NEXT: add v6.4s, v20.4s, v6.4s
+; CHECK-NEXT: and v18.16b, v23.16b, v16.16b
+; CHECK-NEXT: and v19.16b, v24.16b, v16.16b
+; CHECK-NEXT: and v16.16b, v25.16b, v16.16b
+; CHECK-NEXT: add v20.4s, v3.4s, v1.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v21.4s
+; CHECK-NEXT: addhn v1.4h, v2.4s, v21.4s
+; CHECK-NEXT: addhn v2.4h, v4.4s, v21.4s
+; CHECK-NEXT: addhn v3.4h, v6.4s, v21.4s
+; CHECK-NEXT: add v4.4s, v18.4s, v17.4s
+; CHECK-NEXT: add v5.4s, v19.4s, v5.4s
+; CHECK-NEXT: add v6.4s, v16.4s, v7.4s
+; CHECK-NEXT: addhn2 v0.8h, v20.4s, v21.4s
+; CHECK-NEXT: addhn2 v1.8h, v4.4s, v21.4s
+; CHECK-NEXT: addhn2 v2.8h, v5.4s, v21.4s
+; CHECK-NEXT: addhn2 v3.8h, v6.4s, v21.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <32 x i32> %a to <32 x bfloat>
+ ret <32 x bfloat> %c
+}
+
+define <2 x bfloat> @stofp_v2i16_v2bf16(<2 x i16> %a) {
+; CHECK-LABEL: stofp_v2i16_v2bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i16> %a to <2 x bfloat>
+ ret <2 x bfloat> %c
+}
+
+define <2 x bfloat> @utofp_v2i16_v2bf16(<2 x i16> %a) {
+; CHECK-LABEL: utofp_v2i16_v2bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i16> %a to <2 x bfloat>
+ ret <2 x bfloat> %c
+}
+
+define <3 x bfloat> @stofp_v3i16_v3bf16(<3 x i16> %a) {
+; CHECK-LABEL: stofp_v3i16_v3bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i16> %a to <3 x bfloat>
+ ret <3 x bfloat> %c
+}
+
+define <3 x bfloat> @utofp_v3i16_v3bf16(<3 x i16> %a) {
+; CHECK-LABEL: utofp_v3i16_v3bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i16> %a to <3 x bfloat>
+ ret <3 x bfloat> %c
+}
+
+define <4 x bfloat> @stofp_v4i16_v4bf16(<4 x i16> %a) {
+; CHECK-LABEL: stofp_v4i16_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <4 x i16> %a to <4 x bfloat>
+ ret <4 x bfloat> %c
+}
+
+define <4 x bfloat> @utofp_v4i16_v4bf16(<4 x i16> %a) {
+; CHECK-LABEL: utofp_v4i16_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <4 x i16> %a to <4 x bfloat>
+ ret <4 x bfloat> %c
+}
+
+define <8 x bfloat> @stofp_v8i16_v8bf16(<8 x i16> %a) {
+; CHECK-LABEL: stofp_v8i16_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshll v2.4s, v0.4h, #0
+; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v4.4s, #127, msl #8
+; CHECK-NEXT: scvtf v2.4s, v2.4s
+; CHECK-NEXT: scvtf v3.4s, v0.4s
+; CHECK-NEXT: ushr v0.4s, v2.4s, #16
+; CHECK-NEXT: ushr v5.4s, v3.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v4.4s
+; CHECK-NEXT: addhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT: addhn2 v0.8h, v3.4s, v1.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <8 x i16> %a to <8 x bfloat>
+ ret <8 x bfloat> %c
+}
+
+define <8 x bfloat> @utofp_v8i16_v8bf16(<8 x i16> %a) {
+; CHECK-LABEL: utofp_v8i16_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushll v2.4s, v0.4h, #0
+; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v4.4s, #127, msl #8
+; CHECK-NEXT: ucvtf v2.4s, v2.4s
+; CHECK-NEXT: ucvtf v3.4s, v0.4s
+; CHECK-NEXT: ushr v0.4s, v2.4s, #16
+; CHECK-NEXT: ushr v5.4s, v3.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v4.4s
+; CHECK-NEXT: addhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT: addhn2 v0.8h, v3.4s, v1.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <8 x i16> %a to <8 x bfloat>
+ ret <8 x bfloat> %c
+}
+
+define <16 x bfloat> @stofp_v16i16_v16bf16(<16 x i16> %a) {
+; CHECK-LABEL: stofp_v16i16_v16bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshll v3.4s, v0.4h, #0
+; CHECK-NEXT: sshll v4.4s, v1.4h, #0
+; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: sshll2 v1.4s, v1.8h, #0
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: movi v7.4s, #127, msl #8
+; CHECK-NEXT: scvtf v3.4s, v3.4s
+; CHECK-NEXT: scvtf v4.4s, v4.4s
+; CHECK-NEXT: scvtf v5.4s, v0.4s
+; CHECK-NEXT: scvtf v6.4s, v1.4s
+; CHECK-NEXT: ushr v0.4s, v3.4s, #16
+; CHECK-NEXT: ushr v1.4s, v4.4s, #16
+; CHECK-NEXT: ushr v16.4s, v5.4s, #16
+; CHECK-NEXT: ushr v17.4s, v6.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT: and v16.16b, v16.16b, v2.16b
+; CHECK-NEXT: and v2.16b, v17.16b, v2.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v7.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v7.4s
+; CHECK-NEXT: add v2.4s, v2.4s, v7.4s
+; CHECK-NEXT: addhn v0.4h, v3.4s, v0.4s
+; CHECK-NEXT: addhn v1.4h, v4.4s, v1.4s
+; CHECK-NEXT: add v3.4s, v16.4s, v7.4s
+; CHECK-NEXT: addhn2 v0.8h, v5.4s, v3.4s
+; CHECK-NEXT: addhn2 v1.8h, v6.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <16 x i16> %a to <16 x bfloat>
+ ret <16 x bfloat> %c
+}
+
+define <16 x bfloat> @utofp_v16i16_v16bf16(<16 x i16> %a) {
+; CHECK-LABEL: utofp_v16i16_v16bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushll v3.4s, v0.4h, #0
+; CHECK-NEXT: ushll v4.4s, v1.4h, #0
+; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: ushll2 v1.4s, v1.8h, #0
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: movi v7.4s, #127, msl #8
+; CHECK-NEXT: ucvtf v3.4s, v3.4s
+; CHECK-NEXT: ucvtf v4.4s, v4.4s
+; CHECK-NEXT: ucvtf v5.4s, v0.4s
+; CHECK-NEXT: ucvtf v6.4s, v1.4s
+; CHECK-NEXT: ushr v0.4s, v3.4s, #16
+; CHECK-NEXT: ushr v1.4s, v4.4s, #16
+; CHECK-NEXT: ushr v16.4s, v5.4s, #16
+; CHECK-NEXT: ushr v17.4s, v6.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT: and v16.16b, v16.16b, v2.16b
+; CHECK-NEXT: and v2.16b, v17.16b, v2.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v7.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v7.4s
+; CHECK-NEXT: add v2.4s, v2.4s, v7.4s
+; CHECK-NEXT: addhn v0.4h, v3.4s, v0.4s
+; CHECK-NEXT: addhn v1.4h, v4.4s, v1.4s
+; CHECK-NEXT: add v3.4s, v16.4s, v7.4s
+; CHECK-NEXT: addhn2 v0.8h, v5.4s, v3.4s
+; CHECK-NEXT: addhn2 v1.8h, v6.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <16 x i16> %a to <16 x bfloat>
+ ret <16 x bfloat> %c
+}
+
+define <32 x bfloat> @stofp_v32i16_v32bf16(<32 x i16> %a) {
+; CHECK-LABEL: stofp_v32i16_v32bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshll v4.4s, v1.4h, #0
+; CHECK-NEXT: sshll v5.4s, v0.4h, #0
+; CHECK-NEXT: sshll v6.4s, v2.4h, #0
+; CHECK-NEXT: sshll v7.4s, v3.4h, #0
+; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: sshll2 v1.4s, v1.8h, #0
+; CHECK-NEXT: sshll2 v2.4s, v2.8h, #0
+; CHECK-NEXT: sshll2 v3.4s, v3.8h, #0
+; CHECK-NEXT: movi v16.4s, #1
+; CHECK-NEXT: scvtf v5.4s, v5.4s
+; CHECK-NEXT: scvtf v4.4s, v4.4s
+; CHECK-NEXT: scvtf v6.4s, v6.4s
+; CHECK-NEXT: scvtf v7.4s, v7.4s
+; CHECK-NEXT: scvtf v17.4s, v0.4s
+; CHECK-NEXT: scvtf v18.4s, v1.4s
+; CHECK-NEXT: scvtf v19.4s, v2.4s
+; CHECK-NEXT: scvtf v20.4s, v3.4s
+; CHECK-NEXT: movi v21.4s, #127, msl #8
+; CHECK-NEXT: ushr v0.4s, v5.4s, #16
+; CHECK-NEXT: ushr v1.4s, v4.4s, #16
+; CHECK-NEXT: ushr v2.4s, v6.4s, #16
+; CHECK-NEXT: ushr v3.4s, v7.4s, #16
+; CHECK-NEXT: ushr v22.4s, v17.4s, #16
+; CHECK-NEXT: ushr v23.4s, v18.4s, #16
+; CHECK-NEXT: ushr v24.4s, v19.4s, #16
+; CHECK-NEXT: ushr v25.4s, v20.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v16.16b
+; CHECK-NEXT: and v1.16b, v1.16b, v16.16b
+; CHECK-NEXT: and v2.16b, v2.16b, v16.16b
+; CHECK-NEXT: and v3.16b, v3.16b, v16.16b
+; CHECK-NEXT: and v22.16b, v22.16b, v16.16b
+; CHECK-NEXT: and v23.16b, v23.16b, v16.16b
+; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
+; CHECK-NEXT: and v16.16b, v25.16b, v16.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v21.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v21.4s
+; CHECK-NEXT: add v2.4s, v2.4s, v21.4s
+; CHECK-NEXT: add v3.4s, v3.4s, v21.4s
+; CHECK-NEXT: addhn v0.4h, v5.4s, v0.4s
+; CHECK-NEXT: addhn v1.4h, v4.4s, v1.4s
+; CHECK-NEXT: addhn v2.4h, v6.4s, v2.4s
+; CHECK-NEXT: addhn v3.4h, v7.4s, v3.4s
+; CHECK-NEXT: add v4.4s, v22.4s, v21.4s
+; CHECK-NEXT: add v5.4s, v23.4s, v21.4s
+; CHECK-NEXT: add v6.4s, v24.4s, v21.4s
+; CHECK-NEXT: add v7.4s, v16.4s, v21.4s
+; CHECK-NEXT: addhn2 v0.8h, v17.4s, v4.4s
+; CHECK-NEXT: addhn2 v1.8h, v18.4s, v5.4s
+; CHECK-NEXT: addhn2 v2.8h, v19.4s, v6.4s
+; CHECK-NEXT: addhn2 v3.8h, v20.4s, v7.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <32 x i16> %a to <32 x bfloat>
+ ret <32 x bfloat> %c
+}
+
+define <32 x bfloat> @utofp_v32i16_v32bf16(<32 x i16> %a) {
+; CHECK-LABEL: utofp_v32i16_v32bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushll v4.4s, v1.4h, #0
+; CHECK-NEXT: ushll v5.4s, v0.4h, #0
+; CHECK-NEXT: ushll v6.4s, v2.4h, #0
+; CHECK-NEXT: ushll v7.4s, v3.4h, #0
+; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: ushll2 v1.4s, v1.8h, #0
+; CHECK-NEXT: ushll2 v2.4s, v2.8h, #0
+; CHECK-NEXT: ushll2 v3.4s, v3.8h, #0
+; CHECK-NEXT: movi v16.4s, #1
+; CHECK-NEXT: ucvtf v5.4s, v5.4s
+; CHECK-NEXT: ucvtf v4.4s, v4.4s
+; CHECK-NEXT: ucvtf v6.4s, v6.4s
+; CHECK-NEXT: ucvtf v7.4s, v7.4s
+; CHECK-NEXT: ucvtf v17.4s, v0.4s
+; CHECK-NEXT: ucvtf v18.4s, v1.4s
+; CHECK-NEXT: ucvtf v19.4s, v2.4s
+; CHECK-NEXT: ucvtf v20.4s, v3.4s
+; CHECK-NEXT: movi v21.4s, #127, msl #8
+; CHECK-NEXT: ushr v0.4s, v5.4s, #16
+; CHECK-NEXT: ushr v1.4s, v4.4s, #16
+; CHECK-NEXT: ushr v2.4s, v6.4s, #16
+; CHECK-NEXT: ushr v3.4s, v7.4s, #16
+; CHECK-NEXT: ushr v22.4s, v17.4s, #16
+; CHECK-NEXT: ushr v23.4s, v18.4s, #16
+; CHECK-NEXT: ushr v24.4s, v19.4s, #16
+; CHECK-NEXT: ushr v25.4s, v20.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v16.16b
+; CHECK-NEXT: and v1.16b, v1.16b, v16.16b
+; CHECK-NEXT: and v2.16b, v2.16b, v16.16b
+; CHECK-NEXT: and v3.16b, v3.16b, v16.16b
+; CHECK-NEXT: and v22.16b, v22.16b, v16.16b
+; CHECK-NEXT: and v23.16b, v23.16b, v16.16b
+; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
+; CHECK-NEXT: and v16.16b, v25.16b, v16.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v21.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v21.4s
+; CHECK-NEXT: add v2.4s, v2.4s, v21.4s
+; CHECK-NEXT: add v3.4s, v3.4s, v21.4s
+; CHECK-NEXT: addhn v0.4h, v5.4s, v0.4s
+; CHECK-NEXT: addhn v1.4h, v4.4s, v1.4s
+; CHECK-NEXT: addhn v2.4h, v6.4s, v2.4s
+; CHECK-NEXT: addhn v3.4h, v7.4s, v3.4s
+; CHECK-NEXT: add v4.4s, v22.4s, v21.4s
+; CHECK-NEXT: add v5.4s, v23.4s, v21.4s
+; CHECK-NEXT: add v6.4s, v24.4s, v21.4s
+; CHECK-NEXT: add v7.4s, v16.4s, v21.4s
+; CHECK-NEXT: addhn2 v0.8h, v17.4s, v4.4s
+; CHECK-NEXT: addhn2 v1.8h, v18.4s, v5.4s
+; CHECK-NEXT: addhn2 v2.8h, v19.4s, v6.4s
+; CHECK-NEXT: addhn2 v3.8h, v20.4s, v7.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <32 x i16> %a to <32 x bfloat>
+ ret <32 x bfloat> %c
+}
+
+define <2 x bfloat> @stofp_v2i8_v2bf16(<2 x i8> %a) {
+; CHECK-LABEL: stofp_v2i8_v2bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: mov w9, v0.s[1]
+; CHECK-NEXT: fmov w10, s0
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: sxtb w10, w10
+; CHECK-NEXT: sxtb w9, w9
+; CHECK-NEXT: scvtf s1, w10
+; CHECK-NEXT: scvtf s0, w9
+; CHECK-NEXT: fmov w10, s1
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w12, w10, #16, #1
+; CHECK-NEXT: ubfx w11, w9, #16, #1
+; CHECK-NEXT: add w9, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: add w8, w12, w8
+; CHECK-NEXT: add w9, w11, w9
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: lsr w9, w9, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: fmov s1, w9
+; CHECK-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i8> %a to <2 x bfloat>
+ ret <2 x bfloat> %c
+}
+
+define <2 x bfloat> @utofp_v2i8_v2bf16(<2 x i8> %a) {
+; CHECK-LABEL: utofp_v2i8_v2bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: mov w9, v0.s[1]
+; CHECK-NEXT: fmov w10, s0
+; CHECK-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-NEXT: and w10, w10, #0xff
+; CHECK-NEXT: and w9, w9, #0xff
+; CHECK-NEXT: ucvtf s1, w10
+; CHECK-NEXT: ucvtf s0, w9
+; CHECK-NEXT: fmov w10, s1
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: ubfx w12, w10, #16, #1
+; CHECK-NEXT: ubfx w11, w9, #16, #1
+; CHECK-NEXT: add w9, w9, w8
+; CHECK-NEXT: add w8, w10, w8
+; CHECK-NEXT: add w8, w12, w8
+; CHECK-NEXT: add w9, w11, w9
+; CHECK-NEXT: lsr w8, w8, #16
+; CHECK-NEXT: lsr w9, w9, #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: fmov s1, w9
+; CHECK-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i8> %a to <2 x bfloat>
+ ret <2 x bfloat> %c
+}
+
+define <3 x bfloat> @stofp_v3i8_v3bf16(<3 x i8> %a) {
+; CHECK-LABEL: stofp_v3i8_v3bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov s0, w0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: mov v0.h[1], w1
+; CHECK-NEXT: mov v0.h[2], w2
+; CHECK-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i8> %a to <3 x bfloat>
+ ret <3 x bfloat> %c
+}
+
+define <3 x bfloat> @utofp_v3i8_v3bf16(<3 x i8> %a) {
+; CHECK-LABEL: utofp_v3i8_v3bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov s0, w0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: mov v0.h[1], w1
+; CHECK-NEXT: mov v0.h[2], w2
+; CHECK-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i8> %a to <3 x bfloat>
+ ret <3 x bfloat> %c
+}
+
+define <4 x bfloat> @stofp_v4i8_v4bf16(<4 x i8> %a) {
+; CHECK-LABEL: stofp_v4i8_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <4 x i8> %a to <4 x bfloat>
+ ret <4 x bfloat> %c
+}
+
+define <4 x bfloat> @utofp_v4i8_v4bf16(<4 x i8> %a) {
+; CHECK-LABEL: utofp_v4i8_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: ushr v2.4s, v0.4s, #16
+; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #127, msl #8
+; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <4 x i8> %a to <4 x bfloat>
+ ret <4 x bfloat> %c
+}
+
+define <8 x bfloat> @stofp_v8i8_v8bf16(<8 x i8> %a) {
+; CHECK-LABEL: stofp_v8i8_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshll v0.8h, v0.8b, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v4.4s, #127, msl #8
+; CHECK-NEXT: sshll v2.4s, v0.4h, #0
+; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: scvtf v2.4s, v2.4s
+; CHECK-NEXT: scvtf v3.4s, v0.4s
+; CHECK-NEXT: ushr v0.4s, v2.4s, #16
+; CHECK-NEXT: ushr v5.4s, v3.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v4.4s
+; CHECK-NEXT: addhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT: addhn2 v0.8h, v3.4s, v1.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <8 x i8> %a to <8 x bfloat>
+ ret <8 x bfloat> %c
+}
+
+define <8 x bfloat> @utofp_v8i8_v8bf16(<8 x i8> %a) {
+; CHECK-LABEL: utofp_v8i8_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v4.4s, #127, msl #8
+; CHECK-NEXT: ushll v2.4s, v0.4h, #0
+; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: ucvtf v2.4s, v2.4s
+; CHECK-NEXT: ucvtf v3.4s, v0.4s
+; CHECK-NEXT: ushr v0.4s, v2.4s, #16
+; CHECK-NEXT: ushr v5.4s, v3.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v4.4s
+; CHECK-NEXT: addhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT: addhn2 v0.8h, v3.4s, v1.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <8 x i8> %a to <8 x bfloat>
+ ret <8 x bfloat> %c
+}
+
+define <16 x bfloat> @stofp_v16i8_v16bf16(<16 x i8> %a) {
+; CHECK-LABEL: stofp_v16i8_v16bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshll2 v2.8h, v0.16b, #0
+; CHECK-NEXT: sshll v0.8h, v0.8b, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v7.4s, #127, msl #8
+; CHECK-NEXT: sshll v3.4s, v2.4h, #0
+; CHECK-NEXT: sshll v4.4s, v0.4h, #0
+; CHECK-NEXT: sshll2 v2.4s, v2.8h, #0
+; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: scvtf v3.4s, v3.4s
+; CHECK-NEXT: scvtf v4.4s, v4.4s
+; CHECK-NEXT: scvtf v2.4s, v2.4s
+; CHECK-NEXT: scvtf v6.4s, v0.4s
+; CHECK-NEXT: ushr v5.4s, v3.4s, #16
+; CHECK-NEXT: ushr v0.4s, v4.4s, #16
+; CHECK-NEXT: ushr v16.4s, v2.4s, #16
+; CHECK-NEXT: ushr v17.4s, v6.4s, #16
+; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: and v16.16b, v16.16b, v1.16b
+; CHECK-NEXT: and v17.16b, v17.16b, v1.16b
+; CHECK-NEXT: add v5.4s, v5.4s, v7.4s
+; CHECK-NEXT: add v0.4s, v0.4s, v7.4s
+; CHECK-NEXT: addhn v1.4h, v3.4s, v5.4s
+; CHECK-NEXT: addhn v0.4h, v4.4s, v0.4s
+; CHECK-NEXT: add v3.4s, v16.4s, v7.4s
+; CHECK-NEXT: add v4.4s, v17.4s, v7.4s
+; CHECK-NEXT: addhn2 v1.8h, v2.4s, v3.4s
+; CHECK-NEXT: addhn2 v0.8h, v6.4s, v4.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <16 x i8> %a to <16 x bfloat>
+ ret <16 x bfloat> %c
+}
+
+define <16 x bfloat> @utofp_v16i8_v16bf16(<16 x i8> %a) {
+; CHECK-LABEL: utofp_v16i8_v16bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushll2 v2.8h, v0.16b, #0
+; CHECK-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: movi v7.4s, #127, msl #8
+; CHECK-NEXT: ushll v3.4s, v2.4h, #0
+; CHECK-NEXT: ushll v4.4s, v0.4h, #0
+; CHECK-NEXT: ushll2 v2.4s, v2.8h, #0
+; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: ucvtf v3.4s, v3.4s
+; CHECK-NEXT: ucvtf v4.4s, v4.4s
+; CHECK-NEXT: ucvtf v2.4s, v2.4s
+; CHECK-NEXT: ucvtf v6.4s, v0.4s
+; CHECK-NEXT: ushr v5.4s, v3.4s, #16
+; CHECK-NEXT: ushr v0.4s, v4.4s, #16
+; CHECK-NEXT: ushr v16.4s, v2.4s, #16
+; CHECK-NEXT: ushr v17.4s, v6.4s, #16
+; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: and v16.16b, v16.16b, v1.16b
+; CHECK-NEXT: and v17.16b, v17.16b, v1.16b
+; CHECK-NEXT: add v5.4s, v5.4s, v7.4s
+; CHECK-NEXT: add v0.4s, v0.4s, v7.4s
+; CHECK-NEXT: addhn v1.4h, v3.4s, v5.4s
+; CHECK-NEXT: addhn v0.4h, v4.4s, v0.4s
+; CHECK-NEXT: add v3.4s, v16.4s, v7.4s
+; CHECK-NEXT: add v4.4s, v17.4s, v7.4s
+; CHECK-NEXT: addhn2 v1.8h, v2.4s, v3.4s
+; CHECK-NEXT: addhn2 v0.8h, v6.4s, v4.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <16 x i8> %a to <16 x bfloat>
+ ret <16 x bfloat> %c
+}
+
+define <32 x bfloat> @stofp_v32i8_v32bf16(<32 x i8> %a) {
+; CHECK-LABEL: stofp_v32i8_v32bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshll2 v3.8h, v0.16b, #0
+; CHECK-NEXT: sshll v0.8h, v0.8b, #0
+; CHECK-NEXT: sshll2 v4.8h, v1.16b, #0
+; CHECK-NEXT: sshll v1.8h, v1.8b, #0
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: movi v21.4s, #127, msl #8
+; CHECK-NEXT: sshll v5.4s, v3.4h, #0
+; CHECK-NEXT: sshll v6.4s, v0.4h, #0
+; CHECK-NEXT: sshll v7.4s, v4.4h, #0
+; CHECK-NEXT: sshll v16.4s, v1.4h, #0
+; CHECK-NEXT: sshll2 v3.4s, v3.8h, #0
+; CHECK-NEXT: sshll2 v4.4s, v4.8h, #0
+; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: sshll2 v1.4s, v1.8h, #0
+; CHECK-NEXT: scvtf v5.4s, v5.4s
+; CHECK-NEXT: scvtf v6.4s, v6.4s
+; CHECK-NEXT: scvtf v7.4s, v7.4s
+; CHECK-NEXT: scvtf v16.4s, v16.4s
+; CHECK-NEXT: scvtf v17.4s, v3.4s
+; CHECK-NEXT: scvtf v4.4s, v4.4s
+; CHECK-NEXT: scvtf v18.4s, v0.4s
+; CHECK-NEXT: scvtf v19.4s, v1.4s
+; CHECK-NEXT: ushr v0.4s, v5.4s, #16
+; CHECK-NEXT: ushr v3.4s, v6.4s, #16
+; CHECK-NEXT: ushr v1.4s, v7.4s, #16
+; CHECK-NEXT: ushr v20.4s, v16.4s, #16
+; CHECK-NEXT: ushr v23.4s, v17.4s, #16
+; CHECK-NEXT: ushr v24.4s, v4.4s, #16
+; CHECK-NEXT: ushr v22.4s, v18.4s, #16
+; CHECK-NEXT: ushr v25.4s, v19.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: and v3.16b, v3.16b, v2.16b
+; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT: and v20.16b, v20.16b, v2.16b
+; CHECK-NEXT: and v23.16b, v23.16b, v2.16b
+; CHECK-NEXT: and v24.16b, v24.16b, v2.16b
+; CHECK-NEXT: and v22.16b, v22.16b, v2.16b
+; CHECK-NEXT: and v25.16b, v25.16b, v2.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v21.4s
+; CHECK-NEXT: add v3.4s, v3.4s, v21.4s
+; CHECK-NEXT: add v26.4s, v1.4s, v21.4s
+; CHECK-NEXT: add v20.4s, v20.4s, v21.4s
+; CHECK-NEXT: addhn v1.4h, v5.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v6.4s, v3.4s
+; CHECK-NEXT: addhn v3.4h, v7.4s, v26.4s
+; CHECK-NEXT: addhn v2.4h, v16.4s, v20.4s
+; CHECK-NEXT: add v5.4s, v22.4s, v21.4s
+; CHECK-NEXT: add v6.4s, v23.4s, v21.4s
+; CHECK-NEXT: add v7.4s, v24.4s, v21.4s
+; CHECK-NEXT: add v16.4s, v25.4s, v21.4s
+; CHECK-NEXT: addhn2 v0.8h, v18.4s, v5.4s
+; CHECK-NEXT: addhn2 v1.8h, v17.4s, v6.4s
+; CHECK-NEXT: addhn2 v3.8h, v4.4s, v7.4s
+; CHECK-NEXT: addhn2 v2.8h, v19.4s, v16.4s
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <32 x i8> %a to <32 x bfloat>
+ ret <32 x bfloat> %c
+}
+
+define <32 x bfloat> @utofp_v32i8_v32bf16(<32 x i8> %a) {
+; CHECK-LABEL: utofp_v32i8_v32bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushll2 v3.8h, v0.16b, #0
+; CHECK-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-NEXT: ushll2 v4.8h, v1.16b, #0
+; CHECK-NEXT: ushll v1.8h, v1.8b, #0
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: movi v21.4s, #127, msl #8
+; CHECK-NEXT: ushll v5.4s, v3.4h, #0
+; CHECK-NEXT: ushll v6.4s, v0.4h, #0
+; CHECK-NEXT: ushll v7.4s, v4.4h, #0
+; CHECK-NEXT: ushll v16.4s, v1.4h, #0
+; CHECK-NEXT: ushll2 v3.4s, v3.8h, #0
+; CHECK-NEXT: ushll2 v4.4s, v4.8h, #0
+; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: ushll2 v1.4s, v1.8h, #0
+; CHECK-NEXT: ucvtf v5.4s, v5.4s
+; CHECK-NEXT: ucvtf v6.4s, v6.4s
+; CHECK-NEXT: ucvtf v7.4s, v7.4s
+; CHECK-NEXT: ucvtf v16.4s, v16.4s
+; CHECK-NEXT: ucvtf v17.4s, v3.4s
+; CHECK-NEXT: ucvtf v4.4s, v4.4s
+; CHECK-NEXT: ucvtf v18.4s, v0.4s
+; CHECK-NEXT: ucvtf v19.4s, v1.4s
+; CHECK-NEXT: ushr v0.4s, v5.4s, #16
+; CHECK-NEXT: ushr v3.4s, v6.4s, #16
+; CHECK-NEXT: ushr v1.4s, v7.4s, #16
+; CHECK-NEXT: ushr v20.4s, v16.4s, #16
+; CHECK-NEXT: ushr v23.4s, v17.4s, #16
+; CHECK-NEXT: ushr v24.4s, v4.4s, #16
+; CHECK-NEXT: ushr v22.4s, v18.4s, #16
+; CHECK-NEXT: ushr v25.4s, v19.4s, #16
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: and v3.16b, v3.16b, v2.16b
+; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT: and v20.16b, v20.16b, v2.16b
+; CHECK-NEXT: and v23.16b, v23.16b, v2.16b
+; CHECK-NEXT: and v24.16b, v24.16b, v2.16b
+; CHECK-NEXT: and v22.16b, v22.16b, v2.16b
+; CHECK-NEXT: and v25.16b, v25.16b, v2.16b
+; CHECK-NEXT: add v0.4s, v0.4s, v21.4s
+; CHECK-NEXT: add v3.4s, v3.4s, v21.4s
+; CHECK-NEXT: add v26.4s, v1.4s, v21.4s
+; CHECK-NEXT: add v20.4s, v20.4s, v21.4s
+; CHECK-NEXT: addhn v1.4h, v5.4s, v0.4s
+; CHECK-NEXT: addhn v0.4h, v6.4s, v3.4s
+; CHECK-NEXT: addhn v3.4h, v7.4s, v26.4s
+; CHECK-NEXT: addhn v2.4h, v16.4s, v20.4s
+; CHECK-NEXT: add v5.4s, v22.4s, v21.4s
+; CHECK-NEXT: add v6.4s, v23.4s, v21.4s
+; CHECK-NEXT: add v7.4s, v24.4s, v21.4s
+; CHECK-NEXT: add v16.4s, v25.4s, v21.4s
+; CHECK-NEXT: addhn2 v0.8h, v18.4s, v5.4s
+; CHECK-NEXT: addhn2 v1.8h, v17.4s, v6.4s
+; CHECK-NEXT: addhn2 v3.8h, v4.4s, v7.4s
+; CHECK-NEXT: addhn2 v2.8h, v19.4s, v16.4s
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <32 x i8> %a to <32 x bfloat>
+ ret <32 x bfloat> %c
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-GI-FP16: {{.*}}
+; CHECK-GI-NOFP16: {{.*}}
+; CHECK-SD: {{.*}}
+; CHECK-SD-FP16: {{.*}}
+; CHECK-SD-NOFP16: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index ae4ced258bb8e..cdea8f8c5ed21 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -6262,1823 +6262,3 @@ entry:
%c = uitofp <32 x i8> %a to <32 x half>
ret <32 x half> %c
}
-
-define bfloat @stofp_i64_bf16(i64 %a) {
-; CHECK-LABEL: stofp_i64_bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmp x0, #0
-; CHECK-NEXT: and x11, x0, #0x8000000000000000
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: cneg x9, x0, mi
-; CHECK-NEXT: lsr x10, x9, #53
-; CHECK-NEXT: cmp x10, #0
-; CHECK-NEXT: and x10, x9, #0xfffffffffffff000
-; CHECK-NEXT: csel x10, x10, x9, ne
-; CHECK-NEXT: scvtf d0, x10
-; CHECK-NEXT: cset w10, ne
-; CHECK-NEXT: tst x9, #0xfff
-; CHECK-NEXT: csel w10, wzr, w10, eq
-; CHECK-NEXT: fmov x9, d0
-; CHECK-NEXT: orr x9, x9, x11
-; CHECK-NEXT: orr x9, x9, x10
-; CHECK-NEXT: fmov d0, x9
-; CHECK-NEXT: fcvtxn s0, d0
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w10, w9, #16, #1
-; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
-; CHECK-NEXT: ret
-entry:
- %c = sitofp i64 %a to bfloat
- ret bfloat %c
-}
-
-define bfloat @utofp_i64_bf16(i64 %a) {
-; CHECK-LABEL: utofp_i64_bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: lsr x9, x0, #53
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: cmp x9, #0
-; CHECK-NEXT: and x9, x0, #0xfffffffffffff000
-; CHECK-NEXT: csel x9, x9, x0, ne
-; CHECK-NEXT: ucvtf d0, x9
-; CHECK-NEXT: cset w9, ne
-; CHECK-NEXT: tst x0, #0xfff
-; CHECK-NEXT: csel w9, wzr, w9, eq
-; CHECK-NEXT: fmov x10, d0
-; CHECK-NEXT: orr x9, x10, x9
-; CHECK-NEXT: fmov d0, x9
-; CHECK-NEXT: fcvtxn s0, d0
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w10, w9, #16, #1
-; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
-; CHECK-NEXT: ret
-entry:
- %c = uitofp i64 %a to bfloat
- ret bfloat %c
-}
-
-define bfloat @stofp_i32_bf16(i32 %a) {
-; CHECK-LABEL: stofp_i32_bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf d0, w0
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: fcvtxn s0, d0
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w10, w9, #16, #1
-; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
-; CHECK-NEXT: ret
-entry:
- %c = sitofp i32 %a to bfloat
- ret bfloat %c
-}
-
-define bfloat @utofp_i32_bf16(i32 %a) {
-; CHECK-LABEL: utofp_i32_bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf d0, w0
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: fcvtxn s0, d0
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w10, w9, #16, #1
-; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
-; CHECK-NEXT: ret
-entry:
- %c = uitofp i32 %a to bfloat
- ret bfloat %c
-}
-
-define bfloat @stofp_i16_bf16(i16 %a) {
-; CHECK-LABEL: stofp_i16_bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sxth w9, w0
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: scvtf s0, w9
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w10, w9, #16, #1
-; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
-; CHECK-NEXT: ret
-entry:
- %c = sitofp i16 %a to bfloat
- ret bfloat %c
-}
-
-define bfloat @utofp_i16_bf16(i16 %a) {
-; CHECK-LABEL: utofp_i16_bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: and w9, w0, #0xffff
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: ucvtf s0, w9
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w10, w9, #16, #1
-; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
-; CHECK-NEXT: ret
-entry:
- %c = uitofp i16 %a to bfloat
- ret bfloat %c
-}
-
-define bfloat @stofp_i8_bf16(i8 %a) {
-; CHECK-LABEL: stofp_i8_bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sxtb w9, w0
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: scvtf s0, w9
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w10, w9, #16, #1
-; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
-; CHECK-NEXT: ret
-entry:
- %c = sitofp i8 %a to bfloat
- ret bfloat %c
-}
-
-define bfloat @utofp_i8_bf16(i8 %a) {
-; CHECK-LABEL: utofp_i8_bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: and w9, w0, #0xff
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: ucvtf s0, w9
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w10, w9, #16, #1
-; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
-; CHECK-NEXT: ret
-entry:
- %c = uitofp i8 %a to bfloat
- ret bfloat %c
-}
-
-define <2 x bfloat> @stofp_v2i64_v2bf16(<2 x i64> %a) {
-; CHECK-LABEL: stofp_v2i64_v2bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov x9, v0.d[1]
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: cmp x9, #0
-; CHECK-NEXT: cneg x10, x9, mi
-; CHECK-NEXT: and x9, x9, #0x8000000000000000
-; CHECK-NEXT: lsr x11, x10, #53
-; CHECK-NEXT: and x12, x10, #0xfffffffffffff000
-; CHECK-NEXT: cmp x11, #0
-; CHECK-NEXT: csel x11, x12, x10, ne
-; CHECK-NEXT: cset w12, ne
-; CHECK-NEXT: tst x10, #0xfff
-; CHECK-NEXT: fmov x10, d0
-; CHECK-NEXT: csel w12, wzr, w12, eq
-; CHECK-NEXT: scvtf d0, x11
-; CHECK-NEXT: cmp x10, #0
-; CHECK-NEXT: cneg x13, x10, mi
-; CHECK-NEXT: and x10, x10, #0x8000000000000000
-; CHECK-NEXT: lsr x14, x13, #53
-; CHECK-NEXT: cmp x14, #0
-; CHECK-NEXT: and x14, x13, #0xfffffffffffff000
-; CHECK-NEXT: csel x11, x14, x13, ne
-; CHECK-NEXT: cset w14, ne
-; CHECK-NEXT: tst x13, #0xfff
-; CHECK-NEXT: scvtf d1, x11
-; CHECK-NEXT: fmov x11, d0
-; CHECK-NEXT: orr x9, x11, x9
-; CHECK-NEXT: csel w11, wzr, w14, eq
-; CHECK-NEXT: fmov x13, d1
-; CHECK-NEXT: orr x9, x9, x12
-; CHECK-NEXT: fmov d0, x9
-; CHECK-NEXT: orr x10, x13, x10
-; CHECK-NEXT: orr x10, x10, x11
-; CHECK-NEXT: fcvtxn s0, d0
-; CHECK-NEXT: fmov d1, x10
-; CHECK-NEXT: fcvtxn s1, d1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w11, w9, #16, #1
-; CHECK-NEXT: add w9, w9, w8
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: add w9, w11, w9
-; CHECK-NEXT: lsr w9, w9, #16
-; CHECK-NEXT: ubfx w12, w10, #16, #1
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: add w8, w12, w8
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <2 x i64> %a to <2 x bfloat>
- ret <2 x bfloat> %c
-}
-
-define <2 x bfloat> @utofp_v2i64_v2bf16(<2 x i64> %a) {
-; CHECK-LABEL: utofp_v2i64_v2bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov x9, v0.d[1]
-; CHECK-NEXT: fmov x11, d0
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsr x10, x9, #53
-; CHECK-NEXT: and x12, x9, #0xfffffffffffff000
-; CHECK-NEXT: cmp x10, #0
-; CHECK-NEXT: lsr x10, x11, #53
-; CHECK-NEXT: csel x12, x12, x9, ne
-; CHECK-NEXT: cset w13, ne
-; CHECK-NEXT: tst x9, #0xfff
-; CHECK-NEXT: csel w9, wzr, w13, eq
-; CHECK-NEXT: cmp x10, #0
-; CHECK-NEXT: and x10, x11, #0xfffffffffffff000
-; CHECK-NEXT: csel x10, x10, x11, ne
-; CHECK-NEXT: ucvtf d0, x12
-; CHECK-NEXT: ucvtf d1, x10
-; CHECK-NEXT: cset w10, ne
-; CHECK-NEXT: tst x11, #0xfff
-; CHECK-NEXT: csel w10, wzr, w10, eq
-; CHECK-NEXT: fmov x11, d0
-; CHECK-NEXT: fmov x12, d1
-; CHECK-NEXT: orr x9, x11, x9
-; CHECK-NEXT: orr x10, x12, x10
-; CHECK-NEXT: fmov d0, x9
-; CHECK-NEXT: fmov d1, x10
-; CHECK-NEXT: fcvtxn s0, d0
-; CHECK-NEXT: fcvtxn s1, d1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: ubfx w11, w9, #16, #1
-; CHECK-NEXT: add w9, w9, w8
-; CHECK-NEXT: ubfx w12, w10, #16, #1
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: add w9, w11, w9
-; CHECK-NEXT: add w8, w12, w8
-; CHECK-NEXT: lsr w9, w9, #16
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <2 x i64> %a to <2 x bfloat>
- ret <2 x bfloat> %c
-}
-
-define <3 x bfloat> @stofp_v3i64_v3bf16(<3 x i64> %a) {
-; CHECK-LABEL: stofp_v3i64_v3bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-NEXT: scvtf v1.2d, v2.2d
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushr v3.4s, v0.4s, #16
-; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
-; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
-; CHECK-NEXT: fcmeq v3.4s, v0.4s, v0.4s
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-NEXT: bit v0.16b, v1.16b, v3.16b
-; CHECK-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <3 x i64> %a to <3 x bfloat>
- ret <3 x bfloat> %c
-}
-
-define <3 x bfloat> @utofp_v3i64_v3bf16(<3 x i64> %a) {
-; CHECK-LABEL: utofp_v3i64_v3bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-NEXT: ucvtf v1.2d, v2.2d
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushr v3.4s, v0.4s, #16
-; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
-; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
-; CHECK-NEXT: fcmeq v3.4s, v0.4s, v0.4s
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-NEXT: bit v0.16b, v1.16b, v3.16b
-; CHECK-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <3 x i64> %a to <3 x bfloat>
- ret <3 x bfloat> %c
-}
-
-define <4 x bfloat> @stofp_v4i64_v4bf16(<4 x i64> %a) {
-; CHECK-LABEL: stofp_v4i64_v4bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: scvtf v1.2d, v1.2d
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushr v3.4s, v0.4s, #16
-; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
-; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
-; CHECK-NEXT: fcmeq v3.4s, v0.4s, v0.4s
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-NEXT: bit v0.16b, v1.16b, v3.16b
-; CHECK-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <4 x i64> %a to <4 x bfloat>
- ret <4 x bfloat> %c
-}
-
-define <4 x bfloat> @utofp_v4i64_v4bf16(<4 x i64> %a) {
-; CHECK-LABEL: utofp_v4i64_v4bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ucvtf v1.2d, v1.2d
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushr v3.4s, v0.4s, #16
-; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
-; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
-; CHECK-NEXT: fcmeq v3.4s, v0.4s, v0.4s
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-NEXT: bit v0.16b, v1.16b, v3.16b
-; CHECK-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <4 x i64> %a to <4 x bfloat>
- ret <4 x bfloat> %c
-}
-
-define <8 x bfloat> @stofp_v8i64_v8bf16(<8 x i64> %a) {
-; CHECK-LABEL: stofp_v8i64_v8bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v2.2d, v2.2d
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: scvtf v3.2d, v3.2d
-; CHECK-NEXT: scvtf v1.2d, v1.2d
-; CHECK-NEXT: fcvtn v2.2s, v2.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v3.4s, #127, msl #8
-; CHECK-NEXT: ushr v4.4s, v2.4s, #16
-; CHECK-NEXT: ushr v5.4s, v0.4s, #16
-; CHECK-NEXT: add v6.4s, v2.4s, v3.4s
-; CHECK-NEXT: add v3.4s, v0.4s, v3.4s
-; CHECK-NEXT: and v4.16b, v4.16b, v1.16b
-; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
-; CHECK-NEXT: fcmeq v5.4s, v2.4s, v2.4s
-; CHECK-NEXT: orr v2.4s, #64, lsl #16
-; CHECK-NEXT: add v4.4s, v4.4s, v6.4s
-; CHECK-NEXT: fcmeq v6.4s, v0.4s, v0.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: bit v2.16b, v4.16b, v5.16b
-; CHECK-NEXT: bit v0.16b, v1.16b, v6.16b
-; CHECK-NEXT: uzp2 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <8 x i64> %a to <8 x bfloat>
- ret <8 x bfloat> %c
-}
-
-define <8 x bfloat> @utofp_v8i64_v8bf16(<8 x i64> %a) {
-; CHECK-LABEL: utofp_v8i64_v8bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v2.2d, v2.2d
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ucvtf v3.2d, v3.2d
-; CHECK-NEXT: ucvtf v1.2d, v1.2d
-; CHECK-NEXT: fcvtn v2.2s, v2.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v3.4s, #127, msl #8
-; CHECK-NEXT: ushr v4.4s, v2.4s, #16
-; CHECK-NEXT: ushr v5.4s, v0.4s, #16
-; CHECK-NEXT: add v6.4s, v2.4s, v3.4s
-; CHECK-NEXT: add v3.4s, v0.4s, v3.4s
-; CHECK-NEXT: and v4.16b, v4.16b, v1.16b
-; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
-; CHECK-NEXT: fcmeq v5.4s, v2.4s, v2.4s
-; CHECK-NEXT: orr v2.4s, #64, lsl #16
-; CHECK-NEXT: add v4.4s, v4.4s, v6.4s
-; CHECK-NEXT: fcmeq v6.4s, v0.4s, v0.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: bit v2.16b, v4.16b, v5.16b
-; CHECK-NEXT: bit v0.16b, v1.16b, v6.16b
-; CHECK-NEXT: uzp2 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <8 x i64> %a to <8 x bfloat>
- ret <8 x bfloat> %c
-}
-
-define <16 x bfloat> @stofp_v16i64_v16bf16(<16 x i64> %a) {
-; CHECK-LABEL: stofp_v16i64_v16bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: scvtf v2.2d, v2.2d
-; CHECK-NEXT: scvtf v6.2d, v6.2d
-; CHECK-NEXT: scvtf v4.2d, v4.2d
-; CHECK-NEXT: scvtf v1.2d, v1.2d
-; CHECK-NEXT: scvtf v3.2d, v3.2d
-; CHECK-NEXT: scvtf v7.2d, v7.2d
-; CHECK-NEXT: scvtf v5.2d, v5.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn v2.2s, v2.2d
-; CHECK-NEXT: fcvtn v6.2s, v6.2d
-; CHECK-NEXT: fcvtn v4.2s, v4.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
-; CHECK-NEXT: fcvtn2 v6.4s, v7.2d
-; CHECK-NEXT: fcvtn2 v4.4s, v5.2d
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v3.4s, #127, msl #8
-; CHECK-NEXT: ushr v7.4s, v0.4s, #16
-; CHECK-NEXT: ushr v5.4s, v2.4s, #16
-; CHECK-NEXT: ushr v16.4s, v6.4s, #16
-; CHECK-NEXT: ushr v17.4s, v4.4s, #16
-; CHECK-NEXT: add v19.4s, v0.4s, v3.4s
-; CHECK-NEXT: add v18.4s, v2.4s, v3.4s
-; CHECK-NEXT: add v20.4s, v6.4s, v3.4s
-; CHECK-NEXT: add v3.4s, v4.4s, v3.4s
-; CHECK-NEXT: and v7.16b, v7.16b, v1.16b
-; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
-; CHECK-NEXT: and v16.16b, v16.16b, v1.16b
-; CHECK-NEXT: and v1.16b, v17.16b, v1.16b
-; CHECK-NEXT: fcmeq v17.4s, v2.4s, v2.4s
-; CHECK-NEXT: orr v2.4s, #64, lsl #16
-; CHECK-NEXT: add v7.4s, v7.4s, v19.4s
-; CHECK-NEXT: fcmeq v19.4s, v6.4s, v6.4s
-; CHECK-NEXT: add v5.4s, v5.4s, v18.4s
-; CHECK-NEXT: fcmeq v18.4s, v0.4s, v0.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-NEXT: fcmeq v3.4s, v4.4s, v4.4s
-; CHECK-NEXT: add v16.4s, v16.4s, v20.4s
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: orr v6.4s, #64, lsl #16
-; CHECK-NEXT: orr v4.4s, #64, lsl #16
-; CHECK-NEXT: bit v2.16b, v5.16b, v17.16b
-; CHECK-NEXT: mov v5.16b, v19.16b
-; CHECK-NEXT: bit v0.16b, v7.16b, v18.16b
-; CHECK-NEXT: bif v1.16b, v4.16b, v3.16b
-; CHECK-NEXT: bsl v5.16b, v16.16b, v6.16b
-; CHECK-NEXT: uzp2 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT: uzp2 v1.8h, v1.8h, v5.8h
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <16 x i64> %a to <16 x bfloat>
- ret <16 x bfloat> %c
-}
-
-define <16 x bfloat> @utofp_v16i64_v16bf16(<16 x i64> %a) {
-; CHECK-LABEL: utofp_v16i64_v16bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ucvtf v2.2d, v2.2d
-; CHECK-NEXT: ucvtf v6.2d, v6.2d
-; CHECK-NEXT: ucvtf v4.2d, v4.2d
-; CHECK-NEXT: ucvtf v1.2d, v1.2d
-; CHECK-NEXT: ucvtf v3.2d, v3.2d
-; CHECK-NEXT: ucvtf v7.2d, v7.2d
-; CHECK-NEXT: ucvtf v5.2d, v5.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn v2.2s, v2.2d
-; CHECK-NEXT: fcvtn v6.2s, v6.2d
-; CHECK-NEXT: fcvtn v4.2s, v4.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
-; CHECK-NEXT: fcvtn2 v6.4s, v7.2d
-; CHECK-NEXT: fcvtn2 v4.4s, v5.2d
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v3.4s, #127, msl #8
-; CHECK-NEXT: ushr v7.4s, v0.4s, #16
-; CHECK-NEXT: ushr v5.4s, v2.4s, #16
-; CHECK-NEXT: ushr v16.4s, v6.4s, #16
-; CHECK-NEXT: ushr v17.4s, v4.4s, #16
-; CHECK-NEXT: add v19.4s, v0.4s, v3.4s
-; CHECK-NEXT: add v18.4s, v2.4s, v3.4s
-; CHECK-NEXT: add v20.4s, v6.4s, v3.4s
-; CHECK-NEXT: add v3.4s, v4.4s, v3.4s
-; CHECK-NEXT: and v7.16b, v7.16b, v1.16b
-; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
-; CHECK-NEXT: and v16.16b, v16.16b, v1.16b
-; CHECK-NEXT: and v1.16b, v17.16b, v1.16b
-; CHECK-NEXT: fcmeq v17.4s, v2.4s, v2.4s
-; CHECK-NEXT: orr v2.4s, #64, lsl #16
-; CHECK-NEXT: add v7.4s, v7.4s, v19.4s
-; CHECK-NEXT: fcmeq v19.4s, v6.4s, v6.4s
-; CHECK-NEXT: add v5.4s, v5.4s, v18.4s
-; CHECK-NEXT: fcmeq v18.4s, v0.4s, v0.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-NEXT: fcmeq v3.4s, v4.4s, v4.4s
-; CHECK-NEXT: add v16.4s, v16.4s, v20.4s
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: orr v6.4s, #64, lsl #16
-; CHECK-NEXT: orr v4.4s, #64, lsl #16
-; CHECK-NEXT: bit v2.16b, v5.16b, v17.16b
-; CHECK-NEXT: mov v5.16b, v19.16b
-; CHECK-NEXT: bit v0.16b, v7.16b, v18.16b
-; CHECK-NEXT: bif v1.16b, v4.16b, v3.16b
-; CHECK-NEXT: bsl v5.16b, v16.16b, v6.16b
-; CHECK-NEXT: uzp2 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT: uzp2 v1.8h, v1.8h, v5.8h
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <16 x i64> %a to <16 x bfloat>
- ret <16 x bfloat> %c
-}
-
-define <32 x bfloat> @stofp_v32i64_v32bf16(<32 x i64> %a) {
-; CHECK-LABEL: stofp_v32i64_v32bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v17.2d, v2.2d
-; CHECK-NEXT: scvtf v18.2d, v0.2d
-; CHECK-NEXT: scvtf v19.2d, v3.2d
-; CHECK-NEXT: scvtf v3.2d, v6.2d
-; CHECK-NEXT: ldp q21, q20, [sp, #32]
-; CHECK-NEXT: scvtf v4.2d, v4.2d
-; CHECK-NEXT: scvtf v6.2d, v7.2d
-; CHECK-NEXT: scvtf v5.2d, v5.2d
-; CHECK-NEXT: ldp q24, q23, [sp, #64]
-; CHECK-NEXT: movi v16.4s, #1
-; CHECK-NEXT: fcvtn v0.2s, v17.2d
-; CHECK-NEXT: scvtf v17.2d, v1.2d
-; CHECK-NEXT: fcvtn v1.2s, v18.2d
-; CHECK-NEXT: fcvtn v3.2s, v3.2d
-; CHECK-NEXT: ldp q18, q7, [sp]
-; CHECK-NEXT: scvtf v21.2d, v21.2d
-; CHECK-NEXT: fcvtn v4.2s, v4.2d
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: scvtf v20.2d, v20.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v19.2d
-; CHECK-NEXT: ldp q22, q19, [sp, #96]
-; CHECK-NEXT: fcvtn2 v1.4s, v17.2d
-; CHECK-NEXT: fcvtn2 v3.4s, v6.2d
-; CHECK-NEXT: scvtf v18.2d, v18.2d
-; CHECK-NEXT: scvtf v17.2d, v24.2d
-; CHECK-NEXT: fcvtn v6.2s, v21.2d
-; CHECK-NEXT: fcvtn2 v4.4s, v5.2d
-; CHECK-NEXT: scvtf v22.2d, v22.2d
-; CHECK-NEXT: scvtf v21.2d, v23.2d
-; CHECK-NEXT: scvtf v7.2d, v7.2d
-; CHECK-NEXT: ushr v24.4s, v0.4s, #16
-; CHECK-NEXT: add v5.4s, v0.4s, v2.4s
-; CHECK-NEXT: scvtf v19.2d, v19.2d
-; CHECK-NEXT: ushr v23.4s, v1.4s, #16
-; CHECK-NEXT: ushr v25.4s, v3.4s, #16
-; CHECK-NEXT: fcvtn v18.2s, v18.2d
-; CHECK-NEXT: fcvtn2 v6.4s, v20.2d
-; CHECK-NEXT: add v26.4s, v1.4s, v2.4s
-; CHECK-NEXT: fcvtn v17.2s, v17.2d
-; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
-; CHECK-NEXT: fcvtn v22.2s, v22.2d
-; CHECK-NEXT: fcmeq v20.4s, v0.4s, v0.4s
-; CHECK-NEXT: and v23.16b, v23.16b, v16.16b
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: fcmeq v27.4s, v3.4s, v3.4s
-; CHECK-NEXT: fcvtn2 v18.4s, v7.2d
-; CHECK-NEXT: add v7.4s, v3.4s, v2.4s
-; CHECK-NEXT: orr v3.4s, #64, lsl #16
-; CHECK-NEXT: add v5.4s, v24.4s, v5.4s
-; CHECK-NEXT: and v24.16b, v25.16b, v16.16b
-; CHECK-NEXT: ushr v25.4s, v4.4s, #16
-; CHECK-NEXT: fcvtn2 v22.4s, v19.2d
-; CHECK-NEXT: add v19.4s, v23.4s, v26.4s
-; CHECK-NEXT: ushr v26.4s, v6.4s, #16
-; CHECK-NEXT: fcvtn2 v17.4s, v21.2d
-; CHECK-NEXT: fcmeq v21.4s, v1.4s, v1.4s
-; CHECK-NEXT: orr v1.4s, #64, lsl #16
-; CHECK-NEXT: and v23.16b, v25.16b, v16.16b
-; CHECK-NEXT: add v25.4s, v4.4s, v2.4s
-; CHECK-NEXT: add v7.4s, v24.4s, v7.4s
-; CHECK-NEXT: ushr v24.4s, v18.4s, #16
-; CHECK-NEXT: add v30.4s, v18.4s, v2.4s
-; CHECK-NEXT: bit v0.16b, v5.16b, v20.16b
-; CHECK-NEXT: ushr v28.4s, v22.4s, #16
-; CHECK-NEXT: add v31.4s, v22.4s, v2.4s
-; CHECK-NEXT: add v23.4s, v23.4s, v25.4s
-; CHECK-NEXT: and v25.16b, v26.16b, v16.16b
-; CHECK-NEXT: add v26.4s, v6.4s, v2.4s
-; CHECK-NEXT: ushr v29.4s, v17.4s, #16
-; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
-; CHECK-NEXT: add v2.4s, v17.4s, v2.4s
-; CHECK-NEXT: and v28.16b, v28.16b, v16.16b
-; CHECK-NEXT: bit v3.16b, v7.16b, v27.16b
-; CHECK-NEXT: bit v1.16b, v19.16b, v21.16b
-; CHECK-NEXT: add v25.4s, v25.4s, v26.4s
-; CHECK-NEXT: fcmeq v26.4s, v6.4s, v6.4s
-; CHECK-NEXT: orr v6.4s, #64, lsl #16
-; CHECK-NEXT: and v16.16b, v29.16b, v16.16b
-; CHECK-NEXT: add v24.4s, v24.4s, v30.4s
-; CHECK-NEXT: fcmeq v30.4s, v18.4s, v18.4s
-; CHECK-NEXT: add v28.4s, v28.4s, v31.4s
-; CHECK-NEXT: fcmeq v31.4s, v22.4s, v22.4s
-; CHECK-NEXT: fcmeq v29.4s, v4.4s, v4.4s
-; CHECK-NEXT: orr v4.4s, #64, lsl #16
-; CHECK-NEXT: orr v18.4s, #64, lsl #16
-; CHECK-NEXT: orr v22.4s, #64, lsl #16
-; CHECK-NEXT: mov v5.16b, v26.16b
-; CHECK-NEXT: add v2.4s, v16.4s, v2.4s
-; CHECK-NEXT: fcmeq v16.4s, v17.4s, v17.4s
-; CHECK-NEXT: orr v17.4s, #64, lsl #16
-; CHECK-NEXT: uzp2 v0.8h, v1.8h, v0.8h
-; CHECK-NEXT: mov v7.16b, v31.16b
-; CHECK-NEXT: bit v4.16b, v23.16b, v29.16b
-; CHECK-NEXT: bsl v5.16b, v25.16b, v6.16b
-; CHECK-NEXT: mov v6.16b, v30.16b
-; CHECK-NEXT: bsl v16.16b, v2.16b, v17.16b
-; CHECK-NEXT: bsl v7.16b, v28.16b, v22.16b
-; CHECK-NEXT: bsl v6.16b, v24.16b, v18.16b
-; CHECK-NEXT: uzp2 v1.8h, v4.8h, v3.8h
-; CHECK-NEXT: uzp2 v3.8h, v16.8h, v7.8h
-; CHECK-NEXT: uzp2 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <32 x i64> %a to <32 x bfloat>
- ret <32 x bfloat> %c
-}
-
-define <32 x bfloat> @utofp_v32i64_v32bf16(<32 x i64> %a) {
-; CHECK-LABEL: utofp_v32i64_v32bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v17.2d, v2.2d
-; CHECK-NEXT: ucvtf v18.2d, v0.2d
-; CHECK-NEXT: ucvtf v19.2d, v3.2d
-; CHECK-NEXT: ucvtf v3.2d, v6.2d
-; CHECK-NEXT: ldp q21, q20, [sp, #32]
-; CHECK-NEXT: ucvtf v4.2d, v4.2d
-; CHECK-NEXT: ucvtf v6.2d, v7.2d
-; CHECK-NEXT: ucvtf v5.2d, v5.2d
-; CHECK-NEXT: ldp q24, q23, [sp, #64]
-; CHECK-NEXT: movi v16.4s, #1
-; CHECK-NEXT: fcvtn v0.2s, v17.2d
-; CHECK-NEXT: ucvtf v17.2d, v1.2d
-; CHECK-NEXT: fcvtn v1.2s, v18.2d
-; CHECK-NEXT: fcvtn v3.2s, v3.2d
-; CHECK-NEXT: ldp q18, q7, [sp]
-; CHECK-NEXT: ucvtf v21.2d, v21.2d
-; CHECK-NEXT: fcvtn v4.2s, v4.2d
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: ucvtf v20.2d, v20.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v19.2d
-; CHECK-NEXT: ldp q22, q19, [sp, #96]
-; CHECK-NEXT: fcvtn2 v1.4s, v17.2d
-; CHECK-NEXT: fcvtn2 v3.4s, v6.2d
-; CHECK-NEXT: ucvtf v18.2d, v18.2d
-; CHECK-NEXT: ucvtf v17.2d, v24.2d
-; CHECK-NEXT: fcvtn v6.2s, v21.2d
-; CHECK-NEXT: fcvtn2 v4.4s, v5.2d
-; CHECK-NEXT: ucvtf v22.2d, v22.2d
-; CHECK-NEXT: ucvtf v21.2d, v23.2d
-; CHECK-NEXT: ucvtf v7.2d, v7.2d
-; CHECK-NEXT: ushr v24.4s, v0.4s, #16
-; CHECK-NEXT: add v5.4s, v0.4s, v2.4s
-; CHECK-NEXT: ucvtf v19.2d, v19.2d
-; CHECK-NEXT: ushr v23.4s, v1.4s, #16
-; CHECK-NEXT: ushr v25.4s, v3.4s, #16
-; CHECK-NEXT: fcvtn v18.2s, v18.2d
-; CHECK-NEXT: fcvtn2 v6.4s, v20.2d
-; CHECK-NEXT: add v26.4s, v1.4s, v2.4s
-; CHECK-NEXT: fcvtn v17.2s, v17.2d
-; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
-; CHECK-NEXT: fcvtn v22.2s, v22.2d
-; CHECK-NEXT: fcmeq v20.4s, v0.4s, v0.4s
-; CHECK-NEXT: and v23.16b, v23.16b, v16.16b
-; CHECK-NEXT: orr v0.4s, #64, lsl #16
-; CHECK-NEXT: fcmeq v27.4s, v3.4s, v3.4s
-; CHECK-NEXT: fcvtn2 v18.4s, v7.2d
-; CHECK-NEXT: add v7.4s, v3.4s, v2.4s
-; CHECK-NEXT: orr v3.4s, #64, lsl #16
-; CHECK-NEXT: add v5.4s, v24.4s, v5.4s
-; CHECK-NEXT: and v24.16b, v25.16b, v16.16b
-; CHECK-NEXT: ushr v25.4s, v4.4s, #16
-; CHECK-NEXT: fcvtn2 v22.4s, v19.2d
-; CHECK-NEXT: add v19.4s, v23.4s, v26.4s
-; CHECK-NEXT: ushr v26.4s, v6.4s, #16
-; CHECK-NEXT: fcvtn2 v17.4s, v21.2d
-; CHECK-NEXT: fcmeq v21.4s, v1.4s, v1.4s
-; CHECK-NEXT: orr v1.4s, #64, lsl #16
-; CHECK-NEXT: and v23.16b, v25.16b, v16.16b
-; CHECK-NEXT: add v25.4s, v4.4s, v2.4s
-; CHECK-NEXT: add v7.4s, v24.4s, v7.4s
-; CHECK-NEXT: ushr v24.4s, v18.4s, #16
-; CHECK-NEXT: add v30.4s, v18.4s, v2.4s
-; CHECK-NEXT: bit v0.16b, v5.16b, v20.16b
-; CHECK-NEXT: ushr v28.4s, v22.4s, #16
-; CHECK-NEXT: add v31.4s, v22.4s, v2.4s
-; CHECK-NEXT: add v23.4s, v23.4s, v25.4s
-; CHECK-NEXT: and v25.16b, v26.16b, v16.16b
-; CHECK-NEXT: add v26.4s, v6.4s, v2.4s
-; CHECK-NEXT: ushr v29.4s, v17.4s, #16
-; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
-; CHECK-NEXT: add v2.4s, v17.4s, v2.4s
-; CHECK-NEXT: and v28.16b, v28.16b, v16.16b
-; CHECK-NEXT: bit v3.16b, v7.16b, v27.16b
-; CHECK-NEXT: bit v1.16b, v19.16b, v21.16b
-; CHECK-NEXT: add v25.4s, v25.4s, v26.4s
-; CHECK-NEXT: fcmeq v26.4s, v6.4s, v6.4s
-; CHECK-NEXT: orr v6.4s, #64, lsl #16
-; CHECK-NEXT: and v16.16b, v29.16b, v16.16b
-; CHECK-NEXT: add v24.4s, v24.4s, v30.4s
-; CHECK-NEXT: fcmeq v30.4s, v18.4s, v18.4s
-; CHECK-NEXT: add v28.4s, v28.4s, v31.4s
-; CHECK-NEXT: fcmeq v31.4s, v22.4s, v22.4s
-; CHECK-NEXT: fcmeq v29.4s, v4.4s, v4.4s
-; CHECK-NEXT: orr v4.4s, #64, lsl #16
-; CHECK-NEXT: orr v18.4s, #64, lsl #16
-; CHECK-NEXT: orr v22.4s, #64, lsl #16
-; CHECK-NEXT: mov v5.16b, v26.16b
-; CHECK-NEXT: add v2.4s, v16.4s, v2.4s
-; CHECK-NEXT: fcmeq v16.4s, v17.4s, v17.4s
-; CHECK-NEXT: orr v17.4s, #64, lsl #16
-; CHECK-NEXT: uzp2 v0.8h, v1.8h, v0.8h
-; CHECK-NEXT: mov v7.16b, v31.16b
-; CHECK-NEXT: bit v4.16b, v23.16b, v29.16b
-; CHECK-NEXT: bsl v5.16b, v25.16b, v6.16b
-; CHECK-NEXT: mov v6.16b, v30.16b
-; CHECK-NEXT: bsl v16.16b, v2.16b, v17.16b
-; CHECK-NEXT: bsl v7.16b, v28.16b, v22.16b
-; CHECK-NEXT: bsl v6.16b, v24.16b, v18.16b
-; CHECK-NEXT: uzp2 v1.8h, v4.8h, v3.8h
-; CHECK-NEXT: uzp2 v3.8h, v16.8h, v7.8h
-; CHECK-NEXT: uzp2 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <32 x i64> %a to <32 x bfloat>
- ret <32 x bfloat> %c
-}
-
-define <2 x bfloat> @stofp_v2i32_v2bf16(<2 x i32> %a) {
-; CHECK-LABEL: stofp_v2i32_v2bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <2 x i32> %a to <2 x bfloat>
- ret <2 x bfloat> %c
-}
-
-define <2 x bfloat> @utofp_v2i32_v2bf16(<2 x i32> %a) {
-; CHECK-LABEL: utofp_v2i32_v2bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <2 x i32> %a to <2 x bfloat>
- ret <2 x bfloat> %c
-}
-
-define <3 x bfloat> @stofp_v3i32_v3bf16(<3 x i32> %a) {
-; CHECK-LABEL: stofp_v3i32_v3bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <3 x i32> %a to <3 x bfloat>
- ret <3 x bfloat> %c
-}
-
-define <3 x bfloat> @utofp_v3i32_v3bf16(<3 x i32> %a) {
-; CHECK-LABEL: utofp_v3i32_v3bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <3 x i32> %a to <3 x bfloat>
- ret <3 x bfloat> %c
-}
-
-define <4 x bfloat> @stofp_v4i32_v4bf16(<4 x i32> %a) {
-; CHECK-LABEL: stofp_v4i32_v4bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <4 x i32> %a to <4 x bfloat>
- ret <4 x bfloat> %c
-}
-
-define <4 x bfloat> @utofp_v4i32_v4bf16(<4 x i32> %a) {
-; CHECK-LABEL: utofp_v4i32_v4bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <4 x i32> %a to <4 x bfloat>
- ret <4 x bfloat> %c
-}
-
-define <8 x bfloat> @stofp_v8i32_v8bf16(<8 x i32> %a) {
-; CHECK-LABEL: stofp_v8i32_v8bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: scvtf v1.4s, v1.4s
-; CHECK-NEXT: movi v5.4s, #127, msl #8
-; CHECK-NEXT: ushr v3.4s, v0.4s, #16
-; CHECK-NEXT: ushr v4.4s, v1.4s, #16
-; CHECK-NEXT: and v3.16b, v3.16b, v2.16b
-; CHECK-NEXT: and v2.16b, v4.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
-; CHECK-NEXT: add v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v5.4s
-; CHECK-NEXT: addhn2 v0.8h, v1.4s, v5.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <8 x i32> %a to <8 x bfloat>
- ret <8 x bfloat> %c
-}
-
-define <8 x bfloat> @utofp_v8i32_v8bf16(<8 x i32> %a) {
-; CHECK-LABEL: utofp_v8i32_v8bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ucvtf v1.4s, v1.4s
-; CHECK-NEXT: movi v5.4s, #127, msl #8
-; CHECK-NEXT: ushr v3.4s, v0.4s, #16
-; CHECK-NEXT: ushr v4.4s, v1.4s, #16
-; CHECK-NEXT: and v3.16b, v3.16b, v2.16b
-; CHECK-NEXT: and v2.16b, v4.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
-; CHECK-NEXT: add v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v5.4s
-; CHECK-NEXT: addhn2 v0.8h, v1.4s, v5.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <8 x i32> %a to <8 x bfloat>
- ret <8 x bfloat> %c
-}
-
-define <16 x bfloat> @stofp_v16i32_v16bf16(<16 x i32> %a) {
-; CHECK-LABEL: stofp_v16i32_v16bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v2.4s, v2.4s
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: scvtf v4.4s, v1.4s
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: scvtf v3.4s, v3.4s
-; CHECK-NEXT: movi v17.4s, #127, msl #8
-; CHECK-NEXT: ushr v5.4s, v0.4s, #16
-; CHECK-NEXT: ushr v6.4s, v2.4s, #16
-; CHECK-NEXT: ushr v7.4s, v4.4s, #16
-; CHECK-NEXT: ushr v16.4s, v3.4s, #16
-; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
-; CHECK-NEXT: and v6.16b, v6.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v5.4s, v0.4s
-; CHECK-NEXT: add v2.4s, v6.4s, v2.4s
-; CHECK-NEXT: and v5.16b, v7.16b, v1.16b
-; CHECK-NEXT: and v6.16b, v16.16b, v1.16b
-; CHECK-NEXT: addhn v0.4h, v0.4s, v17.4s
-; CHECK-NEXT: addhn v1.4h, v2.4s, v17.4s
-; CHECK-NEXT: add v2.4s, v5.4s, v4.4s
-; CHECK-NEXT: add v3.4s, v6.4s, v3.4s
-; CHECK-NEXT: addhn2 v0.8h, v2.4s, v17.4s
-; CHECK-NEXT: addhn2 v1.8h, v3.4s, v17.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <16 x i32> %a to <16 x bfloat>
- ret <16 x bfloat> %c
-}
-
-define <16 x bfloat> @utofp_v16i32_v16bf16(<16 x i32> %a) {
-; CHECK-LABEL: utofp_v16i32_v16bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v2.4s, v2.4s
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ucvtf v4.4s, v1.4s
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ucvtf v3.4s, v3.4s
-; CHECK-NEXT: movi v17.4s, #127, msl #8
-; CHECK-NEXT: ushr v5.4s, v0.4s, #16
-; CHECK-NEXT: ushr v6.4s, v2.4s, #16
-; CHECK-NEXT: ushr v7.4s, v4.4s, #16
-; CHECK-NEXT: ushr v16.4s, v3.4s, #16
-; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
-; CHECK-NEXT: and v6.16b, v6.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v5.4s, v0.4s
-; CHECK-NEXT: add v2.4s, v6.4s, v2.4s
-; CHECK-NEXT: and v5.16b, v7.16b, v1.16b
-; CHECK-NEXT: and v6.16b, v16.16b, v1.16b
-; CHECK-NEXT: addhn v0.4h, v0.4s, v17.4s
-; CHECK-NEXT: addhn v1.4h, v2.4s, v17.4s
-; CHECK-NEXT: add v2.4s, v5.4s, v4.4s
-; CHECK-NEXT: add v3.4s, v6.4s, v3.4s
-; CHECK-NEXT: addhn2 v0.8h, v2.4s, v17.4s
-; CHECK-NEXT: addhn2 v1.8h, v3.4s, v17.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <16 x i32> %a to <16 x bfloat>
- ret <16 x bfloat> %c
-}
-
-define <32 x bfloat> @stofp_v32i32_v32bf16(<32 x i32> %a) {
-; CHECK-LABEL: stofp_v32i32_v32bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: scvtf v2.4s, v2.4s
-; CHECK-NEXT: scvtf v4.4s, v4.4s
-; CHECK-NEXT: scvtf v6.4s, v6.4s
-; CHECK-NEXT: movi v16.4s, #1
-; CHECK-NEXT: scvtf v1.4s, v1.4s
-; CHECK-NEXT: scvtf v17.4s, v3.4s
-; CHECK-NEXT: scvtf v5.4s, v5.4s
-; CHECK-NEXT: scvtf v7.4s, v7.4s
-; CHECK-NEXT: movi v21.4s, #127, msl #8
-; CHECK-NEXT: ushr v3.4s, v0.4s, #16
-; CHECK-NEXT: ushr v18.4s, v2.4s, #16
-; CHECK-NEXT: ushr v19.4s, v4.4s, #16
-; CHECK-NEXT: ushr v20.4s, v6.4s, #16
-; CHECK-NEXT: ushr v22.4s, v1.4s, #16
-; CHECK-NEXT: ushr v23.4s, v17.4s, #16
-; CHECK-NEXT: ushr v24.4s, v5.4s, #16
-; CHECK-NEXT: ushr v25.4s, v7.4s, #16
-; CHECK-NEXT: and v3.16b, v3.16b, v16.16b
-; CHECK-NEXT: and v18.16b, v18.16b, v16.16b
-; CHECK-NEXT: and v19.16b, v19.16b, v16.16b
-; CHECK-NEXT: and v20.16b, v20.16b, v16.16b
-; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
-; CHECK-NEXT: and v3.16b, v22.16b, v16.16b
-; CHECK-NEXT: add v2.4s, v18.4s, v2.4s
-; CHECK-NEXT: add v4.4s, v19.4s, v4.4s
-; CHECK-NEXT: add v6.4s, v20.4s, v6.4s
-; CHECK-NEXT: and v18.16b, v23.16b, v16.16b
-; CHECK-NEXT: and v19.16b, v24.16b, v16.16b
-; CHECK-NEXT: and v16.16b, v25.16b, v16.16b
-; CHECK-NEXT: add v20.4s, v3.4s, v1.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v21.4s
-; CHECK-NEXT: addhn v1.4h, v2.4s, v21.4s
-; CHECK-NEXT: addhn v2.4h, v4.4s, v21.4s
-; CHECK-NEXT: addhn v3.4h, v6.4s, v21.4s
-; CHECK-NEXT: add v4.4s, v18.4s, v17.4s
-; CHECK-NEXT: add v5.4s, v19.4s, v5.4s
-; CHECK-NEXT: add v6.4s, v16.4s, v7.4s
-; CHECK-NEXT: addhn2 v0.8h, v20.4s, v21.4s
-; CHECK-NEXT: addhn2 v1.8h, v4.4s, v21.4s
-; CHECK-NEXT: addhn2 v2.8h, v5.4s, v21.4s
-; CHECK-NEXT: addhn2 v3.8h, v6.4s, v21.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <32 x i32> %a to <32 x bfloat>
- ret <32 x bfloat> %c
-}
-
-define <32 x bfloat> @utofp_v32i32_v32bf16(<32 x i32> %a) {
-; CHECK-LABEL: utofp_v32i32_v32bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ucvtf v2.4s, v2.4s
-; CHECK-NEXT: ucvtf v4.4s, v4.4s
-; CHECK-NEXT: ucvtf v6.4s, v6.4s
-; CHECK-NEXT: movi v16.4s, #1
-; CHECK-NEXT: ucvtf v1.4s, v1.4s
-; CHECK-NEXT: ucvtf v17.4s, v3.4s
-; CHECK-NEXT: ucvtf v5.4s, v5.4s
-; CHECK-NEXT: ucvtf v7.4s, v7.4s
-; CHECK-NEXT: movi v21.4s, #127, msl #8
-; CHECK-NEXT: ushr v3.4s, v0.4s, #16
-; CHECK-NEXT: ushr v18.4s, v2.4s, #16
-; CHECK-NEXT: ushr v19.4s, v4.4s, #16
-; CHECK-NEXT: ushr v20.4s, v6.4s, #16
-; CHECK-NEXT: ushr v22.4s, v1.4s, #16
-; CHECK-NEXT: ushr v23.4s, v17.4s, #16
-; CHECK-NEXT: ushr v24.4s, v5.4s, #16
-; CHECK-NEXT: ushr v25.4s, v7.4s, #16
-; CHECK-NEXT: and v3.16b, v3.16b, v16.16b
-; CHECK-NEXT: and v18.16b, v18.16b, v16.16b
-; CHECK-NEXT: and v19.16b, v19.16b, v16.16b
-; CHECK-NEXT: and v20.16b, v20.16b, v16.16b
-; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
-; CHECK-NEXT: and v3.16b, v22.16b, v16.16b
-; CHECK-NEXT: add v2.4s, v18.4s, v2.4s
-; CHECK-NEXT: add v4.4s, v19.4s, v4.4s
-; CHECK-NEXT: add v6.4s, v20.4s, v6.4s
-; CHECK-NEXT: and v18.16b, v23.16b, v16.16b
-; CHECK-NEXT: and v19.16b, v24.16b, v16.16b
-; CHECK-NEXT: and v16.16b, v25.16b, v16.16b
-; CHECK-NEXT: add v20.4s, v3.4s, v1.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v21.4s
-; CHECK-NEXT: addhn v1.4h, v2.4s, v21.4s
-; CHECK-NEXT: addhn v2.4h, v4.4s, v21.4s
-; CHECK-NEXT: addhn v3.4h, v6.4s, v21.4s
-; CHECK-NEXT: add v4.4s, v18.4s, v17.4s
-; CHECK-NEXT: add v5.4s, v19.4s, v5.4s
-; CHECK-NEXT: add v6.4s, v16.4s, v7.4s
-; CHECK-NEXT: addhn2 v0.8h, v20.4s, v21.4s
-; CHECK-NEXT: addhn2 v1.8h, v4.4s, v21.4s
-; CHECK-NEXT: addhn2 v2.8h, v5.4s, v21.4s
-; CHECK-NEXT: addhn2 v3.8h, v6.4s, v21.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <32 x i32> %a to <32 x bfloat>
- ret <32 x bfloat> %c
-}
-
-define <2 x bfloat> @stofp_v2i16_v2bf16(<2 x i16> %a) {
-; CHECK-LABEL: stofp_v2i16_v2bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v0.4h
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <2 x i16> %a to <2 x bfloat>
- ret <2 x bfloat> %c
-}
-
-define <2 x bfloat> @utofp_v2i16_v2bf16(<2 x i16> %a) {
-; CHECK-LABEL: utofp_v2i16_v2bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v0.4h
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <2 x i16> %a to <2 x bfloat>
- ret <2 x bfloat> %c
-}
-
-define <3 x bfloat> @stofp_v3i16_v3bf16(<3 x i16> %a) {
-; CHECK-LABEL: stofp_v3i16_v3bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <3 x i16> %a to <3 x bfloat>
- ret <3 x bfloat> %c
-}
-
-define <3 x bfloat> @utofp_v3i16_v3bf16(<3 x i16> %a) {
-; CHECK-LABEL: utofp_v3i16_v3bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <3 x i16> %a to <3 x bfloat>
- ret <3 x bfloat> %c
-}
-
-define <4 x bfloat> @stofp_v4i16_v4bf16(<4 x i16> %a) {
-; CHECK-LABEL: stofp_v4i16_v4bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <4 x i16> %a to <4 x bfloat>
- ret <4 x bfloat> %c
-}
-
-define <4 x bfloat> @utofp_v4i16_v4bf16(<4 x i16> %a) {
-; CHECK-LABEL: utofp_v4i16_v4bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <4 x i16> %a to <4 x bfloat>
- ret <4 x bfloat> %c
-}
-
-define <8 x bfloat> @stofp_v8i16_v8bf16(<8 x i16> %a) {
-; CHECK-LABEL: stofp_v8i16_v8bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v4.4s, #127, msl #8
-; CHECK-NEXT: scvtf v2.4s, v2.4s
-; CHECK-NEXT: scvtf v3.4s, v0.4s
-; CHECK-NEXT: ushr v0.4s, v2.4s, #16
-; CHECK-NEXT: ushr v5.4s, v3.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
-; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v4.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v4.4s
-; CHECK-NEXT: addhn v0.4h, v2.4s, v0.4s
-; CHECK-NEXT: addhn2 v0.8h, v3.4s, v1.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <8 x i16> %a to <8 x bfloat>
- ret <8 x bfloat> %c
-}
-
-define <8 x bfloat> @utofp_v8i16_v8bf16(<8 x i16> %a) {
-; CHECK-LABEL: utofp_v8i16_v8bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ushll v2.4s, v0.4h, #0
-; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v4.4s, #127, msl #8
-; CHECK-NEXT: ucvtf v2.4s, v2.4s
-; CHECK-NEXT: ucvtf v3.4s, v0.4s
-; CHECK-NEXT: ushr v0.4s, v2.4s, #16
-; CHECK-NEXT: ushr v5.4s, v3.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
-; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v4.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v4.4s
-; CHECK-NEXT: addhn v0.4h, v2.4s, v0.4s
-; CHECK-NEXT: addhn2 v0.8h, v3.4s, v1.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <8 x i16> %a to <8 x bfloat>
- ret <8 x bfloat> %c
-}
-
-define <16 x bfloat> @stofp_v16i16_v16bf16(<16 x i16> %a) {
-; CHECK-LABEL: stofp_v16i16_v16bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sshll v3.4s, v0.4h, #0
-; CHECK-NEXT: sshll v4.4s, v1.4h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: sshll2 v1.4s, v1.8h, #0
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: movi v7.4s, #127, msl #8
-; CHECK-NEXT: scvtf v3.4s, v3.4s
-; CHECK-NEXT: scvtf v4.4s, v4.4s
-; CHECK-NEXT: scvtf v5.4s, v0.4s
-; CHECK-NEXT: scvtf v6.4s, v1.4s
-; CHECK-NEXT: ushr v0.4s, v3.4s, #16
-; CHECK-NEXT: ushr v1.4s, v4.4s, #16
-; CHECK-NEXT: ushr v16.4s, v5.4s, #16
-; CHECK-NEXT: ushr v17.4s, v6.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT: and v16.16b, v16.16b, v2.16b
-; CHECK-NEXT: and v2.16b, v17.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v7.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v7.4s
-; CHECK-NEXT: add v2.4s, v2.4s, v7.4s
-; CHECK-NEXT: addhn v0.4h, v3.4s, v0.4s
-; CHECK-NEXT: addhn v1.4h, v4.4s, v1.4s
-; CHECK-NEXT: add v3.4s, v16.4s, v7.4s
-; CHECK-NEXT: addhn2 v0.8h, v5.4s, v3.4s
-; CHECK-NEXT: addhn2 v1.8h, v6.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <16 x i16> %a to <16 x bfloat>
- ret <16 x bfloat> %c
-}
-
-define <16 x bfloat> @utofp_v16i16_v16bf16(<16 x i16> %a) {
-; CHECK-LABEL: utofp_v16i16_v16bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ushll v3.4s, v0.4h, #0
-; CHECK-NEXT: ushll v4.4s, v1.4h, #0
-; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: movi v7.4s, #127, msl #8
-; CHECK-NEXT: ucvtf v3.4s, v3.4s
-; CHECK-NEXT: ucvtf v4.4s, v4.4s
-; CHECK-NEXT: ucvtf v5.4s, v0.4s
-; CHECK-NEXT: ucvtf v6.4s, v1.4s
-; CHECK-NEXT: ushr v0.4s, v3.4s, #16
-; CHECK-NEXT: ushr v1.4s, v4.4s, #16
-; CHECK-NEXT: ushr v16.4s, v5.4s, #16
-; CHECK-NEXT: ushr v17.4s, v6.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT: and v16.16b, v16.16b, v2.16b
-; CHECK-NEXT: and v2.16b, v17.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v7.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v7.4s
-; CHECK-NEXT: add v2.4s, v2.4s, v7.4s
-; CHECK-NEXT: addhn v0.4h, v3.4s, v0.4s
-; CHECK-NEXT: addhn v1.4h, v4.4s, v1.4s
-; CHECK-NEXT: add v3.4s, v16.4s, v7.4s
-; CHECK-NEXT: addhn2 v0.8h, v5.4s, v3.4s
-; CHECK-NEXT: addhn2 v1.8h, v6.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <16 x i16> %a to <16 x bfloat>
- ret <16 x bfloat> %c
-}
-
-define <32 x bfloat> @stofp_v32i16_v32bf16(<32 x i16> %a) {
-; CHECK-LABEL: stofp_v32i16_v32bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sshll v4.4s, v1.4h, #0
-; CHECK-NEXT: sshll v5.4s, v0.4h, #0
-; CHECK-NEXT: sshll v6.4s, v2.4h, #0
-; CHECK-NEXT: sshll v7.4s, v3.4h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: sshll2 v1.4s, v1.8h, #0
-; CHECK-NEXT: sshll2 v2.4s, v2.8h, #0
-; CHECK-NEXT: sshll2 v3.4s, v3.8h, #0
-; CHECK-NEXT: movi v16.4s, #1
-; CHECK-NEXT: scvtf v5.4s, v5.4s
-; CHECK-NEXT: scvtf v4.4s, v4.4s
-; CHECK-NEXT: scvtf v6.4s, v6.4s
-; CHECK-NEXT: scvtf v7.4s, v7.4s
-; CHECK-NEXT: scvtf v17.4s, v0.4s
-; CHECK-NEXT: scvtf v18.4s, v1.4s
-; CHECK-NEXT: scvtf v19.4s, v2.4s
-; CHECK-NEXT: scvtf v20.4s, v3.4s
-; CHECK-NEXT: movi v21.4s, #127, msl #8
-; CHECK-NEXT: ushr v0.4s, v5.4s, #16
-; CHECK-NEXT: ushr v1.4s, v4.4s, #16
-; CHECK-NEXT: ushr v2.4s, v6.4s, #16
-; CHECK-NEXT: ushr v3.4s, v7.4s, #16
-; CHECK-NEXT: ushr v22.4s, v17.4s, #16
-; CHECK-NEXT: ushr v23.4s, v18.4s, #16
-; CHECK-NEXT: ushr v24.4s, v19.4s, #16
-; CHECK-NEXT: ushr v25.4s, v20.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v16.16b
-; CHECK-NEXT: and v1.16b, v1.16b, v16.16b
-; CHECK-NEXT: and v2.16b, v2.16b, v16.16b
-; CHECK-NEXT: and v3.16b, v3.16b, v16.16b
-; CHECK-NEXT: and v22.16b, v22.16b, v16.16b
-; CHECK-NEXT: and v23.16b, v23.16b, v16.16b
-; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
-; CHECK-NEXT: and v16.16b, v25.16b, v16.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v21.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v21.4s
-; CHECK-NEXT: add v2.4s, v2.4s, v21.4s
-; CHECK-NEXT: add v3.4s, v3.4s, v21.4s
-; CHECK-NEXT: addhn v0.4h, v5.4s, v0.4s
-; CHECK-NEXT: addhn v1.4h, v4.4s, v1.4s
-; CHECK-NEXT: addhn v2.4h, v6.4s, v2.4s
-; CHECK-NEXT: addhn v3.4h, v7.4s, v3.4s
-; CHECK-NEXT: add v4.4s, v22.4s, v21.4s
-; CHECK-NEXT: add v5.4s, v23.4s, v21.4s
-; CHECK-NEXT: add v6.4s, v24.4s, v21.4s
-; CHECK-NEXT: add v7.4s, v16.4s, v21.4s
-; CHECK-NEXT: addhn2 v0.8h, v17.4s, v4.4s
-; CHECK-NEXT: addhn2 v1.8h, v18.4s, v5.4s
-; CHECK-NEXT: addhn2 v2.8h, v19.4s, v6.4s
-; CHECK-NEXT: addhn2 v3.8h, v20.4s, v7.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <32 x i16> %a to <32 x bfloat>
- ret <32 x bfloat> %c
-}
-
-define <32 x bfloat> @utofp_v32i16_v32bf16(<32 x i16> %a) {
-; CHECK-LABEL: utofp_v32i16_v32bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ushll v4.4s, v1.4h, #0
-; CHECK-NEXT: ushll v5.4s, v0.4h, #0
-; CHECK-NEXT: ushll v6.4s, v2.4h, #0
-; CHECK-NEXT: ushll v7.4s, v3.4h, #0
-; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-NEXT: ushll2 v2.4s, v2.8h, #0
-; CHECK-NEXT: ushll2 v3.4s, v3.8h, #0
-; CHECK-NEXT: movi v16.4s, #1
-; CHECK-NEXT: ucvtf v5.4s, v5.4s
-; CHECK-NEXT: ucvtf v4.4s, v4.4s
-; CHECK-NEXT: ucvtf v6.4s, v6.4s
-; CHECK-NEXT: ucvtf v7.4s, v7.4s
-; CHECK-NEXT: ucvtf v17.4s, v0.4s
-; CHECK-NEXT: ucvtf v18.4s, v1.4s
-; CHECK-NEXT: ucvtf v19.4s, v2.4s
-; CHECK-NEXT: ucvtf v20.4s, v3.4s
-; CHECK-NEXT: movi v21.4s, #127, msl #8
-; CHECK-NEXT: ushr v0.4s, v5.4s, #16
-; CHECK-NEXT: ushr v1.4s, v4.4s, #16
-; CHECK-NEXT: ushr v2.4s, v6.4s, #16
-; CHECK-NEXT: ushr v3.4s, v7.4s, #16
-; CHECK-NEXT: ushr v22.4s, v17.4s, #16
-; CHECK-NEXT: ushr v23.4s, v18.4s, #16
-; CHECK-NEXT: ushr v24.4s, v19.4s, #16
-; CHECK-NEXT: ushr v25.4s, v20.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v16.16b
-; CHECK-NEXT: and v1.16b, v1.16b, v16.16b
-; CHECK-NEXT: and v2.16b, v2.16b, v16.16b
-; CHECK-NEXT: and v3.16b, v3.16b, v16.16b
-; CHECK-NEXT: and v22.16b, v22.16b, v16.16b
-; CHECK-NEXT: and v23.16b, v23.16b, v16.16b
-; CHECK-NEXT: and v24.16b, v24.16b, v16.16b
-; CHECK-NEXT: and v16.16b, v25.16b, v16.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v21.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v21.4s
-; CHECK-NEXT: add v2.4s, v2.4s, v21.4s
-; CHECK-NEXT: add v3.4s, v3.4s, v21.4s
-; CHECK-NEXT: addhn v0.4h, v5.4s, v0.4s
-; CHECK-NEXT: addhn v1.4h, v4.4s, v1.4s
-; CHECK-NEXT: addhn v2.4h, v6.4s, v2.4s
-; CHECK-NEXT: addhn v3.4h, v7.4s, v3.4s
-; CHECK-NEXT: add v4.4s, v22.4s, v21.4s
-; CHECK-NEXT: add v5.4s, v23.4s, v21.4s
-; CHECK-NEXT: add v6.4s, v24.4s, v21.4s
-; CHECK-NEXT: add v7.4s, v16.4s, v21.4s
-; CHECK-NEXT: addhn2 v0.8h, v17.4s, v4.4s
-; CHECK-NEXT: addhn2 v1.8h, v18.4s, v5.4s
-; CHECK-NEXT: addhn2 v2.8h, v19.4s, v6.4s
-; CHECK-NEXT: addhn2 v3.8h, v20.4s, v7.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <32 x i16> %a to <32 x bfloat>
- ret <32 x bfloat> %c
-}
-
-define <2 x bfloat> @stofp_v2i8_v2bf16(<2 x i8> %a) {
-; CHECK-LABEL: stofp_v2i8_v2bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov w9, v0.s[1]
-; CHECK-NEXT: fmov w10, s0
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: sxtb w10, w10
-; CHECK-NEXT: sxtb w9, w9
-; CHECK-NEXT: scvtf s1, w10
-; CHECK-NEXT: scvtf s0, w9
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w12, w10, #16, #1
-; CHECK-NEXT: ubfx w11, w9, #16, #1
-; CHECK-NEXT: add w9, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: add w8, w12, w8
-; CHECK-NEXT: add w9, w11, w9
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: lsr w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <2 x i8> %a to <2 x bfloat>
- ret <2 x bfloat> %c
-}
-
-define <2 x bfloat> @utofp_v2i8_v2bf16(<2 x i8> %a) {
-; CHECK-LABEL: utofp_v2i8_v2bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov w9, v0.s[1]
-; CHECK-NEXT: fmov w10, s0
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: and w10, w10, #0xff
-; CHECK-NEXT: and w9, w9, #0xff
-; CHECK-NEXT: ucvtf s1, w10
-; CHECK-NEXT: ucvtf s0, w9
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ubfx w12, w10, #16, #1
-; CHECK-NEXT: ubfx w11, w9, #16, #1
-; CHECK-NEXT: add w9, w9, w8
-; CHECK-NEXT: add w8, w10, w8
-; CHECK-NEXT: add w8, w12, w8
-; CHECK-NEXT: add w9, w11, w9
-; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: lsr w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <2 x i8> %a to <2 x bfloat>
- ret <2 x bfloat> %c
-}
-
-define <3 x bfloat> @stofp_v3i8_v3bf16(<3 x i8> %a) {
-; CHECK-LABEL: stofp_v3i8_v3bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <3 x i8> %a to <3 x bfloat>
- ret <3 x bfloat> %c
-}
-
-define <3 x bfloat> @utofp_v3i8_v3bf16(<3 x i8> %a) {
-; CHECK-LABEL: utofp_v3i8_v3bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <3 x i8> %a to <3 x bfloat>
- ret <3 x bfloat> %c
-}
-
-define <4 x bfloat> @stofp_v4i8_v4bf16(<4 x i8> %a) {
-; CHECK-LABEL: stofp_v4i8_v4bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <4 x i8> %a to <4 x bfloat>
- ret <4 x bfloat> %c
-}
-
-define <4 x bfloat> @utofp_v4i8_v4bf16(<4 x i8> %a) {
-; CHECK-LABEL: utofp_v4i8_v4bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ushr v2.4s, v0.4s, #16
-; CHECK-NEXT: and v1.16b, v2.16b, v1.16b
-; CHECK-NEXT: movi v2.4s, #127, msl #8
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v0.4s, v2.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <4 x i8> %a to <4 x bfloat>
- ret <4 x bfloat> %c
-}
-
-define <8 x bfloat> @stofp_v8i8_v8bf16(<8 x i8> %a) {
-; CHECK-LABEL: stofp_v8i8_v8bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v4.4s, #127, msl #8
-; CHECK-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: scvtf v2.4s, v2.4s
-; CHECK-NEXT: scvtf v3.4s, v0.4s
-; CHECK-NEXT: ushr v0.4s, v2.4s, #16
-; CHECK-NEXT: ushr v5.4s, v3.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
-; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v4.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v4.4s
-; CHECK-NEXT: addhn v0.4h, v2.4s, v0.4s
-; CHECK-NEXT: addhn2 v0.8h, v3.4s, v1.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <8 x i8> %a to <8 x bfloat>
- ret <8 x bfloat> %c
-}
-
-define <8 x bfloat> @utofp_v8i8_v8bf16(<8 x i8> %a) {
-; CHECK-LABEL: utofp_v8i8_v8bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v4.4s, #127, msl #8
-; CHECK-NEXT: ushll v2.4s, v0.4h, #0
-; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: ucvtf v2.4s, v2.4s
-; CHECK-NEXT: ucvtf v3.4s, v0.4s
-; CHECK-NEXT: ushr v0.4s, v2.4s, #16
-; CHECK-NEXT: ushr v5.4s, v3.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
-; CHECK-NEXT: and v1.16b, v5.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v4.4s
-; CHECK-NEXT: add v1.4s, v1.4s, v4.4s
-; CHECK-NEXT: addhn v0.4h, v2.4s, v0.4s
-; CHECK-NEXT: addhn2 v0.8h, v3.4s, v1.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <8 x i8> %a to <8 x bfloat>
- ret <8 x bfloat> %c
-}
-
-define <16 x bfloat> @stofp_v16i8_v16bf16(<16 x i8> %a) {
-; CHECK-LABEL: stofp_v16i8_v16bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sshll2 v2.8h, v0.16b, #0
-; CHECK-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v7.4s, #127, msl #8
-; CHECK-NEXT: sshll v3.4s, v2.4h, #0
-; CHECK-NEXT: sshll v4.4s, v0.4h, #0
-; CHECK-NEXT: sshll2 v2.4s, v2.8h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: scvtf v3.4s, v3.4s
-; CHECK-NEXT: scvtf v4.4s, v4.4s
-; CHECK-NEXT: scvtf v2.4s, v2.4s
-; CHECK-NEXT: scvtf v6.4s, v0.4s
-; CHECK-NEXT: ushr v5.4s, v3.4s, #16
-; CHECK-NEXT: ushr v0.4s, v4.4s, #16
-; CHECK-NEXT: ushr v16.4s, v2.4s, #16
-; CHECK-NEXT: ushr v17.4s, v6.4s, #16
-; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
-; CHECK-NEXT: and v16.16b, v16.16b, v1.16b
-; CHECK-NEXT: and v17.16b, v17.16b, v1.16b
-; CHECK-NEXT: add v5.4s, v5.4s, v7.4s
-; CHECK-NEXT: add v0.4s, v0.4s, v7.4s
-; CHECK-NEXT: addhn v1.4h, v3.4s, v5.4s
-; CHECK-NEXT: addhn v0.4h, v4.4s, v0.4s
-; CHECK-NEXT: add v3.4s, v16.4s, v7.4s
-; CHECK-NEXT: add v4.4s, v17.4s, v7.4s
-; CHECK-NEXT: addhn2 v1.8h, v2.4s, v3.4s
-; CHECK-NEXT: addhn2 v0.8h, v6.4s, v4.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <16 x i8> %a to <16 x bfloat>
- ret <16 x bfloat> %c
-}
-
-define <16 x bfloat> @utofp_v16i8_v16bf16(<16 x i8> %a) {
-; CHECK-LABEL: utofp_v16i8_v16bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ushll2 v2.8h, v0.16b, #0
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: movi v7.4s, #127, msl #8
-; CHECK-NEXT: ushll v3.4s, v2.4h, #0
-; CHECK-NEXT: ushll v4.4s, v0.4h, #0
-; CHECK-NEXT: ushll2 v2.4s, v2.8h, #0
-; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: ucvtf v3.4s, v3.4s
-; CHECK-NEXT: ucvtf v4.4s, v4.4s
-; CHECK-NEXT: ucvtf v2.4s, v2.4s
-; CHECK-NEXT: ucvtf v6.4s, v0.4s
-; CHECK-NEXT: ushr v5.4s, v3.4s, #16
-; CHECK-NEXT: ushr v0.4s, v4.4s, #16
-; CHECK-NEXT: ushr v16.4s, v2.4s, #16
-; CHECK-NEXT: ushr v17.4s, v6.4s, #16
-; CHECK-NEXT: and v5.16b, v5.16b, v1.16b
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
-; CHECK-NEXT: and v16.16b, v16.16b, v1.16b
-; CHECK-NEXT: and v17.16b, v17.16b, v1.16b
-; CHECK-NEXT: add v5.4s, v5.4s, v7.4s
-; CHECK-NEXT: add v0.4s, v0.4s, v7.4s
-; CHECK-NEXT: addhn v1.4h, v3.4s, v5.4s
-; CHECK-NEXT: addhn v0.4h, v4.4s, v0.4s
-; CHECK-NEXT: add v3.4s, v16.4s, v7.4s
-; CHECK-NEXT: add v4.4s, v17.4s, v7.4s
-; CHECK-NEXT: addhn2 v1.8h, v2.4s, v3.4s
-; CHECK-NEXT: addhn2 v0.8h, v6.4s, v4.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <16 x i8> %a to <16 x bfloat>
- ret <16 x bfloat> %c
-}
-
-define <32 x bfloat> @stofp_v32i8_v32bf16(<32 x i8> %a) {
-; CHECK-LABEL: stofp_v32i8_v32bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sshll2 v3.8h, v0.16b, #0
-; CHECK-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-NEXT: sshll2 v4.8h, v1.16b, #0
-; CHECK-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: movi v21.4s, #127, msl #8
-; CHECK-NEXT: sshll v5.4s, v3.4h, #0
-; CHECK-NEXT: sshll v6.4s, v0.4h, #0
-; CHECK-NEXT: sshll v7.4s, v4.4h, #0
-; CHECK-NEXT: sshll v16.4s, v1.4h, #0
-; CHECK-NEXT: sshll2 v3.4s, v3.8h, #0
-; CHECK-NEXT: sshll2 v4.4s, v4.8h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: sshll2 v1.4s, v1.8h, #0
-; CHECK-NEXT: scvtf v5.4s, v5.4s
-; CHECK-NEXT: scvtf v6.4s, v6.4s
-; CHECK-NEXT: scvtf v7.4s, v7.4s
-; CHECK-NEXT: scvtf v16.4s, v16.4s
-; CHECK-NEXT: scvtf v17.4s, v3.4s
-; CHECK-NEXT: scvtf v4.4s, v4.4s
-; CHECK-NEXT: scvtf v18.4s, v0.4s
-; CHECK-NEXT: scvtf v19.4s, v1.4s
-; CHECK-NEXT: ushr v0.4s, v5.4s, #16
-; CHECK-NEXT: ushr v3.4s, v6.4s, #16
-; CHECK-NEXT: ushr v1.4s, v7.4s, #16
-; CHECK-NEXT: ushr v20.4s, v16.4s, #16
-; CHECK-NEXT: ushr v23.4s, v17.4s, #16
-; CHECK-NEXT: ushr v24.4s, v4.4s, #16
-; CHECK-NEXT: ushr v22.4s, v18.4s, #16
-; CHECK-NEXT: ushr v25.4s, v19.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: and v3.16b, v3.16b, v2.16b
-; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT: and v20.16b, v20.16b, v2.16b
-; CHECK-NEXT: and v23.16b, v23.16b, v2.16b
-; CHECK-NEXT: and v24.16b, v24.16b, v2.16b
-; CHECK-NEXT: and v22.16b, v22.16b, v2.16b
-; CHECK-NEXT: and v25.16b, v25.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v21.4s
-; CHECK-NEXT: add v3.4s, v3.4s, v21.4s
-; CHECK-NEXT: add v26.4s, v1.4s, v21.4s
-; CHECK-NEXT: add v20.4s, v20.4s, v21.4s
-; CHECK-NEXT: addhn v1.4h, v5.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v6.4s, v3.4s
-; CHECK-NEXT: addhn v3.4h, v7.4s, v26.4s
-; CHECK-NEXT: addhn v2.4h, v16.4s, v20.4s
-; CHECK-NEXT: add v5.4s, v22.4s, v21.4s
-; CHECK-NEXT: add v6.4s, v23.4s, v21.4s
-; CHECK-NEXT: add v7.4s, v24.4s, v21.4s
-; CHECK-NEXT: add v16.4s, v25.4s, v21.4s
-; CHECK-NEXT: addhn2 v0.8h, v18.4s, v5.4s
-; CHECK-NEXT: addhn2 v1.8h, v17.4s, v6.4s
-; CHECK-NEXT: addhn2 v3.8h, v4.4s, v7.4s
-; CHECK-NEXT: addhn2 v2.8h, v19.4s, v16.4s
-; CHECK-NEXT: ret
-entry:
- %c = sitofp <32 x i8> %a to <32 x bfloat>
- ret <32 x bfloat> %c
-}
-
-define <32 x bfloat> @utofp_v32i8_v32bf16(<32 x i8> %a) {
-; CHECK-LABEL: utofp_v32i8_v32bf16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ushll2 v3.8h, v0.16b, #0
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: ushll2 v4.8h, v1.16b, #0
-; CHECK-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: movi v21.4s, #127, msl #8
-; CHECK-NEXT: ushll v5.4s, v3.4h, #0
-; CHECK-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-NEXT: ushll v7.4s, v4.4h, #0
-; CHECK-NEXT: ushll v16.4s, v1.4h, #0
-; CHECK-NEXT: ushll2 v3.4s, v3.8h, #0
-; CHECK-NEXT: ushll2 v4.4s, v4.8h, #0
-; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-NEXT: ucvtf v5.4s, v5.4s
-; CHECK-NEXT: ucvtf v6.4s, v6.4s
-; CHECK-NEXT: ucvtf v7.4s, v7.4s
-; CHECK-NEXT: ucvtf v16.4s, v16.4s
-; CHECK-NEXT: ucvtf v17.4s, v3.4s
-; CHECK-NEXT: ucvtf v4.4s, v4.4s
-; CHECK-NEXT: ucvtf v18.4s, v0.4s
-; CHECK-NEXT: ucvtf v19.4s, v1.4s
-; CHECK-NEXT: ushr v0.4s, v5.4s, #16
-; CHECK-NEXT: ushr v3.4s, v6.4s, #16
-; CHECK-NEXT: ushr v1.4s, v7.4s, #16
-; CHECK-NEXT: ushr v20.4s, v16.4s, #16
-; CHECK-NEXT: ushr v23.4s, v17.4s, #16
-; CHECK-NEXT: ushr v24.4s, v4.4s, #16
-; CHECK-NEXT: ushr v22.4s, v18.4s, #16
-; CHECK-NEXT: ushr v25.4s, v19.4s, #16
-; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: and v3.16b, v3.16b, v2.16b
-; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT: and v20.16b, v20.16b, v2.16b
-; CHECK-NEXT: and v23.16b, v23.16b, v2.16b
-; CHECK-NEXT: and v24.16b, v24.16b, v2.16b
-; CHECK-NEXT: and v22.16b, v22.16b, v2.16b
-; CHECK-NEXT: and v25.16b, v25.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v0.4s, v21.4s
-; CHECK-NEXT: add v3.4s, v3.4s, v21.4s
-; CHECK-NEXT: add v26.4s, v1.4s, v21.4s
-; CHECK-NEXT: add v20.4s, v20.4s, v21.4s
-; CHECK-NEXT: addhn v1.4h, v5.4s, v0.4s
-; CHECK-NEXT: addhn v0.4h, v6.4s, v3.4s
-; CHECK-NEXT: addhn v3.4h, v7.4s, v26.4s
-; CHECK-NEXT: addhn v2.4h, v16.4s, v20.4s
-; CHECK-NEXT: add v5.4s, v22.4s, v21.4s
-; CHECK-NEXT: add v6.4s, v23.4s, v21.4s
-; CHECK-NEXT: add v7.4s, v24.4s, v21.4s
-; CHECK-NEXT: add v16.4s, v25.4s, v21.4s
-; CHECK-NEXT: addhn2 v0.8h, v18.4s, v5.4s
-; CHECK-NEXT: addhn2 v1.8h, v17.4s, v6.4s
-; CHECK-NEXT: addhn2 v3.8h, v4.4s, v7.4s
-; CHECK-NEXT: addhn2 v2.8h, v19.4s, v16.4s
-; CHECK-NEXT: ret
-entry:
- %c = uitofp <32 x i8> %a to <32 x bfloat>
- ret <32 x bfloat> %c
-}

From 54de090340acff98bd1c1163f5eaf0bcbfe247e1 Mon Sep 17 00:00:00 2001
From: Tianyi Guan <tguan at nvidia.com>
Date: Tue, 2 Jul 2024 12:09:47 +0100
Subject: [PATCH 221/246] [AArch64] Add i128 and fp128 tests to `itofp.ll`. NFC

---
 llvm/test/CodeGen/AArch64/itofp.ll | 1791 ++++++++++++++++++++++++++++
 1 file changed, 1791 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index cdea8f8c5ed21..ac26ccc44128f 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -4,6 +4,227 @@
; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16

+; CHECK-GI: warning: Instruction selection used fallback path for stofp_i128_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_i128_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_i64_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_i64_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_i32_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_i32_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_i16_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_i16_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_i8_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_i8_f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_i128_f64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_i128_f64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_i128_f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_i128_f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_i128_f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_i128_f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v2i128_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v2i128_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i128_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i128_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v2i64_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v2i64_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i64_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i64_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v2i128_v2f64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v2i128_v2f64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i128_v3f64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i128_v3f64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v2i32_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v2i32_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i32_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i32_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v2i16_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v2i16_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i16_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i16_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v2i8_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v2i8_v2f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i8_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v2i128_v2f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v2i128_v2f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i128_v3f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i128_v3f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v2i128_v2f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v2i128_v2f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i128_v3f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i128_v3f16
+
+define fp128 @stofp_i128_f128(i128 %a) {
+; CHECK-LABEL: stofp_i128_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floattitf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i128 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @utofp_i128_f128(i128 %a) {
+; CHECK-LABEL: utofp_i128_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floatuntitf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i128 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @stofp_i64_f128(i64 %a) {
+; CHECK-LABEL: stofp_i64_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floatditf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i64 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @utofp_i64_f128(i64 %a) {
+; CHECK-LABEL: utofp_i64_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floatunditf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i64 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @stofp_i32_f128(i32 %a) {
+; CHECK-LABEL: stofp_i32_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i32 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @utofp_i32_f128(i32 %a) {
+; CHECK-LABEL: utofp_i32_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i32 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @stofp_i16_f128(i16 %a) {
+; CHECK-LABEL: stofp_i16_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: sxth w0, w0
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i16 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @utofp_i16_f128(i16 %a) {
+; CHECK-LABEL: utofp_i16_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: and w0, w0, #0xffff
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i16 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @stofp_i8_f128(i8 %a) {
+; CHECK-LABEL: stofp_i8_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: sxtb w0, w0
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i8 %a to fp128
+ ret fp128 %c
+}
+
+define fp128 @utofp_i8_f128(i8 %a) {
+; CHECK-LABEL: utofp_i8_f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: and w0, w0, #0xff
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i8 %a to fp128
+ ret fp128 %c
+}
+
+define double @stofp_i128_f64(i128 %a) {
+; CHECK-LABEL: stofp_i128_f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floattidf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i128 %a to double
+ ret double %c
+}
+
+define double @utofp_i128_f64(i128 %a) {
+; CHECK-LABEL: utofp_i128_f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floatuntidf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i128 %a to double
+ ret double %c
+}
+
define double @stofp_i64_f64(i64 %a) {
; CHECK-LABEL: stofp_i64_f64:
; CHECK: // %bb.0: // %entry
@@ -88,6 +309,34 @@ entry:
ret double %c
}

+define float @stofp_i128_f32(i128 %a) {
+; CHECK-LABEL: stofp_i128_f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floattisf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp i128 %a to float
+ ret float %c
+}
+
+define float @utofp_i128_f32(i128 %a) {
+; CHECK-LABEL: utofp_i128_f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __floatuntisf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp i128 %a to float
+ ret float %c
+}
+
define float @stofp_i64_f32(i64 %a) {
; CHECK-LABEL: stofp_i64_f32:
; CHECK: // %bb.0: // %entry
@@ -172,6 +421,92 @@ entry:
ret float %c
}

+define half @stofp_i128_f16(i128 %a) {
+; CHECK-SD-NOFP16-LABEL: stofp_i128_f16:
+; CHECK-SD-NOFP16: // %bb.0: // %entry
+; CHECK-SD-NOFP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NOFP16-NEXT: bl __floattisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: stofp_i128_f16:
+; CHECK-SD-FP16: // %bb.0: // %entry
+; CHECK-SD-FP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-FP16-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-FP16-NEXT: .cfi_offset w30, -16
+; CHECK-SD-FP16-NEXT: bl __floattihf
+; CHECK-SD-FP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-NOFP16-LABEL: stofp_i128_f16:
+; CHECK-GI-NOFP16: // %bb.0: // %entry
+; CHECK-GI-NOFP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NOFP16-NEXT: bl __floattisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: stofp_i128_f16:
+; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-FP16-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-FP16-NEXT: .cfi_offset w30, -16
+; CHECK-GI-FP16-NEXT: bl __floattihf
+; CHECK-GI-FP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ret
+entry:
+ %c = sitofp i128 %a to half
+ ret half %c
+}
+
+define half @utofp_i128_f16(i128 %a) {
+; CHECK-SD-NOFP16-LABEL: utofp_i128_f16:
+; CHECK-SD-NOFP16: // %bb.0: // %entry
+; CHECK-SD-NOFP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NOFP16-NEXT: bl __floatuntisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: utofp_i128_f16:
+; CHECK-SD-FP16: // %bb.0: // %entry
+; CHECK-SD-FP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-FP16-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-FP16-NEXT: .cfi_offset w30, -16
+; CHECK-SD-FP16-NEXT: bl __floatuntihf
+; CHECK-SD-FP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-NOFP16-LABEL: utofp_i128_f16:
+; CHECK-GI-NOFP16: // %bb.0: // %entry
+; CHECK-GI-NOFP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NOFP16-NEXT: bl __floatuntisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: utofp_i128_f16:
+; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-FP16-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-FP16-NEXT: .cfi_offset w30, -16
+; CHECK-GI-FP16-NEXT: bl __floatuntihf
+; CHECK-GI-FP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ret
+entry:
+ %c = uitofp i128 %a to half
+ ret half %c
+}
+
define half @stofp_i64_f16(i64 %a) {
; CHECK-SD-NOFP16-LABEL: stofp_i64_f16:
; CHECK-SD-NOFP16: // %bb.0: // %entry
@@ -404,6 +739,396 @@ entry:
ret half %c
}

+define <2 x fp128> @stofp_v2i128_v2f128(<2 x i128> %a) {
+; CHECK-LABEL: stofp_v2i128_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x3
+; CHECK-NEXT: mov x20, x2
+; CHECK-NEXT: bl __floattitf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floattitf
+; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i128> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <2 x fp128> @utofp_v2i128_v2f128(<2 x i128> %a) {
+; CHECK-LABEL: utofp_v2i128_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x3
+; CHECK-NEXT: mov x20, x2
+; CHECK-NEXT: bl __floatuntitf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatuntitf
+; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i128> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <3 x fp128> @stofp_v3i128_v3f128(<3 x i128> %a) {
+; CHECK-LABEL: stofp_v3i128_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 80
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w22, -32
+; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: mov x19, x5
+; CHECK-NEXT: mov x20, x4
+; CHECK-NEXT: mov x21, x3
+; CHECK-NEXT: mov x22, x2
+; CHECK-NEXT: bl __floattitf
+; CHECK-NEXT: mov x0, x22
+; CHECK-NEXT: mov x1, x21
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floattitf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floattitf
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i128> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
+define <3 x fp128> @utofp_v3i128_v3f128(<3 x i128> %a) {
+; CHECK-LABEL: utofp_v3i128_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 80
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w22, -32
+; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: mov x19, x5
+; CHECK-NEXT: mov x20, x4
+; CHECK-NEXT: mov x21, x3
+; CHECK-NEXT: mov x22, x2
+; CHECK-NEXT: bl __floatuntitf
+; CHECK-NEXT: mov x0, x22
+; CHECK-NEXT: mov x1, x21
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatuntitf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatuntitf
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i128> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
+define <2 x fp128> @stofp_v2i64_v2f128(<2 x i64> %a) {
+; CHECK-LABEL: stofp_v2i64_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatditf
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: mov x0, v0.d[1]
+; CHECK-NEXT: bl __floatditf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i64> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <2 x fp128> @utofp_v2i64_v2f128(<2 x i64> %a) {
+; CHECK-LABEL: utofp_v2i64_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatunditf
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: mov x0, v0.d[1]
+; CHECK-NEXT: bl __floatunditf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i64> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <3 x fp128> @stofp_v3i64_v3f128(<3 x i64> %a) {
+; CHECK-LABEL: stofp_v3i64_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: str q2, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatditf
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: bl __floatditf
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: bl __floatditf
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i64> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
+define <3 x fp128> @utofp_v3i64_v3f128(<3 x i64> %a) {
+; CHECK-LABEL: utofp_v3i64_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: str q2, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatunditf
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: bl __floatunditf
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: bl __floatunditf
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i64> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
+define <2 x double> @stofp_v2i128_v2f64(<2 x i128> %a) {
+; CHECK-LABEL: stofp_v2i128_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: bl __floattidf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floattidf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i128> %a to <2 x double>
+ ret <2 x double> %c
+}
+
+define <2 x double> @utofp_v2i128_v2f64(<2 x i128> %a) {
+; CHECK-LABEL: utofp_v2i128_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: bl __floatuntidf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatuntidf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i128> %a to <2 x double>
+ ret <2 x double> %c
+}
+
+define <3 x double> @stofp_v3i128_v3f64(<3 x i128> %a) {
+; CHECK-LABEL: stofp_v3i128_v3f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp d9, d8, [sp, #-64]! // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w22, -32
+; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: .cfi_offset b8, -56
+; CHECK-NEXT: .cfi_offset b9, -64
+; CHECK-NEXT: mov x19, x5
+; CHECK-NEXT: mov x20, x4
+; CHECK-NEXT: mov x21, x3
+; CHECK-NEXT: mov x22, x2
+; CHECK-NEXT: bl __floattidf
+; CHECK-NEXT: mov x0, x22
+; CHECK-NEXT: mov x1, x21
+; CHECK-NEXT: fmov d8, d0
+; CHECK-NEXT: bl __floattidf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: fmov d9, d0
+; CHECK-NEXT: bl __floattidf
+; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: fmov d2, d0
+; CHECK-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: fmov d0, d8
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: fmov d1, d9
+; CHECK-NEXT: ldp d9, d8, [sp], #64 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i128> %a to <3 x double>
+ ret <3 x double> %c
+}
+
+define <3 x double> @utofp_v3i128_v3f64(<3 x i128> %a) {
+; CHECK-LABEL: utofp_v3i128_v3f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp d9, d8, [sp, #-64]! // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w22, -32
+; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: .cfi_offset b8, -56
+; CHECK-NEXT: .cfi_offset b9, -64
+; CHECK-NEXT: mov x19, x5
+; CHECK-NEXT: mov x20, x4
+; CHECK-NEXT: mov x21, x3
+; CHECK-NEXT: mov x22, x2
+; CHECK-NEXT: bl __floatuntidf
+; CHECK-NEXT: mov x0, x22
+; CHECK-NEXT: mov x1, x21
+; CHECK-NEXT: fmov d8, d0
+; CHECK-NEXT: bl __floatuntidf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: fmov d9, d0
+; CHECK-NEXT: bl __floatuntidf
+; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: fmov d2, d0
+; CHECK-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: fmov d0, d8
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: fmov d1, d9
+; CHECK-NEXT: ldp d9, d8, [sp], #64 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i128> %a to <3 x double>
+ ret <3 x double> %c
+}
+
define <2 x double> @stofp_v2i64_v2f64(<2 x i64> %a) {
; CHECK-LABEL: stofp_v2i64_v2f64:
; CHECK: // %bb.0: // %entry
@@ -712,6 +1437,114 @@ entry:
ret <32 x double> %c
}
+define <2 x fp128> @stofp_v2i32_v2f128(<2 x i32> %a) {
+; CHECK-LABEL: stofp_v2i32_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mov w0, v1.s[1]
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i32> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <2 x fp128> @utofp_v2i32_v2f128(<2 x i32> %a) {
+; CHECK-LABEL: utofp_v2i32_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mov w0, v1.s[1]
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i32> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <3 x fp128> @stofp_v3i32_v3f128(<3 x i32> %a) {
+; CHECK-LABEL: stofp_v3i32_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: str q0, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: fmov w0, s1
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: mov w0, v0.s[1]
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldp q0, q2, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i32> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
+define <3 x fp128> @utofp_v3i32_v3f128(<3 x i32> %a) {
+; CHECK-LABEL: utofp_v3i32_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: str q0, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: fmov w0, s1
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: mov w0, v0.s[1]
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldp q0, q2, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i32> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
define <2 x double> @stofp_v2i32_v2f64(<2 x i32> %a) {
; CHECK-LABEL: stofp_v2i32_v2f64:
; CHECK: // %bb.0: // %entry
@@ -1166,6 +1999,118 @@ entry:
ret <32 x double> %c
}
+define <2 x fp128> @stofp_v2i16_v2f128(<2 x i16> %a) {
+; CHECK-LABEL: stofp_v2i16_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sxth w0, w8
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mov w8, v1.s[1]
+; CHECK-NEXT: sxth w0, w8
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i16> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <2 x fp128> @utofp_v2i16_v2f128(<2 x i16> %a) {
+; CHECK-LABEL: utofp_v2i16_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: and w0, w8, #0xffff
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mov w8, v1.s[1]
+; CHECK-NEXT: and w0, w8, #0xffff
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i16> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <3 x fp128> @stofp_v3i16_v3f128(<3 x i16> %a) {
+; CHECK-LABEL: stofp_v3i16_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: smov w0, v0.h[0]
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: smov w0, v1.h[1]
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: smov w0, v1.h[2]
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i16> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
+define <3 x fp128> @utofp_v3i16_v3f128(<3 x i16> %a) {
+; CHECK-LABEL: utofp_v3i16_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: umov w0, v1.h[1]
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: umov w0, v1.h[2]
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i16> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
define <2 x double> @stofp_v2i16_v2f64(<2 x i16> %a) {
; CHECK-SD-LABEL: stofp_v2i16_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -1704,6 +2649,122 @@ entry:
ret <32 x double> %c
}
+define <2 x fp128> @stofp_v2i8_v2f128(<2 x i8> %a) {
+; CHECK-LABEL: stofp_v2i8_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sxtb w0, w8
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mov w8, v1.s[1]
+; CHECK-NEXT: sxtb w0, w8
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i8> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <2 x fp128> @utofp_v2i8_v2f128(<2 x i8> %a) {
+; CHECK-LABEL: utofp_v2i8_v2f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: and w0, w8, #0xff
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mov w8, v1.s[1]
+; CHECK-NEXT: and w0, w8, #0xff
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i8> %a to <2 x fp128>
+ ret <2 x fp128> %c
+}
+
+define <3 x fp128> @stofp_v3i8_v3f128(<3 x i8> %a) {
+; CHECK-LABEL: stofp_v3i8_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: sxtb w0, w0
+; CHECK-NEXT: mov w19, w2
+; CHECK-NEXT: mov w20, w1
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: sxtb w0, w20
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: sxtb w0, w19
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatsitf
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i8> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
+define <3 x fp128> @utofp_v3i8_v3f128(<3 x i8> %a) {
+; CHECK-LABEL: utofp_v3i8_v3f128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: and w0, w0, #0xff
+; CHECK-NEXT: mov w19, w2
+; CHECK-NEXT: mov w20, w1
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: and w0, w20, #0xff
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: and w0, w19, #0xff
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatunsitf
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i8> %a to <3 x fp128>
+ ret <3 x fp128> %c
+}
+
define <2 x double> @stofp_v2i8_v2f64(<2 x i8> %a) {
; CHECK-SD-LABEL: stofp_v2i8_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -2612,6 +3673,164 @@ entry:
ret <32 x double> %c
}
+define <2 x float> @stofp_v2i128_v2f32(<2 x i128> %a) {
+; CHECK-LABEL: stofp_v2i128_v2f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: bl __floattisf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floattisf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <2 x i128> %a to <2 x float>
+ ret <2 x float> %c
+}
+
+define <2 x float> @utofp_v2i128_v2f32(<2 x i128> %a) {
+; CHECK-LABEL: utofp_v2i128_v2f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: bl __floatuntisf
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatuntisf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <2 x i128> %a to <2 x float>
+ ret <2 x float> %c
+}
+
+define <3 x float> @stofp_v3i128_v3f32(<3 x i128> %a) {
+; CHECK-LABEL: stofp_v3i128_v3f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w22, -32
+; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: mov x21, x1
+; CHECK-NEXT: mov x22, x0
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: mov x19, x5
+; CHECK-NEXT: mov x20, x4
+; CHECK-NEXT: bl __floattisf
+; CHECK-NEXT: mov x0, x22
+; CHECK-NEXT: mov x1, x21
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floattisf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floattisf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: mov v1.s[2], v0.s[0]
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ret
+entry:
+ %c = sitofp <3 x i128> %a to <3 x float>
+ ret <3 x float> %c
+}
+
+define <3 x float> @utofp_v3i128_v3f32(<3 x i128> %a) {
+; CHECK-LABEL: utofp_v3i128_v3f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w22, -32
+; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: mov x21, x1
+; CHECK-NEXT: mov x22, x0
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: mov x19, x5
+; CHECK-NEXT: mov x20, x4
+; CHECK-NEXT: bl __floatuntisf
+; CHECK-NEXT: mov x0, x22
+; CHECK-NEXT: mov x1, x21
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatuntisf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl __floatuntisf
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: mov v1.s[2], v0.s[0]
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ret
+entry:
+ %c = uitofp <3 x i128> %a to <3 x float>
+ ret <3 x float> %c
+}
+
define <2 x float> @stofp_v2i64_v2f32(<2 x i64> %a) {
; CHECK-LABEL: stofp_v2i64_v2f32:
; CHECK: // %bb.0: // %entry
@@ -3818,6 +5037,578 @@ entry:
ret <32 x float> %c
}
+define <2 x half> @stofp_v2i128_v2f16(<2 x i128> %a) {
+; CHECK-SD-NOFP16-LABEL: stofp_v2i128_v2f16:
+; CHECK-SD-NOFP16: // %bb.0: // %entry
+; CHECK-SD-NOFP16-NEXT: sub sp, sp, #48
+; CHECK-SD-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: .cfi_def_cfa_offset 48
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NOFP16-NEXT: mov x19, x1
+; CHECK-SD-NOFP16-NEXT: mov x20, x0
+; CHECK-SD-NOFP16-NEXT: mov x0, x2
+; CHECK-SD-NOFP16-NEXT: mov x1, x3
+; CHECK-SD-NOFP16-NEXT: bl __floattisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: mov x0, x20
+; CHECK-SD-NOFP16-NEXT: mov x1, x19
+; CHECK-SD-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: bl __floattisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-SD-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NOFP16-NEXT: add sp, sp, #48
+; CHECK-SD-NOFP16-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: stofp_v2i128_v2f16:
+; CHECK-SD-FP16: // %bb.0: // %entry
+; CHECK-SD-FP16-NEXT: sub sp, sp, #48
+; CHECK-SD-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-FP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: .cfi_def_cfa_offset 48
+; CHECK-SD-FP16-NEXT: .cfi_offset w19, -8
+; CHECK-SD-FP16-NEXT: .cfi_offset w20, -16
+; CHECK-SD-FP16-NEXT: .cfi_offset w30, -32
+; CHECK-SD-FP16-NEXT: mov x19, x1
+; CHECK-SD-FP16-NEXT: mov x20, x0
+; CHECK-SD-FP16-NEXT: mov x0, x2
+; CHECK-SD-FP16-NEXT: mov x1, x3
+; CHECK-SD-FP16-NEXT: bl __floattihf
+; CHECK-SD-FP16-NEXT: mov x0, x20
+; CHECK-SD-FP16-NEXT: mov x1, x19
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: bl __floattihf
+; CHECK-SD-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-SD-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-FP16-NEXT: add sp, sp, #48
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-NOFP16-LABEL: stofp_v2i128_v2f16:
+; CHECK-GI-NOFP16: // %bb.0: // %entry
+; CHECK-GI-NOFP16-NEXT: sub sp, sp, #48
+; CHECK-GI-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: .cfi_def_cfa_offset 48
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NOFP16-NEXT: mov x19, x1
+; CHECK-GI-NOFP16-NEXT: mov x20, x0
+; CHECK-GI-NOFP16-NEXT: mov x0, x2
+; CHECK-GI-NOFP16-NEXT: mov x1, x3
+; CHECK-GI-NOFP16-NEXT: bl __floattisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: mov x0, x20
+; CHECK-GI-NOFP16-NEXT: mov x1, x19
+; CHECK-GI-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: bl __floattisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NOFP16-NEXT: add sp, sp, #48
+; CHECK-GI-NOFP16-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: stofp_v2i128_v2f16:
+; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: sub sp, sp, #48
+; CHECK-GI-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-FP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: .cfi_def_cfa_offset 48
+; CHECK-GI-FP16-NEXT: .cfi_offset w19, -8
+; CHECK-GI-FP16-NEXT: .cfi_offset w20, -16
+; CHECK-GI-FP16-NEXT: .cfi_offset w30, -32
+; CHECK-GI-FP16-NEXT: mov x19, x1
+; CHECK-GI-FP16-NEXT: mov x20, x0
+; CHECK-GI-FP16-NEXT: mov x0, x2
+; CHECK-GI-FP16-NEXT: mov x1, x3
+; CHECK-GI-FP16-NEXT: bl __floattihf
+; CHECK-GI-FP16-NEXT: mov x0, x20
+; CHECK-GI-FP16-NEXT: mov x1, x19
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: bl __floattihf
+; CHECK-GI-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-FP16-NEXT: add sp, sp, #48
+; CHECK-GI-FP16-NEXT: ret
+entry:
+ %c = sitofp <2 x i128> %a to <2 x half>
+ ret <2 x half> %c
+}
+
+define <2 x half> @utofp_v2i128_v2f16(<2 x i128> %a) {
+; CHECK-SD-NOFP16-LABEL: utofp_v2i128_v2f16:
+; CHECK-SD-NOFP16: // %bb.0: // %entry
+; CHECK-SD-NOFP16-NEXT: sub sp, sp, #48
+; CHECK-SD-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: .cfi_def_cfa_offset 48
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NOFP16-NEXT: mov x19, x1
+; CHECK-SD-NOFP16-NEXT: mov x20, x0
+; CHECK-SD-NOFP16-NEXT: mov x0, x2
+; CHECK-SD-NOFP16-NEXT: mov x1, x3
+; CHECK-SD-NOFP16-NEXT: bl __floatuntisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: mov x0, x20
+; CHECK-SD-NOFP16-NEXT: mov x1, x19
+; CHECK-SD-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: bl __floatuntisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-SD-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NOFP16-NEXT: add sp, sp, #48
+; CHECK-SD-NOFP16-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: utofp_v2i128_v2f16:
+; CHECK-SD-FP16: // %bb.0: // %entry
+; CHECK-SD-FP16-NEXT: sub sp, sp, #48
+; CHECK-SD-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-FP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: .cfi_def_cfa_offset 48
+; CHECK-SD-FP16-NEXT: .cfi_offset w19, -8
+; CHECK-SD-FP16-NEXT: .cfi_offset w20, -16
+; CHECK-SD-FP16-NEXT: .cfi_offset w30, -32
+; CHECK-SD-FP16-NEXT: mov x19, x1
+; CHECK-SD-FP16-NEXT: mov x20, x0
+; CHECK-SD-FP16-NEXT: mov x0, x2
+; CHECK-SD-FP16-NEXT: mov x1, x3
+; CHECK-SD-FP16-NEXT: bl __floatuntihf
+; CHECK-SD-FP16-NEXT: mov x0, x20
+; CHECK-SD-FP16-NEXT: mov x1, x19
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: bl __floatuntihf
+; CHECK-SD-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-SD-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-FP16-NEXT: add sp, sp, #48
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-NOFP16-LABEL: utofp_v2i128_v2f16:
+; CHECK-GI-NOFP16: // %bb.0: // %entry
+; CHECK-GI-NOFP16-NEXT: sub sp, sp, #48
+; CHECK-GI-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: .cfi_def_cfa_offset 48
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NOFP16-NEXT: mov x19, x1
+; CHECK-GI-NOFP16-NEXT: mov x20, x0
+; CHECK-GI-NOFP16-NEXT: mov x0, x2
+; CHECK-GI-NOFP16-NEXT: mov x1, x3
+; CHECK-GI-NOFP16-NEXT: bl __floatuntisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: mov x0, x20
+; CHECK-GI-NOFP16-NEXT: mov x1, x19
+; CHECK-GI-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: bl __floatuntisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NOFP16-NEXT: add sp, sp, #48
+; CHECK-GI-NOFP16-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: utofp_v2i128_v2f16:
+; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: sub sp, sp, #48
+; CHECK-GI-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-FP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: .cfi_def_cfa_offset 48
+; CHECK-GI-FP16-NEXT: .cfi_offset w19, -8
+; CHECK-GI-FP16-NEXT: .cfi_offset w20, -16
+; CHECK-GI-FP16-NEXT: .cfi_offset w30, -32
+; CHECK-GI-FP16-NEXT: mov x19, x1
+; CHECK-GI-FP16-NEXT: mov x20, x0
+; CHECK-GI-FP16-NEXT: mov x0, x2
+; CHECK-GI-FP16-NEXT: mov x1, x3
+; CHECK-GI-FP16-NEXT: bl __floatuntihf
+; CHECK-GI-FP16-NEXT: mov x0, x20
+; CHECK-GI-FP16-NEXT: mov x1, x19
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: bl __floatuntihf
+; CHECK-GI-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-FP16-NEXT: add sp, sp, #48
+; CHECK-GI-FP16-NEXT: ret
+entry:
+ %c = uitofp <2 x i128> %a to <2 x half>
+ ret <2 x half> %c
+}
+
+define <3 x half> @stofp_v3i128_v3f16(<3 x i128> %a) {
+; CHECK-SD-NOFP16-LABEL: stofp_v3i128_v3f16:
+; CHECK-SD-NOFP16: // %bb.0: // %entry
+; CHECK-SD-NOFP16-NEXT: sub sp, sp, #64
+; CHECK-SD-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w21, -24
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w22, -32
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w30, -48
+; CHECK-SD-NOFP16-NEXT: mov x21, x1
+; CHECK-SD-NOFP16-NEXT: mov x22, x0
+; CHECK-SD-NOFP16-NEXT: mov x0, x2
+; CHECK-SD-NOFP16-NEXT: mov x1, x3
+; CHECK-SD-NOFP16-NEXT: mov x19, x5
+; CHECK-SD-NOFP16-NEXT: mov x20, x4
+; CHECK-SD-NOFP16-NEXT: bl __floattisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: mov x0, x22
+; CHECK-SD-NOFP16-NEXT: mov x1, x21
+; CHECK-SD-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: bl __floattisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: mov x0, x20
+; CHECK-SD-NOFP16-NEXT: mov x1, x19
+; CHECK-SD-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-SD-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: bl __floattisf
+; CHECK-SD-NOFP16-NEXT: fcvt h1, s0
+; CHECK-SD-NOFP16-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-SD-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NOFP16-NEXT: add sp, sp, #64
+; CHECK-SD-NOFP16-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: stofp_v3i128_v3f16:
+; CHECK-SD-FP16: // %bb.0: // %entry
+; CHECK-SD-FP16-NEXT: sub sp, sp, #64
+; CHECK-SD-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-FP16-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-FP16-NEXT: .cfi_offset w19, -8
+; CHECK-SD-FP16-NEXT: .cfi_offset w20, -16
+; CHECK-SD-FP16-NEXT: .cfi_offset w21, -24
+; CHECK-SD-FP16-NEXT: .cfi_offset w22, -32
+; CHECK-SD-FP16-NEXT: .cfi_offset w30, -48
+; CHECK-SD-FP16-NEXT: mov x21, x1
+; CHECK-SD-FP16-NEXT: mov x22, x0
+; CHECK-SD-FP16-NEXT: mov x0, x2
+; CHECK-SD-FP16-NEXT: mov x1, x3
+; CHECK-SD-FP16-NEXT: mov x19, x5
+; CHECK-SD-FP16-NEXT: mov x20, x4
+; CHECK-SD-FP16-NEXT: bl __floattihf
+; CHECK-SD-FP16-NEXT: mov x0, x22
+; CHECK-SD-FP16-NEXT: mov x1, x21
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: bl __floattihf
+; CHECK-SD-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: mov x0, x20
+; CHECK-SD-FP16-NEXT: mov x1, x19
+; CHECK-SD-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-SD-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: bl __floattihf
+; CHECK-SD-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-FP16-NEXT: mov v1.h[2], v0.h[0]
+; CHECK-SD-FP16-NEXT: fmov d0, d1
+; CHECK-SD-FP16-NEXT: add sp, sp, #64
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-NOFP16-LABEL: stofp_v3i128_v3f16:
+; CHECK-GI-NOFP16: // %bb.0: // %entry
+; CHECK-GI-NOFP16-NEXT: sub sp, sp, #64
+; CHECK-GI-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w30, -48
+; CHECK-GI-NOFP16-NEXT: mov x21, x1
+; CHECK-GI-NOFP16-NEXT: mov x22, x0
+; CHECK-GI-NOFP16-NEXT: mov x0, x2
+; CHECK-GI-NOFP16-NEXT: mov x1, x3
+; CHECK-GI-NOFP16-NEXT: mov x19, x5
+; CHECK-GI-NOFP16-NEXT: mov x20, x4
+; CHECK-GI-NOFP16-NEXT: bl __floattisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: mov x0, x22
+; CHECK-GI-NOFP16-NEXT: mov x1, x21
+; CHECK-GI-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: bl __floattisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: mov x0, x20
+; CHECK-GI-NOFP16-NEXT: mov x1, x19
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: bl __floattisf
+; CHECK-GI-NOFP16-NEXT: fcvt h1, s0
+; CHECK-GI-NOFP16-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NOFP16-NEXT: add sp, sp, #64
+; CHECK-GI-NOFP16-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: stofp_v3i128_v3f16:
+; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: sub sp, sp, #64
+; CHECK-GI-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-FP16-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-FP16-NEXT: .cfi_offset w19, -8
+; CHECK-GI-FP16-NEXT: .cfi_offset w20, -16
+; CHECK-GI-FP16-NEXT: .cfi_offset w21, -24
+; CHECK-GI-FP16-NEXT: .cfi_offset w22, -32
+; CHECK-GI-FP16-NEXT: .cfi_offset w30, -48
+; CHECK-GI-FP16-NEXT: mov x21, x1
+; CHECK-GI-FP16-NEXT: mov x22, x0
+; CHECK-GI-FP16-NEXT: mov x0, x2
+; CHECK-GI-FP16-NEXT: mov x1, x3
+; CHECK-GI-FP16-NEXT: mov x19, x5
+; CHECK-GI-FP16-NEXT: mov x20, x4
+; CHECK-GI-FP16-NEXT: bl __floattihf
+; CHECK-GI-FP16-NEXT: mov x0, x22
+; CHECK-GI-FP16-NEXT: mov x1, x21
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: bl __floattihf
+; CHECK-GI-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: mov x0, x20
+; CHECK-GI-FP16-NEXT: mov x1, x19
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: bl __floattihf
+; CHECK-GI-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-FP16-NEXT: mov v1.h[2], v0.h[0]
+; CHECK-GI-FP16-NEXT: fmov d0, d1
+; CHECK-GI-FP16-NEXT: add sp, sp, #64
+; CHECK-GI-FP16-NEXT: ret
+entry:
+ %c = sitofp <3 x i128> %a to <3 x half>
+ ret <3 x half> %c
+}
+
+define <3 x half> @utofp_v3i128_v3f16(<3 x i128> %a) {
+; CHECK-SD-NOFP16-LABEL: utofp_v3i128_v3f16:
+; CHECK-SD-NOFP16: // %bb.0: // %entry
+; CHECK-SD-NOFP16-NEXT: sub sp, sp, #64
+; CHECK-SD-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w21, -24
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w22, -32
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w30, -48
+; CHECK-SD-NOFP16-NEXT: mov x21, x1
+; CHECK-SD-NOFP16-NEXT: mov x22, x0
+; CHECK-SD-NOFP16-NEXT: mov x0, x2
+; CHECK-SD-NOFP16-NEXT: mov x1, x3
+; CHECK-SD-NOFP16-NEXT: mov x19, x5
+; CHECK-SD-NOFP16-NEXT: mov x20, x4
+; CHECK-SD-NOFP16-NEXT: bl __floatuntisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: mov x0, x22
+; CHECK-SD-NOFP16-NEXT: mov x1, x21
+; CHECK-SD-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: bl __floatuntisf
+; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: mov x0, x20
+; CHECK-SD-NOFP16-NEXT: mov x1, x19
+; CHECK-SD-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-SD-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: bl __floatuntisf
+; CHECK-SD-NOFP16-NEXT: fcvt h1, s0
+; CHECK-SD-NOFP16-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-SD-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NOFP16-NEXT: add sp, sp, #64
+; CHECK-SD-NOFP16-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: utofp_v3i128_v3f16:
+; CHECK-SD-FP16: // %bb.0: // %entry
+; CHECK-SD-FP16-NEXT: sub sp, sp, #64
+; CHECK-SD-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-FP16-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-FP16-NEXT: .cfi_offset w19, -8
+; CHECK-SD-FP16-NEXT: .cfi_offset w20, -16
+; CHECK-SD-FP16-NEXT: .cfi_offset w21, -24
+; CHECK-SD-FP16-NEXT: .cfi_offset w22, -32
+; CHECK-SD-FP16-NEXT: .cfi_offset w30, -48
+; CHECK-SD-FP16-NEXT: mov x21, x1
+; CHECK-SD-FP16-NEXT: mov x22, x0
+; CHECK-SD-FP16-NEXT: mov x0, x2
+; CHECK-SD-FP16-NEXT: mov x1, x3
+; CHECK-SD-FP16-NEXT: mov x19, x5
+; CHECK-SD-FP16-NEXT: mov x20, x4
+; CHECK-SD-FP16-NEXT: bl __floatuntihf
+; CHECK-SD-FP16-NEXT: mov x0, x22
+; CHECK-SD-FP16-NEXT: mov x1, x21
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: bl __floatuntihf
+; CHECK-SD-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: mov x0, x20
+; CHECK-SD-FP16-NEXT: mov x1, x19
+; CHECK-SD-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-SD-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: bl __floatuntihf
+; CHECK-SD-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-SD-FP16-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-FP16-NEXT: mov v1.h[2], v0.h[0]
+; CHECK-SD-FP16-NEXT: fmov d0, d1
+; CHECK-SD-FP16-NEXT: add sp, sp, #64
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-NOFP16-LABEL: utofp_v3i128_v3f16:
+; CHECK-GI-NOFP16: // %bb.0: // %entry
+; CHECK-GI-NOFP16-NEXT: sub sp, sp, #64
+; CHECK-GI-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w30, -48
+; CHECK-GI-NOFP16-NEXT: mov x21, x1
+; CHECK-GI-NOFP16-NEXT: mov x22, x0
+; CHECK-GI-NOFP16-NEXT: mov x0, x2
+; CHECK-GI-NOFP16-NEXT: mov x1, x3
+; CHECK-GI-NOFP16-NEXT: mov x19, x5
+; CHECK-GI-NOFP16-NEXT: mov x20, x4
+; CHECK-GI-NOFP16-NEXT: bl __floatuntisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: mov x0, x22
+; CHECK-GI-NOFP16-NEXT: mov x1, x21
+; CHECK-GI-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: bl __floatuntisf
+; CHECK-GI-NOFP16-NEXT: fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: mov x0, x20
+; CHECK-GI-NOFP16-NEXT: mov x1, x19
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: bl __floatuntisf
+; CHECK-GI-NOFP16-NEXT: fcvt h1, s0
+; CHECK-GI-NOFP16-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NOFP16-NEXT: add sp, sp, #64
+; CHECK-GI-NOFP16-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: utofp_v3i128_v3f16:
+; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: sub sp, sp, #64
+; CHECK-GI-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-FP16-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-FP16-NEXT: .cfi_offset w19, -8
+; CHECK-GI-FP16-NEXT: .cfi_offset w20, -16
+; CHECK-GI-FP16-NEXT: .cfi_offset w21, -24
+; CHECK-GI-FP16-NEXT: .cfi_offset w22, -32
+; CHECK-GI-FP16-NEXT: .cfi_offset w30, -48
+; CHECK-GI-FP16-NEXT: mov x21, x1
+; CHECK-GI-FP16-NEXT: mov x22, x0
+; CHECK-GI-FP16-NEXT: mov x0, x2
+; CHECK-GI-FP16-NEXT: mov x1, x3
+; CHECK-GI-FP16-NEXT: mov x19, x5
+; CHECK-GI-FP16-NEXT: mov x20, x4
+; CHECK-GI-FP16-NEXT: bl __floatuntihf
+; CHECK-GI-FP16-NEXT: mov x0, x22
+; CHECK-GI-FP16-NEXT: mov x1, x21
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: bl __floatuntihf
+; CHECK-GI-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: mov x0, x20
+; CHECK-GI-FP16-NEXT: mov x1, x19
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: bl __floatuntihf
+; CHECK-GI-FP16-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: // kill: def $h0 killed $h0 def $q0
+; CHECK-GI-FP16-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-FP16-NEXT: mov v1.h[2], v0.h[0]
+; CHECK-GI-FP16-NEXT: fmov d0, d1
+; CHECK-GI-FP16-NEXT: add sp, sp, #64
+; CHECK-GI-FP16-NEXT: ret
+entry:
+ %c = uitofp <3 x i128> %a to <3 x half>
+ ret <3 x half> %c
+}
+
define <2 x half> @stofp_v2i64_v2f16(<2 x i64> %a) {
; CHECK-SD-NOFP16-LABEL: stofp_v2i64_v2f16:
; CHECK-SD-NOFP16: // %bb.0: // %entry
From 173514d58ec4e6166670f1e37a038df3865c8b96 Mon Sep 17 00:00:00 2001
From: Tianyi Guan <tguan at nvidia.com>
Date: Tue, 2 Jul 2024 16:42:12 +0100
Subject: [PATCH 222/246] [AArch64] Add fp128 tests in `fcmp.ll`. NFC

---
llvm/test/CodeGen/AArch64/fcmp.ll | 400 ++++++++++++++++++++++++++++++
1 file changed, 400 insertions(+)
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index b1ca88975a621..a76d0b36fa1aa 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -4,6 +4,226 @@
; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+
+; CHECK-GI: warning: Instruction selection used fallback path for f128_fp128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_i128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_double
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_float
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_i32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_half
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for v2f128_fp128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for v3f128_fp128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for v2f128_double
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for v3f128_double
+
+
+define fp128 @f128_fp128(fp128 %a, fp128 %b, fp128 %d, fp128 %e) {
+; CHECK-LABEL: f128_fp128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: stp q2, q3, [sp] // 32-byte Folded Spill
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: b.ge .LBB0_2
+; CHECK-NEXT: // %bb.1: // %entry
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: .LBB0_2: // %entry
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt fp128 %a, %b
+ %s = select i1 %c, fp128 %d, fp128 %e
+ ret fp128 %s
+}
+
+define i128 @f128_i128(fp128 %a, fp128 %b, i128 %d, i128 %e) {
+; CHECK-LABEL: f128_i128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 80
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w22, -32
+; CHECK-NEXT: .cfi_offset w30, -48
+; CHECK-NEXT: mov x19, x3
+; CHECK-NEXT: mov x20, x2
+; CHECK-NEXT: mov x21, x1
+; CHECK-NEXT: mov x22, x0
+; CHECK-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: csel x20, x22, x20, lt
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: csel x1, x21, x19, lt
+; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt fp128 %a, %b
+ %s = select i1 %c, i128 %d, i128 %e
+ ret i128 %s
+}
+
+define double @f128_double(fp128 %a, fp128 %b, double %d, double %e) {
+; CHECK-LABEL: f128_double:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: .cfi_offset b8, -24
+; CHECK-NEXT: .cfi_offset b9, -32
+; CHECK-NEXT: fmov d8, d3
+; CHECK-NEXT: fmov d9, d2
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: fcsel d0, d9, d8, lt
+; CHECK-NEXT: ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt fp128 %a, %b
+ %s = select i1 %c, double %d, double %e
+ ret double %s
+}
+
+define float @f128_float(fp128 %a, fp128 %b, float %d, float %e) {
+; CHECK-LABEL: f128_float:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: .cfi_offset b8, -24
+; CHECK-NEXT: .cfi_offset b9, -32
+; CHECK-NEXT: fmov s8, s3
+; CHECK-NEXT: fmov s9, s2
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: fcsel s0, s9, s8, lt
+; CHECK-NEXT: ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt fp128 %a, %b
+ %s = select i1 %c, float %d, float %e
+ ret float %s
+}
+
+define i32 @f128_i32(fp128 %a, fp128 %b, i32 %d, i32 %e) {
+; CHECK-LABEL: f128_i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov w19, w1
+; CHECK-NEXT: mov w20, w0
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: csel w0, w20, w19, lt
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt fp128 %a, %b
+ %s = select i1 %c, i32 %d, i32 %e
+ ret i32 %s
+}
+
+define half @f128_half(fp128 %a, fp128 %b, half %d, half %e) {
+; CHECK-SD-NOFP16-LABEL: f128_half:
+; CHECK-SD-NOFP16: // %bb.0: // %entry
+; CHECK-SD-NOFP16-NEXT: stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT: .cfi_def_cfa_offset 32
+; CHECK-SD-NOFP16-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NOFP16-NEXT: .cfi_offset b8, -24
+; CHECK-SD-NOFP16-NEXT: .cfi_offset b9, -32
+; CHECK-SD-NOFP16-NEXT: fmov s8, s3
+; CHECK-SD-NOFP16-NEXT: fmov s9, s2
+; CHECK-SD-NOFP16-NEXT: bl __lttf2
+; CHECK-SD-NOFP16-NEXT: cmp w0, #0
+; CHECK-SD-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: fcsel s0, s9, s8, lt
+; CHECK-SD-NOFP16-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-SD-NOFP16-NEXT: ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: f128_half:
+; CHECK-SD-FP16: // %bb.0: // %entry
+; CHECK-SD-FP16-NEXT: stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-FP16-NEXT: .cfi_def_cfa_offset 32
+; CHECK-SD-FP16-NEXT: .cfi_offset w30, -16
+; CHECK-SD-FP16-NEXT: .cfi_offset b8, -24
+; CHECK-SD-FP16-NEXT: .cfi_offset b9, -32
+; CHECK-SD-FP16-NEXT: fmov s8, s3
+; CHECK-SD-FP16-NEXT: fmov s9, s2
+; CHECK-SD-FP16-NEXT: bl __lttf2
+; CHECK-SD-FP16-NEXT: cmp w0, #0
+; CHECK-SD-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-FP16-NEXT: fcsel h0, h9, h8, lt
+; CHECK-SD-FP16-NEXT: ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-NOFP16-LABEL: f128_half:
+; CHECK-GI-NOFP16: // %bb.0: // %entry
+; CHECK-GI-NOFP16-NEXT: stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NOFP16-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NOFP16-NEXT: .cfi_offset b8, -24
+; CHECK-GI-NOFP16-NEXT: .cfi_offset b9, -32
+; CHECK-GI-NOFP16-NEXT: fmov s8, s3
+; CHECK-GI-NOFP16-NEXT: fmov s9, s2
+; CHECK-GI-NOFP16-NEXT: bl __lttf2
+; CHECK-GI-NOFP16-NEXT: cmp w0, #0
+; CHECK-GI-NOFP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: fcsel s0, s9, s8, lt
+; CHECK-GI-NOFP16-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-GI-NOFP16-NEXT: ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: f128_half:
+; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-FP16-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-FP16-NEXT: .cfi_offset w30, -16
+; CHECK-GI-FP16-NEXT: .cfi_offset b8, -24
+; CHECK-GI-FP16-NEXT: .cfi_offset b9, -32
+; CHECK-GI-FP16-NEXT: fmov s8, s3
+; CHECK-GI-FP16-NEXT: fmov s9, s2
+; CHECK-GI-FP16-NEXT: bl __lttf2
+; CHECK-GI-FP16-NEXT: cmp w0, #0
+; CHECK-GI-FP16-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-FP16-NEXT: fcsel h0, h9, h8, lt
+; CHECK-GI-FP16-NEXT: ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT: ret
+entry:
+ %c = fcmp olt fp128 %a, %b
+ %s = select i1 %c, half %d, half %e
+ ret half %s
+}
+
define double @f64_double(double %a, double %b, double %d, double %e) {
; CHECK-LABEL: f64_double:
; CHECK: // %bb.0: // %entry
@@ -135,6 +355,186 @@ entry:
ret i32 %s
}
+define <2 x fp128> @v2f128_fp128(<2 x fp128> %a, <2 x fp128> %b, <2 x fp128> %d, <2 x fp128> %e) {
+; CHECK-LABEL: v2f128_fp128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #112
+; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 112
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: stp q4, q5, [sp] // 32-byte Folded Spill
+; CHECK-NEXT: stp q1, q3, [sp, #32] // 32-byte Folded Spill
+; CHECK-NEXT: mov v1.16b, v2.16b
+; CHECK-NEXT: stp q7, q6, [sp, #64] // 32-byte Folded Spill
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: b.ge .LBB12_2
+; CHECK-NEXT: // %bb.1: // %entry
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT: .LBB12_2: // %entry
+; CHECK-NEXT: ldp q0, q1, [sp, #32] // 32-byte Folded Reload
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: b.ge .LBB12_4
+; CHECK-NEXT: // %bb.3: // %entry
+; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: .LBB12_4: // %entry
+; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #112
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt <2 x fp128> %a, %b
+ %s = select <2 x i1> %c, <2 x fp128> %d, <2 x fp128> %e
+ ret <2 x fp128> %s
+}
+
+define <3 x fp128> @v3f128_fp128(<3 x fp128> %a, <3 x fp128> %b, <3 x fp128> %d, <3 x fp128> %e) {
+; CHECK-LABEL: v3f128_fp128:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #112
+; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 112
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: stp q1, q4, [sp] // 32-byte Folded Spill
+; CHECK-NEXT: mov v1.16b, v3.16b
+; CHECK-NEXT: stp q2, q5, [sp, #32] // 32-byte Folded Spill
+; CHECK-NEXT: stp q6, q7, [sp, #64] // 32-byte Folded Spill
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: b.lt .LBB13_2
+; CHECK-NEXT: // %bb.1:
+; CHECK-NEXT: ldr q0, [sp, #128]
+; CHECK-NEXT: str q0, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .LBB13_2: // %entry
+; CHECK-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: b.lt .LBB13_4
+; CHECK-NEXT: // %bb.3:
+; CHECK-NEXT: ldr q0, [sp, #144]
+; CHECK-NEXT: str q0, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT: .LBB13_4: // %entry
+; CHECK-NEXT: ldp q0, q1, [sp, #32] // 32-byte Folded Reload
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: add x8, sp, #160
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: add x9, sp, #112
+; CHECK-NEXT: csel x8, x9, x8, lt
+; CHECK-NEXT: ldp q0, q1, [sp, #64] // 32-byte Folded Reload
+; CHECK-NEXT: ldr q2, [x8]
+; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #112
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt <3 x fp128> %a, %b
+ %s = select <3 x i1> %c, <3 x fp128> %d, <3 x fp128> %e
+ ret <3 x fp128> %s
+}
+
+
+define <2 x double> @v2f128_double(<2 x fp128> %a, <2 x fp128> %b, <2 x double> %d, <2 x double> %e) {
+; CHECK-LABEL: v2f128_double:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 96
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: mov v1.16b, v3.16b
+; CHECK-NEXT: stp q4, q5, [sp, #48] // 32-byte Folded Spill
+; CHECK-NEXT: str q2, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: sbfx x8, x8, #0, #1
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: sbfx x8, x8, #0, #1
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: ldp q2, q1, [sp, #48] // 32-byte Folded Reload
+; CHECK-NEXT: bsl v0.16b, v2.16b, v1.16b
+; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt <2 x fp128> %a, %b
+ %s = select <2 x i1> %c, <2 x double> %d, <2 x double> %e
+ ret <2 x double> %s
+}
+
+define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double> %d, <3 x double> %e) {
+; CHECK-LABEL: v3f128_double:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #160
+; CHECK-NEXT: str x30, [sp, #144] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 160
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: stp q2, q5, [sp, #112] // 32-byte Folded Spill
+; CHECK-NEXT: // kill: def $d6 killed $d6 def $q6
+; CHECK-NEXT: // kill: def $d7 killed $d7 def $q7
+; CHECK-NEXT: ldr d5, [sp, #184]
+; CHECK-NEXT: str q3, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: ldp d3, d2, [sp, #168]
+; CHECK-NEXT: mov v6.d[1], v7.d[0]
+; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: mov v1.16b, v4.16b
+; CHECK-NEXT: str q5, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT: ldr d5, [sp, #160]
+; CHECK-NEXT: mov v3.d[1], v2.d[0]
+; CHECK-NEXT: str q5, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT: stp q6, q3, [sp, #32] // 32-byte Folded Spill
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: sbfx x8, x8, #0, #1
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: sbfx x8, x8, #0, #1
+; CHECK-NEXT: fmov d1, x8
+; CHECK-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-NEXT: str q1, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: ldp q0, q1, [sp, #112] // 32-byte Folded Reload
+; CHECK-NEXT: bl __lttf2
+; CHECK-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: ldp q2, q4, [sp, #64] // 32-byte Folded Reload
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: sbfx x8, x8, #0, #1
+; CHECK-NEXT: ldr q3, [sp, #96] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #144] // 8-byte Folded Reload
+; CHECK-NEXT: bit v0.16b, v1.16b, v2.16b
+; CHECK-NEXT: fmov d2, x8
+; CHECK-NEXT: bsl v2.16b, v4.16b, v3.16b
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-NEXT: add sp, sp, #160
+; CHECK-NEXT: ret
+entry:
+ %c = fcmp olt <3 x fp128> %a, %b
+ %s = select <3 x i1> %c, <3 x double> %d, <3 x double> %e
+ ret <3 x double> %s
+}
+
define <2 x double> @v2f64_double(<2 x double> %a, <2 x double> %b, <2 x double> %d, <2 x double> %e) {
; CHECK-LABEL: v2f64_double:
; CHECK: // %bb.0: // %entry

From 328d9f62976defb96cba8102ea54f44cf88c8032 Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Thu, 4 Jul 2024 07:51:47 +0000
Subject: [PATCH 223/246] Reland "[lldb] Print empty enums as if they were
unrecognised normal enums (#97553)"

This reverts commit 927def49728371d746476e79a6570cd93a4d335c.

I've refactored the tests so that we're explicit about whether the
enum is signed or not, which means we use the proper types
throughout.
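
For context on the signedness handling these tests exercise: a 32-bit
enumerator defined as -1 has to be sign-extended to 64 bits before it can
match the value under inspection; zero-extending it instead yields
0x00000000FFFFFFFF, which compares unequal. A minimal standalone
illustration in plain C++ (no LLDB/LLVM types; all names here are made up
for the sketch):

    #include <cstdint>
    #include <iostream>

    int main() {
      // Bit pattern of a 4-byte signed enum whose enumerator is -1.
      uint32_t raw = 0xFFFFFFFFu;
      uint64_t zero_extended = raw; // 0x00000000FFFFFFFF
      uint64_t sign_extended =
          static_cast<uint64_t>(static_cast<int32_t>(raw)); // all ones
      // Only the sign-extended form equals a signed 64-bit -1.
      std::cout << std::hex << zero_extended << '\n' << sign_extended << '\n';
      return 0;
    }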
---
.../TypeSystem/Clang/TypeSystemClang.cpp | 33 +++++----
.../x86/debug-types-missing-signature.test | 4 +-
.../DumpValueObjectOptionsTests.cpp | 72 ++++++++++++-------
3 files changed, 67 insertions(+), 42 deletions(-)
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 9c77a5d6e66ee..f70efe5ed57e4 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -8656,20 +8656,25 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
// every enumerator is either a one bit value or a superset of the previous
// enumerators. Also 0 doesn't make sense when the enumerators are used as
// flags.
- for (auto *enumerator : enum_decl->enumerators()) {
- llvm::APSInt init_val = enumerator->getInitVal();
- uint64_t val =
- qual_type_is_signed ? init_val.getSExtValue() : init_val.getZExtValue();
- if (qual_type_is_signed)
- val = llvm::SignExtend64(val, 8 * byte_size);
- if (llvm::popcount(val) != 1 && (val & ~covered_bits) != 0)
- can_be_bitfield = false;
- covered_bits |= val;
- ++num_enumerators;
- if (val == enum_svalue) {
- // Found an exact match, that's all we need to do.
- s.PutCString(enumerator->getNameAsString());
- return true;
+ clang::EnumDecl::enumerator_range enumerators = enum_decl->enumerators();
+ if (enumerators.empty())
+ can_be_bitfield = false;
+ else {
+ for (auto *enumerator : enumerators) {
+ llvm::APSInt init_val = enumerator->getInitVal();
+ uint64_t val = qual_type_is_signed ? init_val.getSExtValue()
+ : init_val.getZExtValue();
+ if (qual_type_is_signed)
+ val = llvm::SignExtend64(val, 8 * byte_size);
+ if (llvm::popcount(val) != 1 && (val & ~covered_bits) != 0)
+ can_be_bitfield = false;
+ covered_bits |= val;
+ ++num_enumerators;
+ if (val == enum_svalue) {
+ // Found an exact match, that's all we need to do.
+ s.PutCString(enumerator->getNameAsString());
+ return true;
+ }
}
}
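
The restructured loop above keeps the existing bitfield-detection heuristic
unchanged and only adds the empty-enum bail-out. As a rough standalone
sketch of that classification (plain C++20 over raw enumerator values, not
the LLDB function itself):

    #include <bit>
    #include <cstdint>
    #include <vector>

    // True if the enumerator values look like bitfield flags: each value
    // is a single bit or a combination of bits already covered by earlier
    // enumerators. An enum with no enumerators never qualifies (the fix).
    static bool CanBeBitfieldLikeEnum(const std::vector<uint64_t> &values) {
      if (values.empty())
        return false;
      uint64_t covered_bits = 0;
      for (uint64_t val : values) {
        if (std::popcount(val) != 1 && (val & ~covered_bits) != 0)
          return false;
        covered_bits |= val;
      }
      return true;
    }

With enumerators {2, 4} this returns true, so the unit tests below expect
hex output; with {2, 3} it returns false and values print as decimal.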
diff --git a/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test b/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
index 548dd6cdbc275..b2c792ed6003e 100644
--- a/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
+++ b/lldb/test/Shell/SymbolFile/DWARF/x86/debug-types-missing-signature.test
@@ -22,5 +22,5 @@ PRINTEC: use of undeclared identifier 'EC'
RUN: %lldb %t -b -o "target variable a e ec" | FileCheck --check-prefix=VARS %s
VARS: (const (unnamed struct)) a = <incomplete type "const (unnamed struct)">
-VARS: (const (unnamed enum)) e = 0x1
-VARS: (const (unnamed enum)) ec = 0x1
+VARS: (const (unnamed enum)) e = 1
+VARS: (const (unnamed enum)) ec = 1
diff --git a/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp b/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
index a7ccd74721f66..950e981a3f5a4 100644
--- a/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
+++ b/lldb/unittests/ValueObject/DumpValueObjectOptionsTests.cpp
@@ -18,6 +18,8 @@
#include "gtest/gtest.h"
+#include <type_traits>
+
using namespace lldb;
using namespace lldb_private;
@@ -70,28 +72,12 @@ class ValueObjectMockProcessTest : public ::testing::Test {
m_type_system = m_holder->GetAST();
}
- CompilerType
- MakeEnumType(const std::vector<std::pair<const char *, int>> enumerators) {
- CompilerType uint_type = m_type_system->GetBuiltinTypeForEncodingAndBitSize(
- lldb::eEncodingUint, 32);
- CompilerType enum_type = m_type_system->CreateEnumerationType(
- "TestEnum", m_type_system->GetTranslationUnitDecl(),
- OptionalClangModuleID(), Declaration(), uint_type, false);
-
- m_type_system->StartTagDeclarationDefinition(enum_type);
- Declaration decl;
- for (auto [name, value] : enumerators)
- m_type_system->AddEnumerationValueToEnumerationType(enum_type, decl, name,
- value, 32);
- m_type_system->CompleteTagDeclarationDefinition(enum_type);
-
- return enum_type;
- }
-
- void TestDumpValueObject(
- CompilerType enum_type,
- const std::vector<
- std::tuple<uint32_t, DumpValueObjectOptions, const char *>> &tests) {
+ template <typename UnderlyingType>
+ void TestDumpEnum(
+ const std::vector<std::pair<const char *, UnderlyingType>> enumerators,
+ const std::vector<std::tuple<UnderlyingType, DumpValueObjectOptions,
+ const char *>> &tests) {
+ CompilerType enum_type = MakeEnumType(enumerators);
StreamString strm;
ConstString var_name("test_var");
ByteOrder endian = endian::InlHostByteOrder();
@@ -107,6 +93,27 @@ class ValueObjectMockProcessTest : public ::testing::Test {
}
}
+ template <typename UnderlyingType>
+ CompilerType MakeEnumType(
+ const std::vector<std::pair<const char *, UnderlyingType>> enumerators) {
+ CompilerType int_type = m_type_system->GetBuiltinTypeForEncodingAndBitSize(
+ std::is_same<UnderlyingType, int>::value ? lldb::eEncodingSint
+ : lldb::eEncodingUint,
+ 32);
+ CompilerType enum_type = m_type_system->CreateEnumerationType(
+ "TestEnum", m_type_system->GetTranslationUnitDecl(),
+ OptionalClangModuleID(), Declaration(), int_type, false);
+
+ m_type_system->StartTagDeclarationDefinition(enum_type);
+ Declaration decl;
+ for (auto [name, value] : enumerators)
+ m_type_system->AddEnumerationValueToEnumerationType(enum_type, decl, name,
+ value, 32);
+ m_type_system->CompleteTagDeclarationDefinition(enum_type);
+
+ return enum_type;
+ }
+
ExecutionContext m_exe_ctx;
TypeSystemClang *m_type_system;
@@ -123,12 +130,25 @@ class ValueObjectMockProcessTest : public ::testing::Test {
lldb::ProcessSP m_process_sp;
};
+TEST_F(ValueObjectMockProcessTest, EmptyEnum) {
+ // All values of an empty enum should be shown as plain numbers.
+ TestDumpEnum<unsigned>({}, {{0, {}, "(TestEnum) test_var = 0\n"},
+ {1, {}, "(TestEnum) test_var = 1\n"},
+ {2, {}, "(TestEnum) test_var = 2\n"}});
+
+ TestDumpEnum<int>({}, {{-2, {}, "(TestEnum) test_var = -2\n"},
+ {-1, {}, "(TestEnum) test_var = -1\n"},
+ {0, {}, "(TestEnum) test_var = 0\n"},
+ {1, {}, "(TestEnum) test_var = 1\n"},
+ {2, {}, "(TestEnum) test_var = 2\n"}});
+}
+
TEST_F(ValueObjectMockProcessTest, Enum) {
// This is not a bitfield-like enum, so values are printed as decimal by
// default. Also we only show the enumerator name if the value is an
// exact match.
- TestDumpValueObject(
- MakeEnumType({{"test_2", 2}, {"test_3", 3}}),
+ TestDumpEnum<unsigned>(
+ {{"test_2", 2}, {"test_3", 3}},
{{0, {}, "(TestEnum) test_var = 0\n"},
{1, {}, "(TestEnum) test_var = 1\n"},
{2, {}, "(TestEnum) test_var = test_2\n"},
@@ -151,8 +171,8 @@ TEST_F(ValueObjectMockProcessTest, BitFieldLikeEnum) {
// set. lldb treats this as a "bitfield like enum". This means we show values
// as hex, and values without exact matches are shown as a combination of
// enumerators and any remaining value left over.
- TestDumpValueObject(
- MakeEnumType({{"test_2", 2}, {"test_4", 4}}),
+ TestDumpEnum<unsigned>(
+ {{"test_2", 2}, {"test_4", 4}},
{
{0, {}, "(TestEnum) test_var = 0x0\n"},
{1, {}, "(TestEnum) test_var = 0x1\n"},

From 4002e380d0f7c689df7d26d9637118c2c7c6f985 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Thu, 4 Jul 2024 16:53:48 +0800
Subject: [PATCH 224/246] [RISCV] Use splat shorthand in strided load store
tests. NFC
---
.../fixed-vectors-strided-load-store-asm.ll | 138 ++++++++---------
.../rvv/fixed-vectors-strided-load-store.ll | 140 +++++++++---------
2 files changed, 139 insertions(+), 139 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
index da8db345c8783..e95b0bf3497fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
@@ -34,15 +34,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <32 x i8>, ptr %i2, align 1
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -79,7 +79,7 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
%wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <32 x i8> %maskedoff)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
@@ -87,7 +87,7 @@ vector.body: ; preds = %vector.body, %entry
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -120,15 +120,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ <i64 31, i64 30, i64 29, i64 28, i64 27, i64 26, i64 25, i64 24, i64 23, i64 22, i64 21, i64 20, i64 19, i64 18, i64 17, i64 16, i64 15, i64 14, i64 13, i64 12, i64 11, i64 10, i64 9, i64 8, i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <32 x i8>, ptr %i2, align 1
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -159,15 +159,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <32 x i8>, ptr %i2, align 1
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -197,15 +197,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <8 x i64> [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <8 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <8 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <8 x i64> %i
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <8 x i32>, ptr %i2, align 4
%i4 = add <8 x i32> %wide.load, %wide.masked.gather
store <8 x i32> %i4, ptr %i2, align 4
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -271,15 +271,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <32 x i8>, ptr %i2, align 1
%i4 = udiv <32 x i8> %wide.masked.gather, %wide.load
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -317,13 +317,13 @@ vector.body: ; preds = %vector.body, %entry
%vec.ind = phi <32 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31>, %entry ], [ %vec.ind.next, %vector.body ]
%i = getelementptr inbounds i8, ptr %B, i64 %index
%wide.load = load <32 x i8>, ptr %i, align 1
- %i2 = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i2 = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i3 = getelementptr inbounds i8, ptr %A, <32 x i64> %i2
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i3, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i3, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i4 = add <32 x i8> %wide.masked.gather, %wide.load
- call void @llvm.masked.scatter.v32i8.v32p0(<32 x i8> %i4, <32 x ptr> %i3, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+ call void @llvm.masked.scatter.v32i8.v32p0(<32 x i8> %i4, <32 x ptr> %i3, i32 1, <32 x i1> splat (i1 true))
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i5 = icmp eq i64 %index.next, 1024
br i1 %i5, label %for.cond.cleanup, label %vector.body
@@ -362,13 +362,13 @@ vector.body: ; preds = %vector.body, %entry
%vec.ind = phi <32 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31>, %entry ], [ %vec.ind.next, %vector.body ]
%i = getelementptr inbounds i8, ptr %B, i64 %index
%wide.load = load <32 x i8>, ptr %i, align 1
- %i2 = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i2 = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i3 = getelementptr inbounds i8, ptr %A, <32 x i64> %i2
%wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i3, i32 1, <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <32 x i8> %maskedoff)
%i4 = add <32 x i8> %wide.masked.gather, %wide.load
call void @llvm.masked.scatter.v32i8.v32p0(<32 x i8> %i4, <32 x ptr> %i3, i32 1, <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i5 = icmp eq i64 %index.next, 1024
br i1 %i5, label %for.cond.cleanup, label %vector.body
@@ -408,15 +408,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = shl nsw <8 x i64> %vec.ind, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ %i = shl nsw <8 x i64> %vec.ind, splat (i64 2)
%i1 = getelementptr inbounds i32, ptr %B, <8 x i64> %i
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i32, ptr %A, i64 %index
%wide.load = load <8 x i32>, ptr %i2, align 1
%i4 = add <8 x i32> %wide.load, %wide.masked.gather
store <8 x i32> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 8)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -456,13 +456,13 @@ vector.body: ; preds = %vector.body, %entry
%vec.ind = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %entry ], [ %vec.ind.next, %vector.body ]
%i = getelementptr inbounds i32, ptr %B, i64 %index
%wide.load = load <8 x i32>, ptr %i, align 1
- %i2 = shl nuw nsw <8 x i64> %vec.ind, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ %i2 = shl nuw nsw <8 x i64> %vec.ind, splat (i64 2)
%i3 = getelementptr inbounds i32, ptr %A, <8 x i64> %i2
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i3, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i3, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i4 = add <8 x i32> %wide.masked.gather, %wide.load
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i4, <8 x ptr> %i3, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i4, <8 x ptr> %i3, i32 4, <8 x i1> splat (i1 true))
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 8)
%i5 = icmp eq i64 %index.next, 1024
br i1 %i5, label %for.cond.cleanup, label %vector.body
@@ -509,11 +509,11 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %entry ], [ %vec.ind.next, %vector.body ]
- %step.add = add <8 x i64> %vec.ind, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
+ %step.add = add <8 x i64> %vec.ind, splat (i64 8)
%i = getelementptr inbounds %struct.foo, ptr %B, <8 x i64> %vec.ind, i32 1
%i1 = getelementptr inbounds %struct.foo, ptr %B, <8 x i64> %step.add, i32 1
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
- %wide.masked.gather9 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
+ %wide.masked.gather9 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i32, ptr %A, i64 %index
%wide.load = load <8 x i32>, ptr %i2, align 4
%i4 = getelementptr inbounds i32, ptr %i2, i64 8
@@ -523,7 +523,7 @@ vector.body: ; preds = %vector.body, %entry
store <8 x i32> %i6, ptr %i2, align 4
store <8 x i32> %i7, ptr %i4, align 4
%index.next = add nuw i64 %index, 16
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 16)
%i10 = icmp eq i64 %index.next, 1024
br i1 %i10, label %for.cond.cleanup, label %vector.body
@@ -582,39 +582,39 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <8 x i64> [ <i64 0, i64 4, i64 8, i64 12, i64 16, i64 20, i64 24, i64 28>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = shl nuw nsw <8 x i64> %vec.ind, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ %i = shl nuw nsw <8 x i64> %vec.ind, splat (i64 2)
%i1 = getelementptr inbounds i32, ptr %B, <8 x i64> %i
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i32, ptr %A, <8 x i64> %vec.ind
- %wide.masked.gather52 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather52 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i2, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i3 = add nsw <8 x i32> %wide.masked.gather52, %wide.masked.gather
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i3, <8 x ptr> %i2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
- %i4 = or disjoint <8 x i64> %vec.ind, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- %i5 = shl nsw <8 x i64> %i4, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i3, <8 x ptr> %i2, i32 4, <8 x i1> splat (i1 true))
+ %i4 = or disjoint <8 x i64> %vec.ind, splat (i64 1)
+ %i5 = shl nsw <8 x i64> %i4, splat (i64 2)
%i6 = getelementptr inbounds i32, ptr %B, <8 x i64> %i5
- %wide.masked.gather53 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i6, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather53 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i6, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i7 = getelementptr inbounds i32, ptr %A, <8 x i64> %i4
- %wide.masked.gather54 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i7, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather54 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i7, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i8 = add nsw <8 x i32> %wide.masked.gather54, %wide.masked.gather53
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i8, <8 x ptr> %i7, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
- %i9 = or disjoint <8 x i64> %vec.ind, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
- %i10 = shl nsw <8 x i64> %i9, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i8, <8 x ptr> %i7, i32 4, <8 x i1> splat (i1 true))
+ %i9 = or disjoint <8 x i64> %vec.ind, splat (i64 2)
+ %i10 = shl nsw <8 x i64> %i9, splat (i64 2)
%i11 = getelementptr inbounds i32, ptr %B, <8 x i64> %i10
- %wide.masked.gather55 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i11, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather55 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i11, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i12 = getelementptr inbounds i32, ptr %A, <8 x i64> %i9
- %wide.masked.gather56 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i12, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather56 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i12, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i13 = add nsw <8 x i32> %wide.masked.gather56, %wide.masked.gather55
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i13, <8 x ptr> %i12, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i13, <8 x ptr> %i12, i32 4, <8 x i1> splat (i1 true))
%i14 = or disjoint <8 x i64> %vec.ind, <i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3>
%i15 = shl nsw <8 x i64> %i14, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
%i16 = getelementptr inbounds i32, ptr %B, <8 x i64> %i15
- %wide.masked.gather57 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i16, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather57 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i16, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i17 = getelementptr inbounds i32, ptr %A, <8 x i64> %i14
- %wide.masked.gather58 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i17, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather58 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i17, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i18 = add nsw <8 x i32> %wide.masked.gather58, %wide.masked.gather57
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i18, <8 x ptr> %i17, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i18, <8 x ptr> %i17, i32 4, <8 x i1> splat (i1 true))
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 32)
%i19 = icmp eq i64 %index.next, 256
br i1 %i19, label %for.cond.cleanup, label %vector.body
@@ -682,13 +682,13 @@ bb:
bb2: ; preds = %bb2, %bb
%i = phi i64 [ 0, %bb ], [ %i15, %bb2 ]
%i3 = phi <2 x i64> [ <i64 0, i64 1>, %bb ], [ %i16, %bb2 ]
- %i4 = mul nuw nsw <2 x i64> %i3, <i64 5, i64 5>
- %i5 = mul <2 x i64> %i3, <i64 5, i64 5>
+ %i4 = mul nuw nsw <2 x i64> %i3, splat (i64 5)
+ %i5 = mul <2 x i64> %i3, splat (i64 5)
%i6 = add <2 x i64> %i5, <i64 10, i64 10>
%i7 = getelementptr inbounds ptr, ptr %arg1, <2 x i64> %i4
%i8 = getelementptr inbounds ptr, ptr %arg1, <2 x i64> %i6
- %i9 = call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> %i7, i32 8, <2 x i1> <i1 true, i1 true>, <2 x ptr> undef)
- %i10 = call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> %i8, i32 8, <2 x i1> <i1 true, i1 true>, <2 x ptr> undef)
+ %i9 = call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> %i7, i32 8, <2 x i1> splat (i1 true), <2 x ptr> undef)
+ %i10 = call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> %i8, i32 8, <2 x i1> splat (i1 true), <2 x ptr> undef)
%i11 = getelementptr inbounds ptr, ptr %arg, i64 %i
store <2 x ptr> %i9, ptr %i11, align 8
%i13 = getelementptr inbounds ptr, ptr %i11, i64 2
@@ -763,13 +763,13 @@ bb2: ; preds = %bb2, %bb
%i6 = load <2 x ptr>, ptr %i4, align 8
%i7 = getelementptr inbounds ptr, ptr %i4, i64 2
%i9 = load <2 x ptr>, ptr %i7, align 8
- %i10 = mul nuw nsw <2 x i64> %i3, <i64 5, i64 5>
- %i11 = mul <2 x i64> %i3, <i64 5, i64 5>
+ %i10 = mul nuw nsw <2 x i64> %i3, splat (i64 5)
+ %i11 = mul <2 x i64> %i3, splat (i64 5)
%i12 = add <2 x i64> %i11, <i64 10, i64 10>
%i13 = getelementptr inbounds ptr, ptr %arg, <2 x i64> %i10
%i14 = getelementptr inbounds ptr, ptr %arg, <2 x i64> %i12
- call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> %i6, <2 x ptr> %i13, i32 8, <2 x i1> <i1 true, i1 true>)
- call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> %i9, <2 x ptr> %i14, i32 8, <2 x i1> <i1 true, i1 true>)
+ call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> %i6, <2 x ptr> %i13, i32 8, <2 x i1> splat (i1 true))
+ call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> %i9, <2 x ptr> %i14, i32 8, <2 x i1> splat (i1 true))
%i15 = add nuw i64 %i, 4
%i16 = add <2 x i64> %i3, <i64 4, i64 4>
%i17 = icmp eq i64 %i15, 1024
@@ -863,15 +863,15 @@ bb15: ; preds = %bb15, %bb9
%i16 = phi i64 [ 0, %bb9 ], [ %i27, %bb15 ]
%i17 = phi <32 x i64> [ %i14, %bb9 ], [ %i28, %bb15 ]
%i18 = add i64 %i16, %i4
- %i19 = mul nsw <32 x i64> %i17, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i19 = mul nsw <32 x i64> %i17, splat (i64 5)
%i20 = getelementptr inbounds i8, ptr %arg1, <32 x i64> %i19
- %i21 = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i20, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %i21 = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i20, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i22 = getelementptr inbounds i8, ptr %arg, i64 %i18
%i24 = load <32 x i8>, ptr %i22, align 1
%i25 = add <32 x i8> %i24, %i21
store <32 x i8> %i25, ptr %i22, align 1
%i27 = add nuw i64 %i16, 32
- %i28 = add <32 x i64> %i17, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %i28 = add <32 x i64> %i17, splat (i64 32)
%i29 = icmp eq i64 %i27, %i10
br i1 %i29, label %bb30, label %bb15
@@ -938,15 +938,15 @@ bb2: ; preds = %bb
bb4: ; preds = %bb4, %bb2
%i5 = phi i64 [ %i13, %bb4 ], [ 0, %bb2 ]
%i6 = phi <16 x i64> [ %i14, %bb4 ], [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, %bb2 ]
- %i7 = mul <16 x i64> %i6, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i7 = mul <16 x i64> %i6, splat (i64 5)
%i8 = getelementptr inbounds i8, ptr %arg1, <16 x i64> %i7
- %i9 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %i8, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+ %i9 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %i8, i32 1, <16 x i1> splat (i1 true), <16 x i8> undef)
%i10 = getelementptr inbounds i8, ptr %arg, i64 %i5
%i11 = load <16 x i8>, ptr %i10, align 1
%i12 = add <16 x i8> %i11, %i9
store <16 x i8> %i12, ptr %i10, align 1
%i13 = add nuw i64 %i5, 16
- %i14 = add <16 x i64> %i6, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16>
+ %i14 = add <16 x i64> %i6, splat (i64 16)
%i15 = icmp eq i64 %i13, %i
br i1 %i15, label %bb16, label %bb4
@@ -977,15 +977,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <8 x i64> [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <8 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <8 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds float, ptr %B, <8 x i64> %i
- %wide.masked.gather = call <8 x float> @llvm.masked.gather.v8f32.v32p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef)
+ %wide.masked.gather = call <8 x float> @llvm.masked.gather.v8f32.v32p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x float> undef)
%i2 = getelementptr inbounds float, ptr %A, i64 %index
%wide.load = load <8 x float>, ptr %i2, align 4
%i4 = fadd <8 x float> %wide.load, %wide.masked.gather
store <8 x float> %i4, ptr %i2, align 4
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll
index 77243c0e0354d..ab5885a604443 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll
@@ -34,15 +34,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <32 x i8>, ptr %i2, align 1
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -76,7 +76,7 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
%wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <32 x i8> %maskedoff)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
@@ -84,7 +84,7 @@ vector.body: ; preds = %vector.body, %entry
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -118,15 +118,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ <i64 31, i64 30, i64 29, i64 28, i64 27, i64 26, i64 25, i64 24, i64 23, i64 22, i64 21, i64 20, i64 19, i64 18, i64 17, i64 16, i64 15, i64 14, i64 13, i64 12, i64 11, i64 10, i64 9, i64 8, i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <32 x i8>, ptr %i2, align 1
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -160,15 +160,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i64> [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i64> %i
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <32 x i8>, ptr %i2, align 1
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -208,13 +208,13 @@ vector.body: ; preds = %vector.body, %entry
%vec.ind = phi <32 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31>, %entry ], [ %vec.ind.next, %vector.body ]
%i = getelementptr inbounds i8, ptr %B, i64 %index
%wide.load = load <32 x i8>, ptr %i, align 1
- %i2 = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i2 = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i3 = getelementptr inbounds i8, ptr %A, <32 x i64> %i2
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i3, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i3, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i4 = add <32 x i8> %wide.masked.gather, %wide.load
- call void @llvm.masked.scatter.v32i8.v32p0(<32 x i8> %i4, <32 x ptr> %i3, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+ call void @llvm.masked.scatter.v32i8.v32p0(<32 x i8> %i4, <32 x ptr> %i3, i32 1, <32 x i1> splat (i1 true))
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i5 = icmp eq i64 %index.next, 1024
br i1 %i5, label %for.cond.cleanup, label %vector.body
@@ -250,13 +250,13 @@ vector.body: ; preds = %vector.body, %entry
%vec.ind = phi <32 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31>, %entry ], [ %vec.ind.next, %vector.body ]
%i = getelementptr inbounds i8, ptr %B, i64 %index
%wide.load = load <32 x i8>, ptr %i, align 1
- %i2 = mul nuw nsw <32 x i64> %vec.ind, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i2 = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5)
%i3 = getelementptr inbounds i8, ptr %A, <32 x i64> %i2
%wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i3, i32 1, <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <32 x i8> %maskedoff)
%i4 = add <32 x i8> %wide.masked.gather, %wide.load
call void @llvm.masked.scatter.v32i8.v32p0(<32 x i8> %i4, <32 x ptr> %i3, i32 1, <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <32 x i64> %vec.ind, splat (i64 32)
%i5 = icmp eq i64 %index.next, 1024
br i1 %i5, label %for.cond.cleanup, label %vector.body
@@ -294,15 +294,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = shl nsw <8 x i64> %vec.ind, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ %i = shl nsw <8 x i64> %vec.ind, splat (i64 2)
%i1 = getelementptr inbounds i32, ptr %B, <8 x i64> %i
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i32, ptr %A, i64 %index
%wide.load = load <8 x i32>, ptr %i2, align 1
%i4 = add <8 x i32> %wide.load, %wide.masked.gather
store <8 x i32> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 8)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -343,13 +343,13 @@ vector.body: ; preds = %vector.body, %entry
%vec.ind = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %entry ], [ %vec.ind.next, %vector.body ]
%i = shl nsw <8 x i64> %vec.ind, %.splat
%i1 = getelementptr inbounds i32, ptr %B, <8 x i64> %i
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i32, ptr %A, i64 %index
%wide.load = load <8 x i32>, ptr %i2, align 1
%i4 = add <8 x i32> %wide.load, %wide.masked.gather
store <8 x i32> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 8)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -390,13 +390,13 @@ vector.body: ; preds = %vector.body, %entry
%vec.ind = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %entry ], [ %vec.ind.next, %vector.body ]
%i = shl nsw <8 x i64> %.splat, %vec.ind
%i1 = getelementptr inbounds i32, ptr %B, <8 x i64> %i
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i32, ptr %A, i64 %index
%wide.load = load <8 x i32>, ptr %i2, align 1
%i4 = add <8 x i32> %wide.load, %wide.masked.gather
store <8 x i32> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 8)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -436,13 +436,13 @@ vector.body: ; preds = %vector.body, %entry
%vec.ind = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %entry ], [ %vec.ind.next, %vector.body ]
%i = getelementptr inbounds i32, ptr %B, i64 %index
%wide.load = load <8 x i32>, ptr %i, align 1
- %i2 = shl nuw nsw <8 x i64> %vec.ind, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ %i2 = shl nuw nsw <8 x i64> %vec.ind, splat (i64 2)
%i3 = getelementptr inbounds i32, ptr %A, <8 x i64> %i2
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i3, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i3, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i4 = add <8 x i32> %wide.masked.gather, %wide.load
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i4, <8 x ptr> %i3, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i4, <8 x ptr> %i3, i32 4, <8 x i1> splat (i1 true))
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 8)
%i5 = icmp eq i64 %index.next, 1024
br i1 %i5, label %for.cond.cleanup, label %vector.body
@@ -492,11 +492,11 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %entry ], [ %vec.ind.next, %vector.body ]
- %step.add = add <8 x i64> %vec.ind, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
+ %step.add = add <8 x i64> %vec.ind, splat (i64 8)
%i = getelementptr inbounds %struct.foo, ptr %B, <8 x i64> %vec.ind, i32 1
%i1 = getelementptr inbounds %struct.foo, ptr %B, <8 x i64> %step.add, i32 1
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
- %wide.masked.gather9 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
+ %wide.masked.gather9 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i32, ptr %A, i64 %index
%wide.load = load <8 x i32>, ptr %i2, align 4
%i4 = getelementptr inbounds i32, ptr %i2, i64 8
@@ -506,7 +506,7 @@ vector.body: ; preds = %vector.body, %entry
store <8 x i32> %i6, ptr %i2, align 4
store <8 x i32> %i7, ptr %i4, align 4
%index.next = add nuw i64 %index, 16
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 16)
%i10 = icmp eq i64 %index.next, 1024
br i1 %i10, label %for.cond.cleanup, label %vector.body
@@ -580,39 +580,39 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <8 x i64> [ <i64 0, i64 4, i64 8, i64 12, i64 16, i64 20, i64 24, i64 28>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = shl nuw nsw <8 x i64> %vec.ind, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ %i = shl nuw nsw <8 x i64> %vec.ind, splat (i64 2)
%i1 = getelementptr inbounds i32, ptr %B, <8 x i64> %i
- %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i1, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i2 = getelementptr inbounds i32, ptr %A, <8 x i64> %vec.ind
- %wide.masked.gather52 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather52 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i2, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i3 = add nsw <8 x i32> %wide.masked.gather52, %wide.masked.gather
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i3, <8 x ptr> %i2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
- %i4 = or disjoint <8 x i64> %vec.ind, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- %i5 = shl nsw <8 x i64> %i4, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i3, <8 x ptr> %i2, i32 4, <8 x i1> splat (i1 true))
+ %i4 = or disjoint <8 x i64> %vec.ind, splat (i64 1)
+ %i5 = shl nsw <8 x i64> %i4, splat (i64 2)
%i6 = getelementptr inbounds i32, ptr %B, <8 x i64> %i5
- %wide.masked.gather53 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i6, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather53 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i6, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i7 = getelementptr inbounds i32, ptr %A, <8 x i64> %i4
- %wide.masked.gather54 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i7, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather54 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i7, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i8 = add nsw <8 x i32> %wide.masked.gather54, %wide.masked.gather53
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i8, <8 x ptr> %i7, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
- %i9 = or disjoint <8 x i64> %vec.ind, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
- %i10 = shl nsw <8 x i64> %i9, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i8, <8 x ptr> %i7, i32 4, <8 x i1> splat (i1 true))
+ %i9 = or disjoint <8 x i64> %vec.ind, splat (i64 2)
+ %i10 = shl nsw <8 x i64> %i9, splat (i64 2)
%i11 = getelementptr inbounds i32, ptr %B, <8 x i64> %i10
- %wide.masked.gather55 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i11, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather55 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i11, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i12 = getelementptr inbounds i32, ptr %A, <8 x i64> %i9
- %wide.masked.gather56 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i12, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather56 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i12, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i13 = add nsw <8 x i32> %wide.masked.gather56, %wide.masked.gather55
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i13, <8 x ptr> %i12, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
- %i14 = or disjoint <8 x i64> %vec.ind, <i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3>
- %i15 = shl nsw <8 x i64> %i14, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i13, <8 x ptr> %i12, i32 4, <8 x i1> splat (i1 true))
+ %i14 = or disjoint <8 x i64> %vec.ind, splat (i64 3)
+ %i15 = shl nsw <8 x i64> %i14, splat (i64 2)
%i16 = getelementptr inbounds i32, ptr %B, <8 x i64> %i15
- %wide.masked.gather57 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i16, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather57 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i16, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i17 = getelementptr inbounds i32, ptr %A, <8 x i64> %i14
- %wide.masked.gather58 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i17, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %wide.masked.gather58 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %i17, i32 4, <8 x i1> splat (i1 true), <8 x i32> undef)
%i18 = add nsw <8 x i32> %wide.masked.gather58, %wide.masked.gather57
- call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i18, <8 x ptr> %i17, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+ call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %i18, <8 x ptr> %i17, i32 4, <8 x i1> splat (i1 true))
%index.next = add nuw i64 %index, 8
- %vec.ind.next = add <8 x i64> %vec.ind, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %vec.ind.next = add <8 x i64> %vec.ind, splat (i64 32)
%i19 = icmp eq i64 %index.next, 256
br i1 %i19, label %for.cond.cleanup, label %vector.body
@@ -680,13 +680,13 @@ bb:
bb2: ; preds = %bb2, %bb
%i = phi i64 [ 0, %bb ], [ %i15, %bb2 ]
%i3 = phi <2 x i64> [ <i64 0, i64 1>, %bb ], [ %i16, %bb2 ]
- %i4 = mul nuw nsw <2 x i64> %i3, <i64 5, i64 5>
- %i5 = mul <2 x i64> %i3, <i64 5, i64 5>
+ %i4 = mul nuw nsw <2 x i64> %i3, splat (i64 5)
+ %i5 = mul <2 x i64> %i3, splat (i64 5)
%i6 = add <2 x i64> %i5, <i64 10, i64 10>
%i7 = getelementptr inbounds ptr, ptr %arg1, <2 x i64> %i4
%i8 = getelementptr inbounds ptr, ptr %arg1, <2 x i64> %i6
- %i9 = call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> %i7, i32 8, <2 x i1> <i1 true, i1 true>, <2 x ptr> undef)
- %i10 = call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> %i8, i32 8, <2 x i1> <i1 true, i1 true>, <2 x ptr> undef)
+ %i9 = call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> %i7, i32 8, <2 x i1> splat (i1 true), <2 x ptr> undef)
+ %i10 = call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> %i8, i32 8, <2 x i1> splat (i1 true), <2 x ptr> undef)
%i11 = getelementptr inbounds ptr, ptr %arg, i64 %i
store <2 x ptr> %i9, ptr %i11, align 8
%i13 = getelementptr inbounds ptr, ptr %i11, i64 2
@@ -761,13 +761,13 @@ bb2: ; preds = %bb2, %bb
%i6 = load <2 x ptr>, ptr %i4, align 8
%i7 = getelementptr inbounds ptr, ptr %i4, i64 2
%i9 = load <2 x ptr>, ptr %i7, align 8
- %i10 = mul nuw nsw <2 x i64> %i3, <i64 5, i64 5>
- %i11 = mul <2 x i64> %i3, <i64 5, i64 5>
+ %i10 = mul nuw nsw <2 x i64> %i3, splat (i64 5)
+ %i11 = mul <2 x i64> %i3, splat (i64 5)
%i12 = add <2 x i64> %i11, <i64 10, i64 10>
%i13 = getelementptr inbounds ptr, ptr %arg, <2 x i64> %i10
%i14 = getelementptr inbounds ptr, ptr %arg, <2 x i64> %i12
- call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> %i6, <2 x ptr> %i13, i32 8, <2 x i1> <i1 true, i1 true>)
- call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> %i9, <2 x ptr> %i14, i32 8, <2 x i1> <i1 true, i1 true>)
+ call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> %i6, <2 x ptr> %i13, i32 8, <2 x i1> splat (i1 true))
+ call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> %i9, <2 x ptr> %i14, i32 8, <2 x i1> splat (i1 true))
%i15 = add nuw i64 %i, 4
%i16 = add <2 x i64> %i3, <i64 4, i64 4>
%i17 = icmp eq i64 %i15, 1024
@@ -856,15 +856,15 @@ bb15: ; preds = %bb15, %bb9
%i16 = phi i64 [ 0, %bb9 ], [ %i27, %bb15 ]
%i17 = phi <32 x i64> [ %i14, %bb9 ], [ %i28, %bb15 ]
%i18 = add i64 %i16, %i4
- %i19 = mul nsw <32 x i64> %i17, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i19 = mul nsw <32 x i64> %i17, splat (i64 5)
%i20 = getelementptr inbounds i8, ptr %arg1, <32 x i64> %i19
- %i21 = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i20, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %i21 = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i20, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i22 = getelementptr inbounds i8, ptr %arg, i64 %i18
%i24 = load <32 x i8>, ptr %i22, align 1
%i25 = add <32 x i8> %i24, %i21
store <32 x i8> %i25, ptr %i22, align 1
%i27 = add nuw i64 %i16, 32
- %i28 = add <32 x i64> %i17, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %i28 = add <32 x i64> %i17, splat (i64 32)
%i29 = icmp eq i64 %i27, %i10
br i1 %i29, label %bb30, label %bb15
@@ -932,15 +932,15 @@ bb2: ; preds = %bb
bb4: ; preds = %bb4, %bb2
%i5 = phi i64 [ %i13, %bb4 ], [ 0, %bb2 ]
%i6 = phi <16 x i64> [ %i14, %bb4 ], [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, %bb2 ]
- %i7 = mul <16 x i64> %i6, <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>
+ %i7 = mul <16 x i64> %i6, splat (i64 5)
%i8 = getelementptr inbounds i8, ptr %arg1, <16 x i64> %i7
- %i9 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %i8, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+ %i9 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %i8, i32 1, <16 x i1> splat (i1 true), <16 x i8> undef)
%i10 = getelementptr inbounds i8, ptr %arg, i64 %i5
%i11 = load <16 x i8>, ptr %i10, align 1
%i12 = add <16 x i8> %i11, %i9
store <16 x i8> %i12, ptr %i10, align 1
%i13 = add nuw i64 %i5, 16
- %i14 = add <16 x i64> %i6, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16>
+ %i14 = add <16 x i64> %i6, splat (i64 16)
%i15 = icmp eq i64 %i13, %i
br i1 %i15, label %bb16, label %bb4
@@ -958,7 +958,7 @@ entry:
%0 = insertelement <8 x ptr> poison, ptr %a, i64 0
%1 = shufflevector <8 x ptr> %0, <8 x ptr> poison, <8 x i32> zeroinitializer
%2 = getelementptr i8, <8 x ptr> %1, <8 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448>
- %3 = tail call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %2, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> poison)
+ %3 = tail call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %2, i32 1, <8 x i1> splat (i1 true), <8 x i8> poison)
ret <8 x i8> %3
}
@@ -991,15 +991,15 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%vec.ind = phi <32 x i16> [ <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23, i16 24, i16 25, i16 26, i16 27, i16 28, i16 29, i16 30, i16 31>, %entry ], [ %vec.ind.next, %vector.body ]
- %i = mul nuw nsw <32 x i16> %vec.ind, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %i = mul nuw nsw <32 x i16> %vec.ind, splat (i16 5)
%i1 = getelementptr inbounds i8, ptr %B, <32 x i16> %i
- %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %i1, i32 1, <32 x i1> splat (i1 true), <32 x i8> undef)
%i2 = getelementptr inbounds i8, ptr %A, i64 %index
%wide.load = load <32 x i8>, ptr %i2, align 1
%i4 = add <32 x i8> %wide.load, %wide.masked.gather
store <32 x i8> %i4, ptr %i2, align 1
%index.next = add nuw i64 %index, 32
- %vec.ind.next = add <32 x i16> %vec.ind, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32>
+ %vec.ind.next = add <32 x i16> %vec.ind, splat (i16 32)
%i6 = icmp eq i64 %index.next, 1024
br i1 %i6, label %for.cond.cleanup, label %vector.body
>From 28be3f8ac51e62610f8e78b411adf05a2365dff7 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 4 Jul 2024 10:00:04 +0100
Subject: [PATCH 225/246] [LAA] Cache pointer bounds expansions (NFCI).
This avoids expanding the same bounds multiple times, which helps reduce
the compile-time impact of removing the restrictions added in
234cc40adc6, notably -0.06% on stage1-O3 and -0.05% on both
stage1-ReleaseThinLTO and stage1-ReleaseLTO-g.
https://llvm-compile-time-tracker.com/compare.php?from=8b9ebc4bb86cf0979e05908cbb04336f2d01dda5&to=fabd36f96c31e47ea72653f5a404feaadfc7b5b5&stat=instructions:u
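In outline, the change below is the standard insert-then-fill memoization idiom over a DenseMap. As a minimal sketch (plain ints standing in for the SCEV expressions, so an illustration of the idiom rather than the patch itself):

  #include <utility>
  #include "llvm/ADT/DenseMap.h"

  using Bounds = std::pair<int, int>; // stands in for {ScStart, ScEnd}

  static Bounds computeOrGetBounds(int Key,
                                   llvm::DenseMap<int, Bounds> &Cache) {
    // Reserve a slot up front; if the key was already present, reuse the
    // cached value instead of recomputing.
    auto [Iter, Inserted] = Cache.insert({Key, {0, 0}});
    if (!Inserted)
      return Iter->second;
    // First query for this key: do the expensive computation, then fill
    // the reserved slot. The real code seeds with getCouldNotCompute() as
    // its sentinel value.
    Iter->second = {Key, Key + 1}; // placeholder for the real expansion
    return Iter->second;
  }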
---
.../llvm/Analysis/LoopAccessAnalysis.h | 9 +++++++
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 25 ++++++++++++-------
2 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
index 7a54fe55014be..c74e76604e786 100644
--- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -269,6 +269,11 @@ class MemoryDepChecker {
const Loop *getInnermostLoop() const { return InnermostLoop; }
+ DenseMap<const SCEV *, std::pair<const SCEV *, const SCEV *>> &
+ getPointerBounds() {
+ return PointerBounds;
+ }
+
private:
/// A wrapper around ScalarEvolution, used to add runtime SCEV checks, and
/// applies dynamic knowledge to simplify SCEV expressions and convert them
@@ -327,6 +332,10 @@ class MemoryDepChecker {
/// backwards-vectorizable or unknown (triggering a runtime check).
unsigned MaxTargetVectorWidthInBits = 0;
+ /// Mapping of SCEV expressions to their expanded pointer bounds (pair of
+ /// start and end pointer expressions).
+ DenseMap<const SCEV *, std::pair<const SCEV *, const SCEV *>> PointerBounds;
+
/// Check whether there is a plausible dependence between the two
/// accesses.
///
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 38bf6d8160aa9..f132e45540525 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -203,11 +203,18 @@ RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
-static std::pair<const SCEV *, const SCEV *>
-getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
- PredicatedScalarEvolution &PSE) {
+static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
+ const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
+ PredicatedScalarEvolution &PSE,
+ DenseMap<const SCEV *, std::pair<const SCEV *, const SCEV *>>
+ &PointerBounds) {
ScalarEvolution *SE = PSE.getSE();
+ auto [Iter, Ins] = PointerBounds.insert(
+ {PtrExpr, {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
+ if (!Ins)
+ return Iter->second;
+
const SCEV *ScStart;
const SCEV *ScEnd;
@@ -244,7 +251,8 @@ getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
- return {ScStart, ScEnd};
+ Iter->second = {ScStart, ScEnd};
+ return Iter->second;
}
/// Calculate Start and End points of memory access using
@@ -254,8 +262,8 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
unsigned DepSetId, unsigned ASId,
PredicatedScalarEvolution &PSE,
bool NeedsFreeze) {
- const auto &[ScStart, ScEnd] =
- getStartAndEndForAccess(Lp, PtrExpr, AccessTy, PSE);
+ const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
+ Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
assert(!isa<SCEVCouldNotCompute>(ScStart) &&
!isa<SCEVCouldNotCompute>(ScEnd) &&
"must be able to compute both start and end expressions");
@@ -1964,10 +1972,9 @@ MemoryDepChecker::getDependenceDistanceStrideAndSize(
if (SE.isLoopInvariant(Src, InnermostLoop) ||
SE.isLoopInvariant(Sink, InnermostLoop)) {
const auto &[SrcStart, SrcEnd] =
- getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE);
+ getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE, PointerBounds);
const auto &[SinkStart, SinkEnd] =
- getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE);
-
+ getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE, PointerBounds);
if (!isa<SCEVCouldNotCompute>(SrcStart) &&
!isa<SCEVCouldNotCompute>(SrcEnd) &&
!isa<SCEVCouldNotCompute>(SinkStart) &&
>From f99746f30a125324500cc7a46829fcfd44bed3dc Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Thu, 4 Jul 2024 11:06:48 +0200
Subject: [PATCH 226/246] [Orc][RuntimeDyld] Return after handling error
(#95364)
If OnLoaded fails, return after passing the error to OnEmitted instead
of also calling finalizeAsync (which would use values that have already
been moved and would make a second call to OnEmitted).
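The shape of the bug is a classic one: an error is reported through a
move-consuming callback, and control then falls through to code that moves
from (and reports via) the same values again. A hedged sketch with
placeholder names, not the real Orc API:

  #include <functional>
  #include <memory>
  #include <utility>

  using Callback = std::function<void(std::unique_ptr<int>)>;

  void finish(std::unique_ptr<int> Obj, Callback OnDone, bool LoadFailed) {
    if (LoadFailed) {
      OnDone(std::move(Obj)); // hand the object and error to the callback...
      return;                 // ...and stop: without this return, Obj is
                              // used after move and OnDone runs twice.
    }
    OnDone(std::move(Obj)); // normal completion path
  }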
---
llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index a9aaff42433f6..79a190f447879 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -1466,8 +1466,10 @@ void jitLinkForORC(
return;
}
- if (auto Err = OnLoaded(*O.getBinary(), *Info, RTDyld.getSymbolTable()))
+ if (auto Err = OnLoaded(*O.getBinary(), *Info, RTDyld.getSymbolTable())) {
OnEmitted(std::move(O), std::move(Info), std::move(Err));
+ return;
+ }
RuntimeDyldImpl::finalizeAsync(std::move(RTDyld.Dyld), std::move(OnEmitted),
std::move(O), std::move(Info));
>From 94b2b1d74a4488e61d7992b12d1042ae6a99f3d5 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Thu, 4 Jul 2024 10:35:32 +0100
Subject: [PATCH 227/246] Fix MSVC "not all control paths return a value"
warning. NFC
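For reference, the underlying pattern: MSVC emits C4715 ("not all control
paths return a value") for a switch that covers every enum value but has no
trailing return, and the usual LLVM fix is a trailing llvm_unreachable
rather than a default case, since a default would suppress the compiler's
unhandled-enum warnings. A minimal sketch:

  #include "llvm/Support/ErrorHandling.h"

  enum class Opcode { Add, Sub };

  const char *getOpcodeName(Opcode Opc) {
    switch (Opc) {
    case Opcode::Add:
      return "add";
    case Opcode::Sub:
      return "sub";
    }
    // Unreachable if the switch is exhaustive; satisfies MSVC's control
    // path analysis without a default case.
    llvm_unreachable("Unknown Opcode");
  }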
---
llvm/lib/SandboxIR/SandboxIR.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/lib/SandboxIR/SandboxIR.cpp b/llvm/lib/SandboxIR/SandboxIR.cpp
index 47e1ae4422c98..ea2f15754d340 100644
--- a/llvm/lib/SandboxIR/SandboxIR.cpp
+++ b/llvm/lib/SandboxIR/SandboxIR.cpp
@@ -103,6 +103,7 @@ const char *Instruction::getOpcodeName(Opcode Opc) {
#define DEF_INSTR(ID, OPC, CLASS) OPC
#include "llvm/SandboxIR/SandboxIRValues.def"
}
+ llvm_unreachable("Unknown Opcode");
}
bool Instruction::classof(const sandboxir::Value *From) {
>From 81660bbc1c066646ef3c232f5af097092e76f8b7 Mon Sep 17 00:00:00 2001
From: Tomas Matheson <Tomas.Matheson at arm.com>
Date: Thu, 4 Jul 2024 10:47:07 +0100
Subject: [PATCH 228/246] [AArch64] remove pointless AEK_NONE (#97569)
---
llvm/unittests/TargetParser/TargetParserTest.cpp | 15 ++++-----------
llvm/utils/TableGen/ARMTargetDefEmitter.cpp | 7 ++-----
2 files changed, 6 insertions(+), 16 deletions(-)
diff --git a/llvm/unittests/TargetParser/TargetParserTest.cpp b/llvm/unittests/TargetParser/TargetParserTest.cpp
index 97d09d677fbdd..d1c3dcb2f8ee4 100644
--- a/llvm/unittests/TargetParser/TargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/TargetParserTest.cpp
@@ -68,11 +68,6 @@ std::string FormatExtensionFlags(int64_t Flags) {
std::string FormatExtensionFlags(AArch64::ExtensionBitset Flags) {
std::vector<StringRef> Features;
-
- // AEK_NONE is not meant to be shown to the user so the target parser
- // does not recognise it. It is relevant here though.
- if (Flags.test(AArch64::AEK_NONE))
- Features.push_back("none");
AArch64::getExtensionFeatures(Flags, Features);
// The target parser also includes every extension you don't have.
@@ -2009,10 +2004,9 @@ TEST(TargetParserTest, AArch64ExtensionFeatures) {
for (auto Ext : Extensions)
ExtVal.set(Ext);
- // NONE has no feature names.
- // We return True here because NONE is a valid choice.
- EXPECT_TRUE(AArch64::getExtensionFeatures({AArch64::AEK_NONE}, Features));
- EXPECT_TRUE(!Features.size());
+ // Test an empty set of features.
+ EXPECT_TRUE(AArch64::getExtensionFeatures({}, Features));
+ EXPECT_TRUE(Features.size() == 0);
AArch64::getExtensionFeatures(ExtVal, Features);
EXPECT_EQ(Extensions.size(), Features.size());
@@ -2092,8 +2086,7 @@ TEST(TargetParserTest, AArch64ExtensionFeatures) {
EXPECT_TRUE(llvm::is_contained(Features, "+complxnum"));
// Assuming we listed every extension above, this should produce the same
- // result. (note that AEK_NONE doesn't have a name so it won't be in the
- // result despite its bit being set)
+ // result.
std::vector<StringRef> AllFeatures;
EXPECT_TRUE(AArch64::getExtensionFeatures(ExtVal, AllFeatures));
EXPECT_THAT(Features, ::testing::ContainerEq(AllFeatures));
diff --git a/llvm/utils/TableGen/ARMTargetDefEmitter.cpp b/llvm/utils/TableGen/ARMTargetDefEmitter.cpp
index 1435696a5d42f..a4b25025b3c61 100644
--- a/llvm/utils/TableGen/ARMTargetDefEmitter.cpp
+++ b/llvm/utils/TableGen/ARMTargetDefEmitter.cpp
@@ -78,12 +78,10 @@ static void EmitARMTargetDef(RecordKeeper &RK, raw_ostream &OS) {
// Emit the ArchExtKind enum
OS << "#ifdef EMIT_ARCHEXTKIND_ENUM\n"
- << "enum ArchExtKind : unsigned {\n"
- << " AEK_NONE = 1,\n";
+ << "enum ArchExtKind : unsigned {\n";
for (const Record *Rec : SortedExtensions) {
auto AEK = Rec->getValueAsString("ArchExtKindSpelling").upper();
- if (AEK != "AEK_NONE")
- OS << " " << AEK << ",\n";
+ OS << " " << AEK << ",\n";
}
OS << " AEK_NUM_EXTENSIONS\n"
<< "};\n"
@@ -108,7 +106,6 @@ static void EmitARMTargetDef(RecordKeeper &RK, raw_ostream &OS) {
OS << ", \"-" << Rec->getValueAsString("Name") << "\""; // negfeature
OS << "},\n";
};
- OS << " {\"none\", {}, AArch64::AEK_NONE, {}, {}, {}, {} },\n";
OS << "};\n"
<< "#undef EMIT_EXTENSIONS\n"
<< "#endif // EMIT_EXTENSIONS\n"
>From 2c0add93b2df27b34c819c11474d8ba7b8e52e70 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Thu, 4 Jul 2024 10:59:21 +0100
Subject: [PATCH 229/246] [TTI] Return a more sensible cost for histogram
intrinsic. (#97397)
This is just an initial cost: for now it is invalid for any target that
doesn't specifically return a cost. Also adds an AArch64-specific cost
check.
We will need to improve this later, e.g. by returning a scalarization
cost for generic targets and possibly introducing a new TTI method, at
least once LoopVectorize has changed its cost model. The reason is
that the histogram intrinsic also effectively contains a gather and a
scatter, and we will need details of the addressing to determine an
appropriate cost for those.
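For context, a hedged sketch of how a cost-model client might consume
this; the budget check and the helper are assumptions for illustration,
not LoopVectorize code:

  #include "llvm/Analysis/TargetTransformInfo.h"
  using namespace llvm;

  // Hypothetical helper: gate a histogram transform on the TTI cost.
  static bool histogramIsProfitable(const TargetTransformInfo &TTI,
                                    const IntrinsicCostAttributes &ICA,
                                    InstructionCost Budget) {
    InstructionCost Cost = TTI.getIntrinsicInstrCost(
        ICA, TargetTransformInfo::TCK_RecipThroughput);
    // Targets without explicit histogram support (or AArch64 without SVE2)
    // now return InstructionCost::getInvalid(), which fails isValid().
    return Cost.isValid() && Cost <= Budget;
  }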
---
.../llvm/Analysis/TargetTransformInfoImpl.h | 3 +
.../AArch64/AArch64TargetTransformInfo.cpp | 33 +++++
.../CostModel/AArch64/sve-intrinsics.ll | 118 ++++++++++++++++++
3 files changed, 154 insertions(+)
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 0ded98f162abf..01624de190d51 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -728,6 +728,9 @@ class TargetTransformInfoImplBase {
switch (ICA.getID()) {
default:
break;
+ case Intrinsic::experimental_vector_histogram_add:
+ // For now, we want explicit support from the target for histograms.
+ return InstructionCost::getInvalid();
case Intrinsic::allow_runtime_check:
case Intrinsic::allow_ubsan_check:
case Intrinsic::annotation:
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index eb60b966c8e02..0ee8136884119 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -61,6 +61,11 @@ static cl::opt<bool> EnableOrLikeSelectOpt("enable-aarch64-or-like-select",
static cl::opt<bool> EnableLSRCostOpt("enable-aarch64-lsr-cost-opt",
cl::init(true), cl::Hidden);
+// A complete guess as to a reasonable cost.
+static cl::opt<unsigned>
+ BaseHistCntCost("aarch64-base-histcnt-cost", cl::init(8), cl::Hidden,
+ cl::desc("The cost of a histcnt instruction"));
+
namespace {
class TailFoldingOption {
// These bitfields will only ever be set to something non-zero in operator=,
@@ -508,11 +513,39 @@ static bool isUnpackedVectorVT(EVT VecVT) {
VecVT.getSizeInBits().getKnownMinValue() < AArch64::SVEBitsPerBlock;
}
+static InstructionCost getHistogramCost(const IntrinsicCostAttributes &ICA) {
+ Type *BucketPtrsTy = ICA.getArgTypes()[0]; // Type of vector of pointers
+ Type *EltTy = ICA.getArgTypes()[1]; // Type of bucket elements
+
+ // Only allow (32b and 64b) integers or pointers for now...
+ if ((!EltTy->isIntegerTy() && !EltTy->isPointerTy()) ||
+ (EltTy->getScalarSizeInBits() != 32 &&
+ EltTy->getScalarSizeInBits() != 64))
+ return InstructionCost::getInvalid();
+
+ // FIXME: Hacky check for legal vector types. We can promote smaller types
+ // but we cannot legalize vectors via splitting for histcnt.
+ // FIXME: We should be able to generate histcnt for fixed-length vectors
+ // using ptrue with a specific VL.
+ if (VectorType *VTy = dyn_cast<VectorType>(BucketPtrsTy))
+ if ((VTy->getElementCount().getKnownMinValue() != 2 &&
+ VTy->getElementCount().getKnownMinValue() != 4) ||
+ VTy->getPrimitiveSizeInBits().getKnownMinValue() > 128 ||
+ !VTy->isScalableTy())
+ return InstructionCost::getInvalid();
+
+ return InstructionCost(BaseHistCntCost);
+}
+
InstructionCost
AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) {
auto *RetTy = ICA.getReturnType();
switch (ICA.getID()) {
+ case Intrinsic::experimental_vector_histogram_add:
+ if (!ST->hasSVE2())
+ return InstructionCost::getInvalid();
+ return getHistogramCost(ICA);
case Intrinsic::umin:
case Intrinsic::umax:
case Intrinsic::smin:
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
index 1ff280d75b4e9..1993023c91e26 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
@@ -909,6 +909,123 @@ define void @masked_scatter_v1i128(<1 x i128> %data, <1 x ptr> %ptrs, <1 x i1> %
ret void
}
+define void @histogram_nxv2i64(<vscale x 2 x ptr> %buckets, <vscale x 2 x i1> %mask) #3 {
+; CHECK-LABEL: 'histogram_nxv2i64'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> %buckets, i64 1, <vscale x 2 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_nxv2i64'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> %buckets, i64 1, <vscale x 2 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> %buckets, i64 1, <vscale x 2 x i1> %mask)
+ ret void
+}
+
+define void @histogram_nxv4i32(<vscale x 4 x ptr> %buckets, <vscale x 4 x i1> %mask) #3 {
+; CHECK-LABEL: 'histogram_nxv4i32'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> %buckets, i32 1, <vscale x 4 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_nxv4i32'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> %buckets, i32 1, <vscale x 4 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> %buckets, i32 1, <vscale x 4 x i1> %mask)
+ ret void
+}
+
+define void @histogram_nxv8i16(<vscale x 8 x ptr> %buckets, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: 'histogram_nxv8i16'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> %buckets, i16 1, <vscale x 8 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_nxv8i16'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> %buckets, i16 1, <vscale x 8 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> %buckets, i16 1, <vscale x 8 x i1> %mask)
+ ret void
+}
+
+define void @histogram_nxv16i8(<vscale x 16 x ptr> %buckets, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: 'histogram_nxv16i8'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> %buckets, i8 1, <vscale x 16 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_nxv16i8'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> %buckets, i8 1, <vscale x 16 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.nxv16p0.i64(<vscale x 16 x ptr> %buckets, i8 1, <vscale x 16 x i1> %mask)
+ ret void
+}
+
+define void @histogram_v2i64(<2 x ptr> %buckets, <2 x i1> %mask) {
+; CHECK-LABEL: 'histogram_v2i64'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_v2i64'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
+ ret void
+}
+
+define void @histogram_v4i32(<4 x ptr> %buckets, <4 x i1> %mask) {
+; CHECK-LABEL: 'histogram_v4i32'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_v4i32'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
+ ret void
+}
+
+define void @histogram_v8i16(<8 x ptr> %buckets, <8 x i1> %mask) {
+; CHECK-LABEL: 'histogram_v8i16'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_v8i16'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
+ ret void
+}
+
+define void @histogram_v16i8(<16 x ptr> %buckets, <16 x i1> %mask) {
+; CHECK-LABEL: 'histogram_v16i8'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_v16i8'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.v16p0.i64(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
+ ret void
+}
+
+define void @histogram_nxv4i64(<vscale x 4 x ptr> %buckets, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: 'histogram_nxv4i64'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i64(<vscale x 4 x ptr> %buckets, i64 1, <vscale x 4 x i1> %mask)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'histogram_nxv4i64'
+; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i64(<vscale x 4 x ptr> %buckets, i64 1, <vscale x 4 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.nxv4p0.i64(<vscale x 4 x ptr> %buckets, i64 1, <vscale x 4 x i1> %mask)
+ ret void
+}
+
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)
@@ -949,3 +1066,4 @@ declare void @llvm.masked.scatter.v1i128.v1p0(<1 x i128> %data, <1 x ptr> %ptrs,
attributes #0 = { "target-features"="+sve,+bf16" }
attributes #1 = { "target-features"="+sve" vscale_range(1,16) }
attributes #2 = { "target-features"="+sve" vscale_range(2, 16) }
+attributes #3 = { "target-features"="+sve,+sve2" vscale_range(1,16) }
>From e6cf292003fa53214a8abf2b4150e58296f76483 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Thu, 4 Jul 2024 10:44:25 +0100
Subject: [PATCH 230/246] [InstCombine][X86] Add some basic knownbits tests for
PMULH/PMULHU
Recognise known zero upper bits
---
.../Transforms/InstCombine/X86/x86-pmulh.ll | 49 +++++++++++++++++++
.../Transforms/InstCombine/X86/x86-pmulhu.ll | 49 +++++++++++++++++++
2 files changed, 98 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
index 185ab46deed89..63b6b1a7c6b17 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
@@ -233,3 +233,52 @@ define <32 x i16> @elts_pmulh_512(<32 x i16> %a0, <32 x i16> %a1) {
%4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> zeroinitializer
ret <32 x i16> %4
}
+
+;
+; Known Bits
+;
+
+define <8 x i16> @known_pmulh_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-LABEL: @known_pmulh_128(
+; CHECK-NEXT: [[X0:%.*]] = lshr <8 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+; CHECK-NEXT: [[X1:%.*]] = and <8 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+; CHECK-NEXT: [[M:%.*]] = tail call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[X0]], <8 x i16> [[X1]])
+; CHECK-NEXT: [[R:%.*]] = add <8 x i16> [[M]], [[A2:%.*]]
+; CHECK-NEXT: ret <8 x i16> [[R]]
+;
+ %x0 = lshr <8 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %x1 = and <8 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %m = tail call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %x0, <8 x i16> %x1)
+ %r = add <8 x i16> %m, %a2
+ ret <8 x i16> %r
+}
+
+define <16 x i16> @known_pmulh_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2) {
+; CHECK-LABEL: @known_pmulh_256(
+; CHECK-NEXT: [[X0:%.*]] = lshr <16 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+; CHECK-NEXT: [[X1:%.*]] = and <16 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+; CHECK-NEXT: [[M:%.*]] = tail call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[X0]], <16 x i16> [[X1]])
+; CHECK-NEXT: [[R:%.*]] = add <16 x i16> [[M]], [[A2:%.*]]
+; CHECK-NEXT: ret <16 x i16> [[R]]
+;
+ %x0 = lshr <16 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %x1 = and <16 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %m = tail call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %x0, <16 x i16> %x1)
+ %r = add <16 x i16> %m, %a2
+ ret <16 x i16> %r
+}
+
+define <32 x i16> @known_pmulh_512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> %a2) {
+; CHECK-LABEL: @known_pmulh_512(
+; CHECK-NEXT: [[X0:%.*]] = lshr <32 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+; CHECK-NEXT: [[X1:%.*]] = and <32 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+; CHECK-NEXT: [[M:%.*]] = tail call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0]], <32 x i16> [[X1]])
+; CHECK-NEXT: [[R:%.*]] = add <32 x i16> [[M]], [[A2:%.*]]
+; CHECK-NEXT: ret <32 x i16> [[R]]
+;
+ %x0 = lshr <32 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %x1 = and <32 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %m = tail call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> %x0, <32 x i16> %x1)
+ %r = add <32 x i16> %m, %a2
+ ret <32 x i16> %r
+}
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
index b18833f703a5f..8234bba696db5 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
@@ -227,3 +227,52 @@ define <32 x i16> @elts_pmulhu_512(<32 x i16> %a0, <32 x i16> %a1) {
%4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32> zeroinitializer
ret <32 x i16> %4
}
+
+;
+; Known Bits
+;
+
+define <8 x i16> @known_pmulhu_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-LABEL: @known_pmulhu_128(
+; CHECK-NEXT: [[X0:%.*]] = lshr <8 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+; CHECK-NEXT: [[X1:%.*]] = and <8 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+; CHECK-NEXT: [[M:%.*]] = tail call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[X0]], <8 x i16> [[X1]])
+; CHECK-NEXT: [[R:%.*]] = add <8 x i16> [[M]], [[A2:%.*]]
+; CHECK-NEXT: ret <8 x i16> [[R]]
+;
+ %x0 = lshr <8 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %x1 = and <8 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %m = tail call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %x0, <8 x i16> %x1)
+ %r = add <8 x i16> %m, %a2
+ ret <8 x i16> %r
+}
+
+define <16 x i16> @known_pmulhu_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2) {
+; CHECK-LABEL: @known_pmulhu_256(
+; CHECK-NEXT: [[X0:%.*]] = lshr <16 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+; CHECK-NEXT: [[X1:%.*]] = and <16 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+; CHECK-NEXT: [[M:%.*]] = tail call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[X0]], <16 x i16> [[X1]])
+; CHECK-NEXT: [[R:%.*]] = add <16 x i16> [[M]], [[A2:%.*]]
+; CHECK-NEXT: ret <16 x i16> [[R]]
+;
+ %x0 = lshr <16 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %x1 = and <16 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %m = tail call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %x0, <16 x i16> %x1)
+ %r = add <16 x i16> %m, %a2
+ ret <16 x i16> %r
+}
+
+define <32 x i16> @known_pmulhu_512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> %a2) {
+; CHECK-LABEL: @known_pmulhu_512(
+; CHECK-NEXT: [[X0:%.*]] = lshr <32 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+; CHECK-NEXT: [[X1:%.*]] = and <32 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+; CHECK-NEXT: [[M:%.*]] = tail call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0]], <32 x i16> [[X1]])
+; CHECK-NEXT: [[R:%.*]] = add <32 x i16> [[M]], [[A2:%.*]]
+; CHECK-NEXT: ret <32 x i16> [[R]]
+;
+ %x0 = lshr <32 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %x1 = and <32 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %m = tail call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> %x0, <32 x i16> %x1)
+ %r = add <32 x i16> %m, %a2
+ ret <32 x i16> %r
+}
>From 5c204b1d2619cbad7b7ad228b62feae08781a6db Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Thu, 4 Jul 2024 11:07:51 +0100
Subject: [PATCH 231/246] [ValueTracking][X86] computeKnownBitsFromOperator -
add PMULH/PMULHU intrinsics mulhs/mulhu known bits handling.
These map directly to the KnownBits implementations.
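For intuition, here is a small standalone check (a hypothetical demo, not part of the patch) of the property the updated tests rely on: after an `lshr` by at least 8 one operand has at most 8 significant bits, and after an `and` with 3 the other has at most 2, so the full 32-bit product always fits in the low half, the high-half multiply is known to return zero in every lane, and the trailing `add` folds away.
```c++
#include <cassert>
#include <cstdint>

// One lane of pmulhu: the high 16 bits of the 32-bit unsigned product.
static uint16_t pmulhu_lane(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>((uint32_t(a) * uint32_t(b)) >> 16);
}

int main() {
  for (uint32_t a = 0; a <= 0xFFFF; ++a)
    for (uint16_t m = 0; m <= 3; ++m)
      // (a >> 8) <= 255 and m <= 3, so the product is < 2^16: high half == 0.
      assert(pmulhu_lane(uint16_t(a) >> 8, m) == 0);
}
```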
---
llvm/lib/Analysis/ValueTracking.cpp | 14 ++++++++++++++
.../Transforms/InstCombine/X86/x86-pmulh.ll | 18 +++---------------
.../Transforms/InstCombine/X86/x86-pmulhu.ll | 18 +++---------------
3 files changed, 20 insertions(+), 30 deletions(-)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 258576f0cdff8..85abf00774a02 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1739,6 +1739,20 @@ static void computeKnownBitsFromOperator(const Operator *I,
Known &= Known2.anyextOrTrunc(BitWidth);
break;
}
+ case Intrinsic::x86_sse2_pmulh_w:
+ case Intrinsic::x86_avx2_pmulh_w:
+ case Intrinsic::x86_avx512_pmulh_w_512:
+ computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
+ computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+ Known = KnownBits::mulhs(Known, Known2);
+ break;
+ case Intrinsic::x86_sse2_pmulhu_w:
+ case Intrinsic::x86_avx2_pmulhu_w:
+ case Intrinsic::x86_avx512_pmulhu_w_512:
+ computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
+ computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+ Known = KnownBits::mulhu(Known, Known2);
+ break;
case Intrinsic::x86_sse42_crc32_64_64:
Known.Zero.setBitsFrom(32);
break;
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
index 63b6b1a7c6b17..947c7d38d26ee 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulh.ll
@@ -240,11 +240,7 @@ define <32 x i16> @elts_pmulh_512(<32 x i16> %a0, <32 x i16> %a1) {
define <8 x i16> @known_pmulh_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: @known_pmulh_128(
-; CHECK-NEXT: [[X0:%.*]] = lshr <8 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
-; CHECK-NEXT: [[X1:%.*]] = and <8 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-; CHECK-NEXT: [[M:%.*]] = tail call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[X0]], <8 x i16> [[X1]])
-; CHECK-NEXT: [[R:%.*]] = add <8 x i16> [[M]], [[A2:%.*]]
-; CHECK-NEXT: ret <8 x i16> [[R]]
+; CHECK-NEXT: ret <8 x i16> [[A2:%.*]]
;
%x0 = lshr <8 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%x1 = and <8 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -255,11 +251,7 @@ define <8 x i16> @known_pmulh_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
define <16 x i16> @known_pmulh_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2) {
; CHECK-LABEL: @known_pmulh_256(
-; CHECK-NEXT: [[X0:%.*]] = lshr <16 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
-; CHECK-NEXT: [[X1:%.*]] = and <16 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-; CHECK-NEXT: [[M:%.*]] = tail call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[X0]], <16 x i16> [[X1]])
-; CHECK-NEXT: [[R:%.*]] = add <16 x i16> [[M]], [[A2:%.*]]
-; CHECK-NEXT: ret <16 x i16> [[R]]
+; CHECK-NEXT: ret <16 x i16> [[A2:%.*]]
;
%x0 = lshr <16 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%x1 = and <16 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -270,11 +262,7 @@ define <16 x i16> @known_pmulh_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a
define <32 x i16> @known_pmulh_512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> %a2) {
; CHECK-LABEL: @known_pmulh_512(
-; CHECK-NEXT: [[X0:%.*]] = lshr <32 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
-; CHECK-NEXT: [[X1:%.*]] = and <32 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-; CHECK-NEXT: [[M:%.*]] = tail call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0]], <32 x i16> [[X1]])
-; CHECK-NEXT: [[R:%.*]] = add <32 x i16> [[M]], [[A2:%.*]]
-; CHECK-NEXT: ret <32 x i16> [[R]]
+; CHECK-NEXT: ret <32 x i16> [[A2:%.*]]
;
%x0 = lshr <32 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%x1 = and <32 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
index 8234bba696db5..560969f7c4a81 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-pmulhu.ll
@@ -234,11 +234,7 @@ define <32 x i16> @elts_pmulhu_512(<32 x i16> %a0, <32 x i16> %a1) {
define <8 x i16> @known_pmulhu_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: @known_pmulhu_128(
-; CHECK-NEXT: [[X0:%.*]] = lshr <8 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
-; CHECK-NEXT: [[X1:%.*]] = and <8 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-; CHECK-NEXT: [[M:%.*]] = tail call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[X0]], <8 x i16> [[X1]])
-; CHECK-NEXT: [[R:%.*]] = add <8 x i16> [[M]], [[A2:%.*]]
-; CHECK-NEXT: ret <8 x i16> [[R]]
+; CHECK-NEXT: ret <8 x i16> [[A2:%.*]]
;
%x0 = lshr <8 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%x1 = and <8 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -249,11 +245,7 @@ define <8 x i16> @known_pmulhu_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2)
define <16 x i16> @known_pmulhu_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2) {
; CHECK-LABEL: @known_pmulhu_256(
-; CHECK-NEXT: [[X0:%.*]] = lshr <16 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
-; CHECK-NEXT: [[X1:%.*]] = and <16 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-; CHECK-NEXT: [[M:%.*]] = tail call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[X0]], <16 x i16> [[X1]])
-; CHECK-NEXT: [[R:%.*]] = add <16 x i16> [[M]], [[A2:%.*]]
-; CHECK-NEXT: ret <16 x i16> [[R]]
+; CHECK-NEXT: ret <16 x i16> [[A2:%.*]]
;
%x0 = lshr <16 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%x1 = and <16 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -264,11 +256,7 @@ define <16 x i16> @known_pmulhu_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %
define <32 x i16> @known_pmulhu_512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> %a2) {
; CHECK-LABEL: @known_pmulhu_512(
-; CHECK-NEXT: [[X0:%.*]] = lshr <32 x i16> [[A0:%.*]], <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
-; CHECK-NEXT: [[X1:%.*]] = and <32 x i16> [[A1:%.*]], <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-; CHECK-NEXT: [[M:%.*]] = tail call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0]], <32 x i16> [[X1]])
-; CHECK-NEXT: [[R:%.*]] = add <32 x i16> [[M]], [[A2:%.*]]
-; CHECK-NEXT: ret <32 x i16> [[R]]
+; CHECK-NEXT: ret <32 x i16> [[A2:%.*]]
;
%x0 = lshr <32 x i16> %a0, <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%x1 = and <32 x i16> %a1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
>From 2b3b405b09a0d965a4aff1f92958418ddbf1e7f6 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 4 Jul 2024 11:44:50 +0100
Subject: [PATCH 232/246] [LV] Don't vectorize first-order recurrence with VF
<vscale x 1 x ..>
The assertion added as part of https://github.com/llvm/llvm-project/pull/93395
surfaced cases where first-order recurrences are vectorized with
<vscale x 1 x ..>. If vscale is 1, then we are unable to extract the
penultimate value (second-to-last lane). Previously this case was
miscompiled, extracting from an invalid lane (-1):
https://llvm.godbolt.org/z/3adzYYcf9.
Fixes https://github.com/llvm/llvm-project/issues/97452.
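To spell out the lane arithmetic (a hypothetical sketch, not code from
LoopVectorize): the penultimate value lives at index `runtime VF - 2`, which
underflows to -1 exactly when the runtime VF is 1.
```c++
#include <cstdint>

// Index of the penultimate (second-to-last) lane for a scalable VF.
static int64_t penultimateLaneIndex(uint64_t KnownMinVF, uint64_t VScale) {
  int64_t RuntimeVF = static_cast<int64_t>(KnownMinVF * VScale);
  return RuntimeVF - 2; // <vscale x 1 x ..> with vscale == 1 gives -1
}
```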
---
.../Transforms/Vectorize/LoopVectorize.cpp | 5 ++
.../first-order-recurrence-scalable-vf1.ll | 73 ++++++++++++++++++
.../first-order-recurrence-scalable-vf1.ll | 76 +++++++++++++++++++
3 files changed, 154 insertions(+)
create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll
create mode 100644 llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f9c0c66e6e0af..d7b0240fd8a81 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -6813,6 +6813,11 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
// First-order recurrences are replaced by vector shuffles inside the loop.
if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
+ // For <vscale x 1 x i64>, if vscale = 1 we are unable to extract the
+ // penultimate value of the recurrence.
+ // TODO: Consider vscale_range info.
+ if (VF.isScalable() && VF.getKnownMinValue() == 1)
+ return InstructionCost::getInvalid();
SmallVector<int> Mask(VF.getKnownMinValue());
std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll
new file mode 100644
index 0000000000000..75907e02a4373
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p loop-vectorize -S %s | FileCheck %s
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+; Make sure we do not pick <vscale x 1 x i64> as VF for a loop with a
+; first-order recurrence.
+define i64 @pr97452_scalable_vf1_for(ptr %src) #0 {
+; CHECK-LABEL: define i64 @pr97452_scalable_vf1_for(
+; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, %[[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[WIDE_LOAD1]] = load <4 x i64>, ptr [[TMP5]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[WIDE_LOAD1]], i32 2
+; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[WIDE_LOAD1]], i32 3
+; CHECK-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[L]] = load i64, ptr [[GEP]], align 8
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[SCALAR_RECUR]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i64 [[RES]]
+;
+entry:
+ br label %loop
+
+loop:
+ %for = phi i64 [ 0, %entry ], [ %l, %loop ]
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i64 %iv, 1
+ %gep = getelementptr inbounds i64, ptr %src, i64 %iv
+ %l = load i64, ptr %gep, align 8
+ %ec = icmp eq i64 %iv, 22
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %res = phi i64 [ %for, %loop ]
+ ret i64 %res
+}
+
+attributes #0 = { "target-features"="+64bit,+v,+zvl128b,+zvl256b" }
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll
new file mode 100644
index 0000000000000..314a133debbd7
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p loop-vectorize -scalable-vectorization=on -force-vector-width=1 -force-target-supports-scalable-vectors=true -S %s | FileCheck %s
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+
+
+define i64 @pr97452_scalable_vf1_for_live_out(ptr %src) {
+; CHECK-LABEL: define i64 @pr97452_scalable_vf1_for_live_out(
+; CHECK-SAME: ptr [[SRC:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[L:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[L]] = load i64, ptr [[GEP]], align 8
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[FOR]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[RES]]
+;
+entry:
+ br label %loop
+
+loop:
+ %for = phi i64 [ 0, %entry ], [ %l, %loop ]
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i64 %iv, 1
+ %gep = getelementptr inbounds i64, ptr %src, i64 %iv
+ %l = load i64, ptr %gep, align 8
+ %ec = icmp eq i64 %iv, 22
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %res = phi i64 [ %for, %loop ]
+ ret i64 %res
+}
+
+
+define void @pr97452_scalable_vf1_for_no_live_out(ptr %src, ptr noalias %dst) {
+; CHECK-LABEL: define void @pr97452_scalable_vf1_for_no_live_out(
+; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[L:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[L]] = load i64, ptr [[GEP]], align 8
+; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store i64 [[L]], ptr [[GEP_DST]], align 8
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %for = phi i64 [ 0, %entry ], [ %l, %loop ]
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i64 %iv, 1
+ %gep = getelementptr inbounds i64, ptr %src, i64 %iv
+ %l = load i64, ptr %gep, align 8
+ %gep.dst = getelementptr inbounds i64, ptr %dst, i64 %iv
+ store i64 %l, ptr %gep.dst
+ %ec = icmp eq i64 %iv, 22
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
>From e7bfd4d77fafdcad890f80f8feee50ca02e0e2c3 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Thu, 4 Jul 2024 12:47:33 +0200
Subject: [PATCH 233/246] [Mem2Reg] Add some single store tests (NFC)
For https://github.com/llvm/llvm-project/issues/97702.
---
llvm/test/Transforms/Mem2Reg/single-store.ll | 67 ++++++++++++++++++++
1 file changed, 67 insertions(+)
create mode 100644 llvm/test/Transforms/Mem2Reg/single-store.ll
diff --git a/llvm/test/Transforms/Mem2Reg/single-store.ll b/llvm/test/Transforms/Mem2Reg/single-store.ll
new file mode 100644
index 0000000000000..b82e26158a361
--- /dev/null
+++ b/llvm/test/Transforms/Mem2Reg/single-store.ll
@@ -0,0 +1,67 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=mem2reg < %s | FileCheck %s
+
+; FIXME: This is a miscompile: when %cond is false the load should yield undef, yet mem2reg forwards the stored poison.
+define i8 @single_store_literal_poison(i1 %cond) {
+; CHECK-LABEL: define i8 @single_store_literal_poison(
+; CHECK-SAME: i1 [[COND:%.*]]) {
+; CHECK-NEXT: br i1 [[COND]], label %[[IF:.*]], label %[[EXIT:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret i8 poison
+;
+ %a = alloca i8, align 1
+ br i1 %cond, label %if, label %exit
+
+if:
+ store i8 poison, ptr %a, align 1
+ br label %exit
+
+exit:
+ %v = load i8, ptr %a, align 1
+ ret i8 %v
+}
+
+; FIXME: This is a miscompile: when %cond is false the load should yield undef, yet mem2reg forwards %x, which may be poison.
+define i8 @single_store_maybe_poison(i1 %cond, i8 %x) {
+; CHECK-LABEL: define i8 @single_store_maybe_poison(
+; CHECK-SAME: i1 [[COND:%.*]], i8 [[X:%.*]]) {
+; CHECK-NEXT: br i1 [[COND]], label %[[IF:.*]], label %[[EXIT:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret i8 [[X]]
+;
+ %a = alloca i8, align 1
+ br i1 %cond, label %if, label %exit
+
+if:
+ store i8 %x, ptr %a, align 1
+ br label %exit
+
+exit:
+ %v = load i8, ptr %a, align 1
+ ret i8 %v
+}
+
+define i8 @single_store_cant_be_poison(i1 %cond, i8 noundef %x) {
+; CHECK-LABEL: define i8 @single_store_cant_be_poison(
+; CHECK-SAME: i1 [[COND:%.*]], i8 noundef [[X:%.*]]) {
+; CHECK-NEXT: br i1 [[COND]], label %[[IF:.*]], label %[[EXIT:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret i8 [[X]]
+;
+ %a = alloca i8, align 1
+ br i1 %cond, label %if, label %exit
+
+if:
+ store i8 %x, ptr %a, align 1
+ br label %exit
+
+exit:
+ %v = load i8, ptr %a, align 1
+ ret i8 %v
+}
>From 07b3e2c0c68b93a3d4d89426dc7fd14cc31ca6be Mon Sep 17 00:00:00 2001
From: Muhammad Omair Javaid <omair.javaid at linaro.org>
Date: Thu, 4 Jul 2024 15:55:43 +0500
Subject: [PATCH 234/246] [LLDB] XFail on Windows TestThreadAPI.py
test_StepInstruction
TestThreadAPI.py test_StepInstruction started failing after #97493.
The following assertion fails, but I am not sure whether the test would
pass after simply updating the expected string:
AssertionError: 'void __cdecl call_me(bool)' != 'call_me(bool)'
I have marked it as XFAIL; I'll run it on a Windows machine to find
an appropriate fix.
https://lab.llvm.org/buildbot/#/builders/141/builds/476
---
lldb/test/API/python_api/thread/TestThreadAPI.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/lldb/test/API/python_api/thread/TestThreadAPI.py b/lldb/test/API/python_api/thread/TestThreadAPI.py
index d5fc77532d859..a74302263aa45 100644
--- a/lldb/test/API/python_api/thread/TestThreadAPI.py
+++ b/lldb/test/API/python_api/thread/TestThreadAPI.py
@@ -51,7 +51,8 @@ def test_negative_indexing(self):
"""Test SBThread.frame with negative indexes."""
self.build()
self.validate_negative_indexing()
-
+
+ @expectedFailureAll(oslist=["windows"])
def test_StepInstruction(self):
"""Test that StepInstruction preserves the plan stack."""
self.build()
>From d6af73e9fbc84315100499a096f17ec5eeeeea23 Mon Sep 17 00:00:00 2001
From: David Spickett <david.spickett at linaro.org>
Date: Thu, 4 Jul 2024 11:03:30 +0000
Subject: [PATCH 235/246] [compiler-rt][Fuzzer] Disable fuzzer-leak test
This test has been flaky, reporting a lot of unrelated failures on PRs.
See https://github.com/llvm/llvm-project/issues/97712
---
compiler-rt/test/fuzzer/fuzzer-leak.test | 2 ++
1 file changed, 2 insertions(+)
diff --git a/compiler-rt/test/fuzzer/fuzzer-leak.test b/compiler-rt/test/fuzzer/fuzzer-leak.test
index dd22fdec8677e..43d896e62d546 100644
--- a/compiler-rt/test/fuzzer/fuzzer-leak.test
+++ b/compiler-rt/test/fuzzer/fuzzer-leak.test
@@ -1,4 +1,6 @@
REQUIRES: lsan
+// See https://github.com/llvm/llvm-project/issues/97712.
+UNSUPPORTED: target={{.*}}
RUN: %cpp_compiler %S/LeakTest.cpp -o %t-LeakTest
RUN: %cpp_compiler %S/ThreadedLeakTest.cpp -o %t-ThreadedLeakTest
>From 483557224b8d36761f39d5847e17ef7361757f1b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Krist=C3=B3f=20Umann?= <dkszelethus at gmail.com>
Date: Thu, 4 Jul 2024 13:46:22 +0200
Subject: [PATCH 236/246] [analyzer] Check the correct first and last elements
in cstring.UninitializedRead (#95408)
I intend to fix this checker up so that we can move it out of alpha. I
ran a number of analyses and found many similar false positives:
```c++
int t[] = {1,2,3};
memcpy(dst, t, sizeof(t) / sizeof(t[0])); // warn
```
The problem here is the way CStringChecker checks whether the
destination and source buffers are initialized: heuristically, it only
checks the first and last elements. This is fine; however, it retrieves
these elements as characters, even if the underlying object is not a
character array. To the analyzer, the last byte of a partially
initialized integer reads as undefined, so the checker emits a bug here.
A quick search tells you the rationale: "Both objects are reinterpreted
as arrays of unsigned char." But the static analyzer right now can't
check byte-by-byte whether a memory region is _initialized_; it can only
check whether each byte holds a well-defined character or not.
In this patch, I pry the original array out of the arguments to memcpy
(and similar functions), and retrieve the actual first and last elements
according to the array's actual element type.
These improvements reduce the number of reports to 29 on the following
projects: memcached, tmux, curl, twin, vim, openssl, sqlite, ffmpeg, postgres
https://codechecker-demo.eastus.cloudapp.azure.com/Default/reports?detection-status=New&detection-status=Reopened&detection-status=Unresolved&is-unique=on&run=%2acstring_uninit_upper_bound_patched&newcheck=%2acstring_uninit_upper_bounds_patched&diff-type=New&checker-name=alpha.unix.cstring.UninitializedRead&items-per-page=100
Before my patch, there were 87.
https://codechecker-demo.eastus.cloudapp.azure.com/Default/reports?detection-status=New&detection-status=Reopened&detection-status=Unresolved&is-unique=on&run=%2acstring_uninit_baseline&newcheck=%2acstring_uninit_upper_bounds_patched&diff-type=New&checker-name=alpha.unix.cstring.UninitializedRead&items-per-page=100
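The index arithmetic behind the new last-element check, as a standalone
sketch (a hypothetical helper, not the checker code itself): the byte size
passed to the cstring function is divided by the real element size, and the
last accessed element sits one before that bound.
```c++
#include <cstddef>

// Index of the last element accessed by memcpy(dst, src, ByteSize) when src
// is an array whose elements are ElemSize bytes wide (assumes the size is an
// exact multiple, which the checker does not verify yet -- see the FIXME in
// the patch below).
static size_t lastAccessedIndex(size_t ByteSize, size_t ElemSize) {
  return ByteSize / ElemSize - 1; // e.g. int t[5]: 20 / 4 - 1 == index 4
}
```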
---
.../Core/PathSensitive/MemRegion.h | 5 +
.../Checkers/CStringChecker.cpp | 211 ++++++++++++++----
clang/lib/StaticAnalyzer/Core/MemRegion.cpp | 11 +
clang/test/Analysis/bstring_UninitRead.c | 119 ++++++++--
4 files changed, 280 insertions(+), 66 deletions(-)
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
index c0d3fbd0eb961..0d9566285f5d4 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -34,6 +34,7 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <limits>
@@ -99,6 +100,8 @@ class MemRegion : public llvm::FoldingSetNode {
#define REGION(Id, Parent) Id ## Kind,
#define REGION_RANGE(Id, First, Last) BEGIN_##Id = First, END_##Id = Last,
#include "clang/StaticAnalyzer/Core/PathSensitive/Regions.def"
+#undef REGION
+#undef REGION_RANGE
};
private:
@@ -171,6 +174,8 @@ class MemRegion : public llvm::FoldingSetNode {
Kind getKind() const { return kind; }
+ StringRef getKindStr() const;
+
template<typename RegionTy> const RegionTy* getAs() const;
template <typename RegionTy>
LLVM_ATTRIBUTE_RETURNS_NONNULL const RegionTy *castAs() const;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 238e87a712a43..8dd08f14b2728 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "InterCheckerAPI.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
@@ -22,10 +23,13 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include <functional>
#include <optional>
@@ -304,6 +308,10 @@ class CStringChecker : public Checker< eval::Call,
// Re-usable checks
ProgramStateRef checkNonNull(CheckerContext &C, ProgramStateRef State,
AnyArgExpr Arg, SVal l) const;
+  // Check whether the origin region behind \p Element (i.e., the actual
+  // array region that \p Element is part of) is initialized.
+ ProgramStateRef checkInit(CheckerContext &C, ProgramStateRef state,
+ AnyArgExpr Buffer, SVal Element, SVal Size) const;
ProgramStateRef CheckLocation(CheckerContext &C, ProgramStateRef state,
AnyArgExpr Buffer, SVal Element,
AccessKind Access,
@@ -329,7 +337,7 @@ class CStringChecker : public Checker< eval::Call,
const Stmt *S, StringRef WarningMsg) const;
void emitAdditionOverflowBug(CheckerContext &C, ProgramStateRef State) const;
void emitUninitializedReadBug(CheckerContext &C, ProgramStateRef State,
- const Expr *E) const;
+ const Expr *E, StringRef Msg) const;
ProgramStateRef checkAdditionOverflow(CheckerContext &C,
ProgramStateRef state,
NonLoc left,
@@ -351,16 +359,16 @@ REGISTER_MAP_WITH_PROGRAMSTATE(CStringLength, const MemRegion *, SVal)
// Individual checks and utility methods.
//===----------------------------------------------------------------------===//
-std::pair<ProgramStateRef , ProgramStateRef >
-CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
+std::pair<ProgramStateRef, ProgramStateRef>
+CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef State, SVal V,
QualType Ty) {
std::optional<DefinedSVal> val = V.getAs<DefinedSVal>();
if (!val)
- return std::pair<ProgramStateRef , ProgramStateRef >(state, state);
+ return std::pair<ProgramStateRef, ProgramStateRef>(State, State);
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal zero = svalBuilder.makeZeroVal(Ty);
- return state->assume(svalBuilder.evalEQ(state, *val, zero));
+ return State->assume(svalBuilder.evalEQ(State, *val, zero));
}
ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
@@ -393,6 +401,149 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
return stateNonNull;
}
+static std::optional<NonLoc> getIndex(ProgramStateRef State,
+ const ElementRegion *ER, CharKind CK) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ ASTContext &Ctx = SVB.getContext();
+
+ if (CK == CharKind::Regular) {
+ if (ER->getValueType() != Ctx.CharTy)
+ return {};
+ return ER->getIndex();
+ }
+
+ if (ER->getValueType() != Ctx.WideCharTy)
+ return {};
+
+ QualType SizeTy = Ctx.getSizeType();
+ NonLoc WideSize =
+ SVB.makeIntVal(Ctx.getTypeSizeInChars(Ctx.WideCharTy).getQuantity(),
+ SizeTy)
+ .castAs<NonLoc>();
+ SVal Offset =
+ SVB.evalBinOpNN(State, BO_Mul, ER->getIndex(), WideSize, SizeTy);
+ if (Offset.isUnknown())
+ return {};
+ return Offset.castAs<NonLoc>();
+}
+
+// Basically 1 -> 1st, 12 -> 12th, etc.
+static void printIdxWithOrdinalSuffix(llvm::raw_ostream &Os, unsigned Idx) {
+ Os << Idx << llvm::getOrdinalSuffix(Idx);
+}
+
+ProgramStateRef CStringChecker::checkInit(CheckerContext &C,
+ ProgramStateRef State,
+ AnyArgExpr Buffer, SVal Element,
+ SVal Size) const {
+
+ // If a previous check has failed, propagate the failure.
+ if (!State)
+ return nullptr;
+
+ const MemRegion *R = Element.getAsRegion();
+ const auto *ER = dyn_cast_or_null<ElementRegion>(R);
+ if (!ER)
+ return State;
+
+ const auto *SuperR = ER->getSuperRegion()->getAs<TypedValueRegion>();
+ if (!SuperR)
+ return State;
+
+  // FIXME: We ought to be able to check objects as well. Maybe
+ // UninitializedObjectChecker could help?
+ if (!SuperR->getValueType()->isArrayType())
+ return State;
+
+ SValBuilder &SVB = C.getSValBuilder();
+ ASTContext &Ctx = SVB.getContext();
+
+ const QualType ElemTy = Ctx.getBaseElementType(SuperR->getValueType());
+ const NonLoc Zero = SVB.makeZeroArrayIndex();
+
+ std::optional<Loc> FirstElementVal =
+ State->getLValue(ElemTy, Zero, loc::MemRegionVal(SuperR)).getAs<Loc>();
+ if (!FirstElementVal)
+ return State;
+
+  // Ensure that we wouldn't read an uninitialized value.
+ if (Filter.CheckCStringUninitializedRead &&
+ State->getSVal(*FirstElementVal).isUndef()) {
+ llvm::SmallString<258> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "The first element of the ";
+ printIdxWithOrdinalSuffix(OS, Buffer.ArgumentIndex + 1);
+ OS << " argument is undefined";
+ emitUninitializedReadBug(C, State, Buffer.Expression, OS.str());
+ return nullptr;
+ }
+
+  // We won't check whether the entire region is fully initialized -- let's
+  // just check that the first and the last elements are. So, onto checking
+  // the last element:
+ const QualType IdxTy = SVB.getArrayIndexType();
+
+ NonLoc ElemSize =
+ SVB.makeIntVal(Ctx.getTypeSizeInChars(ElemTy).getQuantity(), IdxTy)
+ .castAs<NonLoc>();
+
+  // FIXME: Check that the size arg to the cstring function is divisible by
+  // the size of the actual element type?
+
+ // The type of the argument to the cstring function is either char or wchar,
+  // but that's not the type of the original array (or memory region).
+ // Suppose the following:
+ // int t[5];
+ // memcpy(dst, t, sizeof(t) / sizeof(t[0]));
+  // When checking whether t is fully initialized, we see it as a char array of
+ // size sizeof(int)*5. If we check the last element as a character, we read
+ // the last byte of an integer, which will be undefined. But just because
+ // that value is undefined, it doesn't mean that the element is uninitialized!
+ // For this reason, we need to retrieve the actual last element with the
+ // correct type.
+
+  // Divide the size argument to the cstring function by the actual element
+  // size. This value will be the size of the array, or the index of the
+  // past-the-end element.
+ std::optional<NonLoc> Offset =
+ SVB.evalBinOpNN(State, clang::BO_Div, Size.castAs<NonLoc>(), ElemSize,
+ IdxTy)
+ .getAs<NonLoc>();
+
+  if (!Offset)
+    return State;
+
+  // Retrieve the index of the last element.
+  const NonLoc One = SVB.makeIntVal(1, IdxTy).castAs<NonLoc>();
+  SVal LastIdx = SVB.evalBinOpNN(State, BO_Sub, *Offset, One, IdxTy);
+
+ SVal LastElementVal =
+ State->getLValue(ElemTy, LastIdx, loc::MemRegionVal(SuperR));
+ if (!isa<Loc>(LastElementVal))
+ return State;
+
+ if (Filter.CheckCStringUninitializedRead &&
+ State->getSVal(LastElementVal.castAs<Loc>()).isUndef()) {
+ const llvm::APSInt *IdxInt = LastIdx.getAsInteger();
+    // If we can't emit a sensible last-element index, just bail out --
+    // prefer to emit nothing over garbage-quality reports.
+ if (!IdxInt) {
+ C.addSink();
+ return nullptr;
+ }
+ llvm::SmallString<258> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "The last accessed element (at index ";
+ OS << IdxInt->getExtValue();
+ OS << ") in the ";
+ printIdxWithOrdinalSuffix(OS, Buffer.ArgumentIndex + 1);
+ OS << " argument is undefined";
+ emitUninitializedReadBug(C, State, Buffer.Expression, OS.str());
+ return nullptr;
+ }
+ return State;
+}
+
// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
ProgramStateRef state,
@@ -413,38 +564,17 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
if (!ER)
return state;
- SValBuilder &svalBuilder = C.getSValBuilder();
- ASTContext &Ctx = svalBuilder.getContext();
-
// Get the index of the accessed element.
- NonLoc Idx = ER->getIndex();
-
- if (CK == CharKind::Regular) {
- if (ER->getValueType() != Ctx.CharTy)
- return state;
- } else {
- if (ER->getValueType() != Ctx.WideCharTy)
- return state;
-
- QualType SizeTy = Ctx.getSizeType();
- NonLoc WideSize =
- svalBuilder
- .makeIntVal(Ctx.getTypeSizeInChars(Ctx.WideCharTy).getQuantity(),
- SizeTy)
- .castAs<NonLoc>();
- SVal Offset = svalBuilder.evalBinOpNN(state, BO_Mul, Idx, WideSize, SizeTy);
- if (Offset.isUnknown())
- return state;
- Idx = Offset.castAs<NonLoc>();
- }
+ std::optional<NonLoc> Idx = getIndex(state, ER, CK);
+ if (!Idx)
+ return state;
// Get the size of the array.
const auto *superReg = cast<SubRegion>(ER->getSuperRegion());
DefinedOrUnknownSVal Size =
getDynamicExtent(state, superReg, C.getSValBuilder());
- ProgramStateRef StInBound, StOutBound;
- std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, Size);
+ auto [StInBound, StOutBound] = state->assumeInBoundDual(*Idx, Size);
if (StOutBound && !StInBound) {
// These checks are either enabled by the CString out-of-bounds checker
// explicitly or implicitly by the Malloc checker.
@@ -459,15 +589,6 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
return nullptr;
}
- // Ensure that we wouldn't read uninitialized value.
- if (Access == AccessKind::read) {
- if (Filter.CheckCStringUninitializedRead &&
- StInBound->getSVal(ER).isUndef()) {
- emitUninitializedReadBug(C, StInBound, Buffer.Expression);
- return nullptr;
- }
- }
-
// Array bound check succeeded. From this point forward the array bound
// should always succeed.
return StInBound;
@@ -502,6 +623,7 @@ CStringChecker::CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
// Check if the first byte of the buffer is accessible.
State = CheckLocation(C, State, Buffer, BufStart, Access, CK);
+
if (!State)
return nullptr;
@@ -526,6 +648,8 @@ CStringChecker::CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
SVal BufEnd =
svalBuilder.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
State = CheckLocation(C, State, Buffer, BufEnd, Access, CK);
+ if (Access == AccessKind::read)
+ State = checkInit(C, State, Buffer, BufEnd, *Length);
// If the buffer isn't large enough, abort.
if (!State)
@@ -694,16 +818,17 @@ void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
void CStringChecker::emitUninitializedReadBug(CheckerContext &C,
ProgramStateRef State,
- const Expr *E) const {
+ const Expr *E,
+ StringRef Msg) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- const char *Msg =
- "Bytes string function accesses uninitialized/garbage values";
if (!BT_UninitRead)
BT_UninitRead.reset(new BugType(Filter.CheckNameCStringUninitializedRead,
"Accessing unitialized/garbage values"));
auto Report =
std::make_unique<PathSensitiveBugReport>(*BT_UninitRead, Msg, N);
+ Report->addNote("Other elements might also be undefined",
+ Report->getLocation());
Report->addRange(E->getSourceRange());
bugreporter::trackExpressionValue(N, E, *Report);
C.emitReport(std::move(Report));
diff --git a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 6fe929b1cb94a..693791c3aee8b 100644
--- a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -630,6 +630,17 @@ bool MemRegion::canPrintPrettyAsExpr() const {
return false;
}
+StringRef MemRegion::getKindStr() const {
+ switch (getKind()) {
+#define REGION(Id, Parent) \
+ case Id##Kind: \
+ return #Id;
+#include "clang/StaticAnalyzer/Core/PathSensitive/Regions.def"
+#undef REGION
+ }
+ llvm_unreachable("Unkown kind!");
+}
+
void MemRegion::printPretty(raw_ostream &os) const {
assert(canPrintPretty() && "This region cannot be printed pretty.");
os << "'";
diff --git a/clang/test/Analysis/bstring_UninitRead.c b/clang/test/Analysis/bstring_UninitRead.c
index c535e018e62c2..45e38dd316298 100644
--- a/clang/test/Analysis/bstring_UninitRead.c
+++ b/clang/test/Analysis/bstring_UninitRead.c
@@ -1,12 +1,11 @@
// RUN: %clang_analyze_cc1 -verify %s \
// RUN: -analyzer-checker=core,alpha.unix.cstring
-
-// This file is generally for the alpha.unix.cstring.UninitializedRead Checker, the reason for putting it into
-// the separate file because the checker is break the some existing test cases in bstring.c file , so we don't
-// wanna mess up with some existing test case so it's better to create separate file for it, this file also include
-// the broken test for the reference in future about the broken tests.
-
+//===----------------------------------------------------------------------===//
+// mempcpy() using character array. This is the easiest case, as memcpy
+// intepretrs the dst and src buffers as character arrays (regardless of their
+// actual type).
+//===----------------------------------------------------------------------===//
typedef typeof(sizeof(int)) size_t;
@@ -14,46 +13,120 @@ void clang_analyzer_eval(int);
void *memcpy(void *restrict s1, const void *restrict s2, size_t n);
-void top(char *dst) {
+void memcpy_array_fully_uninit(char *dst) {
+ char buf[10];
+ memcpy(dst, buf, 10); // expected-warning{{The first element of the 2nd argument is undefined}}
+ // expected-note at -1{{Other elements might also be undefined}}
+ (void)buf;
+}
+
+void memcpy_array_partially_uninit(char *dst) {
char buf[10];
- memcpy(dst, buf, 10); // expected-warning{{Bytes string function accesses uninitialized/garbage values}}
+ buf[0] = 'i';
+ memcpy(dst, buf, 10); // expected-warning{{The last accessed element (at index 9) in the 2nd argument is undefined}}
+ // expected-note at -1{{Other elements might also be undefined}}
+ (void)buf;
+}
+
+void memcpy_array_only_init_portion(char *dst) {
+ char buf[10];
+ buf[0] = 'i';
+ memcpy(dst, buf, 1);
+ (void)buf;
+}
+
+void memcpy_array_partially_init_error(char *dst) {
+ char buf[10];
+ buf[0] = 'i';
+ memcpy(dst, buf, 2); // expected-warning{{The last accessed element (at index 1) in the 2nd argument is undefined}}
+ // expected-note at -1{{Other elements might also be undefined}}
+ (void)buf;
+}
+
+// The interesting case here is that the portion we're copying is initialized,
+// but not the whole matrix. We need to be careful to extract buf[1], and not
+// buf when trying to peel region layers off from the source argument.
+void memcpy_array_from_matrix(char *dst) {
+ char buf[2][2];
+ buf[1][0] = 'i';
+ buf[1][1] = 'j';
+ // FIXME: This is a FP -- we mistakenly retrieve the first element of buf,
+ // instead of the first element of buf[1]. getLValueElement simply peels off
+ // another ElementRegion layer, when in this case it really shouldn't.
+ memcpy(dst, buf[1], 2); // expected-warning{{The first element of the 2nd argument is undefined}}
+ // expected-note at -1{{Other elements might also be undefined}}
(void)buf;
}
-//===----------------------------------------------------------------------===
-// mempcpy()
-//===----------------------------------------------------------------------===
+//===----------------------------------------------------------------------===//
+// mempcpy() using non-character arrays.
+//===----------------------------------------------------------------------===//
void *mempcpy(void *restrict s1, const void *restrict s2, size_t n);
-void mempcpy14() {
+void memcpy_int_array_fully_init() {
int src[] = {1, 2, 3, 4};
int dst[5] = {0};
int *p;
- p = mempcpy(dst, src, 4 * sizeof(int)); // expected-warning{{Bytes string function accesses uninitialized/garbage values}}
- // FIXME: This behaviour is actually surprising and needs to be fixed,
- // mempcpy seems to consider the very last byte of the src buffer uninitialized
- // and returning undef unfortunately. It should have returned unknown or a conjured value instead.
+ p = mempcpy(dst, src, 4 * sizeof(int));
+ clang_analyzer_eval(p == &dst[4]);
+}
- clang_analyzer_eval(p == &dst[4]); // no-warning (above is fatal)
+void memcpy_int_array_fully_init2(int *dest) {
+ int t[] = {1, 2, 3};
+ memcpy(dest, t, sizeof(t));
}
+//===----------------------------------------------------------------------===//
+// mempcpy() using non-array arguments.
+//===----------------------------------------------------------------------===//
+
struct st {
int i;
int j;
};
-
-void mempcpy15() {
+void mempcpy_struct_partially_uninit() {
struct st s1 = {0};
struct st s2;
struct st *p1;
struct st *p2;
p1 = (&s2) + 1;
- p2 = mempcpy(&s2, &s1, sizeof(struct st)); // expected-warning{{Bytes string function accesses uninitialized/garbage values}}
- // FIXME: It seems same as mempcpy14() case.
-
- clang_analyzer_eval(p1 == p2); // no-warning (above is fatal)
+
+ // FIXME: Maybe ask UninitializedObjectChecker whether s1 is fully
+ // initialized?
+ p2 = mempcpy(&s2, &s1, sizeof(struct st));
+
+ clang_analyzer_eval(p1 == p2);
+}
+
+void mempcpy_struct_fully_uninit() {
+ struct st s1;
+ struct st s2;
+
+ // FIXME: Maybe ask UninitializedObjectChecker whether s1 is fully
+ // initialized?
+ mempcpy(&s2, &s1, sizeof(struct st));
+}
+
+// Creduced crash. In this case, a SymbolicRegion is wrapped in an
+// ElementRegion for the src argument.
+void *ga_copy_strings_from_0;
+void *memmove();
+void alloc();
+void ga_copy_strings() {
+ int i = 0;
+ for (;; ++i)
+ memmove(alloc, ((char **)ga_copy_strings_from_0)[i], 1);
+}
+
+// Creduced crash. In this case, retrieving the Loc for the first element failed.
+char mov_mdhd_language_map[][4] = {};
+int ff_mov_lang_to_iso639_code;
+char *ff_mov_lang_to_iso639_to;
+void ff_mov_lang_to_iso639() {
+ memcpy(ff_mov_lang_to_iso639_to,
+ mov_mdhd_language_map[ff_mov_lang_to_iso639_code], 4);
}
>From 7fb71d15cd3c2a185d4a60791e893d3c5721754d Mon Sep 17 00:00:00 2001
From: Ilya Leoshkevich <iii at linux.ibm.com>
Date: Thu, 4 Jul 2024 13:52:35 +0200
Subject: [PATCH 237/246] [SystemZ] Fix handling of target triples on Debian
(#95407) (#97546)
Debian's GCC installation has target triple "s390x-linux-gnu". Since,
unlike in Ubuntu, there is no /etc/lsb-release, config.guess detects
"s390x-ibm-linux". Generic_GCC::GCCInstallationDetector::init() only
tries to strip the "unknown" vendor, so GCC installation detection
fails.
Checking /etc/os-release in addition to /etc/lsb-release fixes this
problem. However, LLVM-built sanitizer runtimes still cannot be found:
the driver looks for them in lib/clang/19/lib/s390x-linux-gnu, but the
build system places them in lib/clang/19/lib/s390x-unknown-linux-gnu.
According to @MaskRay, the build system is doing the right thing [1]: on
the file system, full arch-vendor-os-environment triples must be used.
In order to satisfy both GCCInstallationDetector and this rule, use the
"s390x-unknown-linux-gnu" triple.
[1]
https://github.com/llvm/llvm-project/issues/95407#issuecomment-2167390240
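As a quick illustration (a hypothetical snippet; the assertions reflect
llvm::Triple behavior as I understand it) of how the two spellings relate:
```c++
#include "llvm/TargetParser/Triple.h"
#include <cassert>

int main() {
  // Normalization fills in the missing "unknown" vendor, so both spellings
  // name the same target -- but directory lookups compare raw strings.
  assert(llvm::Triple::normalize("s390x-linux-gnu") ==
         "s390x-unknown-linux-gnu");
  assert(llvm::Triple("s390x-ibm-linux").getVendorName() == "ibm");
}
```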
---
llvm/cmake/config.guess | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/llvm/cmake/config.guess b/llvm/cmake/config.guess
index 2444ed7f5792b..96cc554f181ab 100644
--- a/llvm/cmake/config.guess
+++ b/llvm/cmake/config.guess
@@ -1028,11 +1028,7 @@ EOF
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
- if [ "$(grep -Ei 'debian|ubuntu' /etc/lsb-release)" ]; then
- echo ${UNAME_MACHINE}-linux-gnu
- else
- echo ${UNAME_MACHINE}-ibm-linux
- fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
sh64*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
>From 42b193c962bd04b840b121e5fd27131038404925 Mon Sep 17 00:00:00 2001
From: Phoebe Wang <phoebe.wang at intel.com>
Date: Thu, 4 Jul 2024 20:02:48 +0800
Subject: [PATCH 238/246] [AMX] Error out when AMX DP instructions use same
registers (#97686)
Fixes #97522
---
.../lib/Target/X86/AsmParser/X86AsmParser.cpp | 8 ++++++
llvm/test/MC/X86/AMX/amx-error.s | 25 +++++++++++++++++++
2 files changed, 33 insertions(+)
create mode 100644 llvm/test/MC/X86/AMX/amx-error.s
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index dbea42d55b5fc..e49e96ceef6a4 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -3849,6 +3849,14 @@ bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
"registers should be distinct");
}
+ } else if (isTCMMIMFP16PS(Opcode) || isTCMMRLFP16PS(Opcode) ||
+ isTDPBF16PS(Opcode) || isTDPFP16PS(Opcode) || isTDPBSSD(Opcode) ||
+ isTDPBSUD(Opcode) || isTDPBUSD(Opcode) || isTDPBUUD(Opcode)) {
+ unsigned SrcDest = Inst.getOperand(0).getReg();
+ unsigned Src1 = Inst.getOperand(2).getReg();
+ unsigned Src2 = Inst.getOperand(3).getReg();
+ if (SrcDest == Src1 || SrcDest == Src2 || Src1 == Src2)
+ return Error(Ops[0]->getStartLoc(), "all tmm registers must be distinct");
}
// Check that we aren't mixing AH/BH/CH/DH with REX prefix. We only need to
diff --git a/llvm/test/MC/X86/AMX/amx-error.s b/llvm/test/MC/X86/AMX/amx-error.s
new file mode 100644
index 0000000000000..ee2ac83545b5a
--- /dev/null
+++ b/llvm/test/MC/X86/AMX/amx-error.s
@@ -0,0 +1,25 @@
+// RUN: not llvm-mc -triple x86_64 %s 2>&1 | FileCheck %s
+
+// CHECK: error: all tmm registers must be distinct
+tcmmimfp16ps %tmm0, %tmm0, %tmm0
+
+// CHECK: error: all tmm registers must be distinct
+tcmmrlfp16ps %tmm1, %tmm0, %tmm1
+
+// CHECK: error: all tmm registers must be distinct
+tdpbf16ps %tmm2, %tmm2, %tmm0
+
+// CHECK: error: all tmm registers must be distinct
+tdpfp16ps %tmm3, %tmm0, %tmm0
+
+// CHECK: error: all tmm registers must be distinct
+tdpbssd %tmm0, %tmm0, %tmm0
+
+// CHECK: error: all tmm registers must be distinct
+tdpbsud %tmm1, %tmm0, %tmm1
+
+// CHECK: error: all tmm registers must be distinct
+tdpbusd %tmm2, %tmm2, %tmm0
+
+// CHECK: error: all tmm registers must be distinct
+tdpbuud %tmm3, %tmm0, %tmm0
>From 99f6ff9c0b5f2c009878c2ad5607bc7a3086956b Mon Sep 17 00:00:00 2001
From: Sergio Afonso <safonsof at amd.com>
Date: Thu, 4 Jul 2024 13:13:21 +0100
Subject: [PATCH 239/246] [Flang][OpenMP] Use InsertionGuard in
DataSharingProcessor (#97562)
This patch removes the introduction of `fir.undef` operations as a way
to keep track of insertion points inside the `DataSharingProcessor`,
and it replaces them with an `InsertionGuard` to avoid creating such
operations inside loop wrappers.
Leaving any `fir.undef` operation inside a loop wrapper would result
in a verifier error, since loop wrappers enforce strict requirements on
the contents of their code regions.
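For readers unfamiliar with the API, a minimal sketch of the RAII idiom
relied on here (assuming MLIR's `OpBuilder` interface): the guard snapshots
the insertion point on construction and restores it on destruction, so no
marker operation is needed.
```c++
#include "mlir/IR/Builders.h"

void withTemporaryInsertionPoint(mlir::OpBuilder &builder,
                                 mlir::Operation *op) {
  mlir::OpBuilder::InsertionGuard guard(builder); // saves insertion point
  builder.setInsertionPointAfter(op);
  // ... create ops at the temporary position ...
} // guard restores the original insertion point here
```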
---
flang/lib/Lower/OpenMP/DataSharingProcessor.cpp | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
index e2b55fcc64062..7df3905c29990 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
@@ -72,11 +72,8 @@ void DataSharingProcessor::processStep2(mlir::Operation *op, bool isLoop) {
firOpBuilder.setInsertionPointAfter(op);
insertDeallocs();
} else {
- // insert dummy instruction to mark the insertion position
- mlir::Value undefMarker = firOpBuilder.create<fir::UndefOp>(
- op->getLoc(), firOpBuilder.getIndexType());
+ mlir::OpBuilder::InsertionGuard guard(firOpBuilder);
insertDeallocs();
- firOpBuilder.setInsertionPointAfter(undefMarker.getDefiningOp());
}
}
>From 6222c8f0305de1fdc5ff39f5f1d87fcfeebfa646 Mon Sep 17 00:00:00 2001
From: Nicholas Guy <67685292+NickGuy-Arm at users.noreply.github.com>
Date: Thu, 4 Jul 2024 13:32:42 +0100
Subject: [PATCH 240/246] [IR][LangRef] Add partial reduction add intrinsic
(#94499)
Adds the llvm.experimental.partial.reduce.add.* overloaded intrinsic,
this intrinsic represents add reductions that result in a narrower
vector.
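A scalar model of the intended semantics (a sketch following the LangRef
wording added below, not the actual SelectionDAG lowering): the wide operand
is cut into result-width chunks, and each chunk is added element-wise into
the accumulator.
```c++
#include <array>
#include <cstddef>

template <std::size_t N, std::size_t M>
std::array<int, N> partialReduceAdd(std::array<int, N> acc,
                                    const std::array<int, M> &wide) {
  static_assert(M % N == 0, "wide length must be a multiple of result length");
  for (std::size_t i = 0; i < M; ++i)
    acc[i % N] += wide[i]; // element i of chunk k lands in lane i % N
  return acc;
}
```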
---
llvm/docs/LangRef.rst | 31 +++++++
llvm/include/llvm/IR/Intrinsics.td | 6 ++
.../SelectionDAG/SelectionDAGBuilder.cpp | 32 +++++++
llvm/lib/IR/Verifier.cpp | 14 ++++
.../CodeGen/AArch64/partial-reduction-add.ll | 83 +++++++++++++++++++
5 files changed, 166 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/partial-reduction-add.ll
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index c98332d3a24fc..b9f02d6b4b41e 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -19441,6 +19441,37 @@ will be on any later loop iteration.
This intrinsic will only return 0 if the input count is also 0. A non-zero input
count will produce a non-zero result.
+'``llvm.experimental.vector.partial.reduce.add.*``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+ declare <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v4i32.v8i32(<4 x i32> %a, <8 x i32> %b)
+ declare <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v4i32.v16i32(<4 x i32> %a, <16 x i32> %b)
+ declare <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv4i32.nxv8i32(<vscale x 4 x i32> %a, <vscale x 8 x i32> %b)
+ declare <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv4i32.nxv16i32(<vscale x 4 x i32> %a, <vscale x 16 x i32> %b)
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.vector.partial.reduce.add.*``' intrinsics reduce the
+concatenation of the two vector operands down to the number of elements dictated
+by the result type. The result type is a vector type that matches the type of the
+first operand vector.
+
+Arguments:
+""""""""""
+
+Both arguments must be vectors of matching element types. The first argument type must
+match the result type, while the second argument type must have a vector length that is a
+positive integer multiple of that of the first vector/result type. The arguments must
+either both be fixed-width or both be scalable vectors.
+
+
'``llvm.experimental.vector.histogram.*``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index c7d383a5d0c0c..95dbd2854322d 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2640,6 +2640,12 @@ def int_vector_deinterleave2 : DefaultAttrsIntrinsic<[LLVMHalfElementsVectorType
[llvm_anyvector_ty],
[IntrNoMem]>;
+//===-------------- Intrinsics to perform partial reduction ---------------===//
+
+def int_experimental_vector_partial_reduce_add : DefaultAttrsIntrinsic<[LLVMMatchType<0>],
+ [llvm_anyvector_ty, llvm_anyvector_ty],
+ [IntrNoMem]>;
+
//===----------------- Pointer Authentication Intrinsics ------------------===//
//
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 8db2708d41a69..cc55d53597b65 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -104,6 +104,7 @@
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstddef>
+#include <deque>
#include <iterator>
#include <limits>
#include <optional>
@@ -7976,6 +7977,37 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
setValue(&I, Trunc);
return;
}
+ case Intrinsic::experimental_vector_partial_reduce_add: {
+ SDValue OpNode = getValue(I.getOperand(1));
+ EVT ReducedTy = EVT::getEVT(I.getType());
+ EVT FullTy = OpNode.getValueType();
+
+ unsigned Stride = ReducedTy.getVectorMinNumElements();
+ unsigned ScaleFactor = FullTy.getVectorMinNumElements() / Stride;
+
+ // Collect all of the subvectors
+ std::deque<SDValue> Subvectors;
+ Subvectors.push_back(getValue(I.getOperand(0)));
+ for (unsigned i = 0; i < ScaleFactor; i++) {
+ auto SourceIndex = DAG.getVectorIdxConstant(i * Stride, sdl);
+ Subvectors.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ReducedTy,
+ {OpNode, SourceIndex}));
+ }
+
+ // Flatten the subvector tree
+ while (Subvectors.size() > 1) {
+ Subvectors.push_back(DAG.getNode(ISD::ADD, sdl, ReducedTy,
+ {Subvectors[0], Subvectors[1]}));
+ Subvectors.pop_front();
+ Subvectors.pop_front();
+ }
+
+ assert(Subvectors.size() == 1 &&
+ "There should only be one subvector after tree flattening");
+
+ setValue(&I, Subvectors[0]);
+ return;
+ }
case Intrinsic::experimental_cttz_elts: {
auto DL = getCurSDLoc();
SDValue Op = getValue(I.getOperand(0));
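The loop above flattens the accumulator and the extracted subvectors into a tree
of ISD::ADD nodes by repeatedly combining the two front elements of the deque and
pushing the sum onto the back. The same shape, as a stand-alone C++ sketch over
plain integers (illustrative only; in the patch the elements are SDValue
subvectors):

  #include <cassert>
  #include <deque>

  // Combine a work list pairwise until one value remains, mirroring the
  // "flatten the subvector tree" loop in the SelectionDAG lowering above.
  int flattenTree(std::deque<int> Subvectors) {
    assert(!Subvectors.empty() && "need at least one element");
    while (Subvectors.size() > 1) {
      Subvectors.push_back(Subvectors[0] + Subvectors[1]);
      Subvectors.pop_front();
      Subvectors.pop_front();
    }
    return Subvectors.front();
  }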
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index c98f61d555140..44982f55e17de 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6143,6 +6143,20 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
}
break;
}
+ case Intrinsic::experimental_vector_partial_reduce_add: {
+ VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
+ VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
+
+ unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
+ unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
+
+ Check((VecWidth % AccWidth) == 0,
+ "Invalid vector widths for partial "
+ "reduction. The width of the input vector "
+ "must be a positive integer multiple of "
+ "the width of the accumulator vector.");
+ break;
+ }
case Intrinsic::experimental_noalias_scope_decl: {
NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
break;
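The rule the verifier enforces is purely about (minimum) element counts. A
hypothetical stand-alone mirror of the check, using the concrete widths from the
tests below (the helper name is illustrative):

  #include <cassert>

  // The input vector's minimum element count must be a positive integer
  // multiple of the accumulator's minimum element count.
  bool isValidPartialReduction(unsigned VecWidth, unsigned AccWidth) {
    return AccWidth != 0 && VecWidth % AccWidth == 0;
  }

  int main() {
    assert(isValidPartialReduction(8, 4));  // nxv8i32 into nxv4i32: accepted
    assert(!isValidPartialReduction(6, 4)); // 6 is not a multiple of 4: rejected
  }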
diff --git a/llvm/test/CodeGen/AArch64/partial-reduction-add.ll b/llvm/test/CodeGen/AArch64/partial-reduction-add.ll
new file mode 100644
index 0000000000000..ae681ee54e687
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/partial-reduction-add.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -force-vector-interleave=1 -o - %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-none-unknown-elf"
+
+define <4 x i32> @partial_reduce_add_fixed(<4 x i32> %accumulator, <4 x i32> %0) #0 {
+; CHECK-LABEL: partial_reduce_add_fixed:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: ret
+entry:
+ %partial.reduce = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v4i32.v4i32(<4 x i32> %accumulator, <4 x i32> %0)
+ ret <4 x i32> %partial.reduce
+}
+
+define <4 x i32> @partial_reduce_add_fixed_half(<4 x i32> %accumulator, <8 x i32> %0) #0 {
+; CHECK-LABEL: partial_reduce_add_fixed_half:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: add v0.4s, v2.4s, v0.4s
+; CHECK-NEXT: ret
+entry:
+ %partial.reduce = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v4i32.v8i32(<4 x i32> %accumulator, <8 x i32> %0)
+ ret <4 x i32> %partial.reduce
+}
+
+define <vscale x 4 x i32> @partial_reduce_add(<vscale x 4 x i32> %accumulator, <vscale x 4 x i32> %0) #0 {
+; CHECK-LABEL: partial_reduce_add:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+entry:
+ %partial.reduce = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv4i32.nxv4i32(<vscale x 4 x i32> %accumulator, <vscale x 4 x i32> %0)
+ ret <vscale x 4 x i32> %partial.reduce
+}
+
+define <vscale x 4 x i32> @partial_reduce_add_half(<vscale x 4 x i32> %accumulator, <vscale x 8 x i32> %0) #0 {
+; CHECK-LABEL: partial_reduce_add_half:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add z0.s, z0.s, z1.s
+; CHECK-NEXT: add z0.s, z2.s, z0.s
+; CHECK-NEXT: ret
+entry:
+ %partial.reduce = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv4i32.nxv8i32(<vscale x 4 x i32> %accumulator, <vscale x 8 x i32> %0)
+ ret <vscale x 4 x i32> %partial.reduce
+}
+
+define <vscale x 4 x i32> @partial_reduce_add_quart(<vscale x 4 x i32> %accumulator, <vscale x 16 x i32> %0) #0 {
+; CHECK-LABEL: partial_reduce_add_quart:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add z0.s, z0.s, z1.s
+; CHECK-NEXT: add z2.s, z2.s, z3.s
+; CHECK-NEXT: add z0.s, z4.s, z0.s
+; CHECK-NEXT: add z0.s, z2.s, z0.s
+; CHECK-NEXT: ret
+entry:
+ %partial.reduce = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv4i32.nxv16i32(<vscale x 4 x i32> %accumulator, <vscale x 16 x i32> %0)
+ ret <vscale x 4 x i32> %partial.reduce
+}
+
+define <vscale x 8 x i32> @partial_reduce_add_half_8(<vscale x 8 x i32> %accumulator, <vscale x 16 x i32> %0) #0 {
+; CHECK-LABEL: partial_reduce_add_half_8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add z0.s, z0.s, z2.s
+; CHECK-NEXT: add z1.s, z1.s, z3.s
+; CHECK-NEXT: add z0.s, z4.s, z0.s
+; CHECK-NEXT: add z1.s, z5.s, z1.s
+; CHECK-NEXT: ret
+entry:
+ %partial.reduce = call <vscale x 8 x i32> @llvm.experimental.vector.partial.reduce.add.nxv8i32.nxv8i32.nxv16i32(<vscale x 8 x i32> %accumulator, <vscale x 16 x i32> %0)
+ ret <vscale x 8 x i32> %partial.reduce
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv4i32.nxv8i32(<vscale x 4 x i32>, <vscale x 8 x i32>)
+declare <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv4i32.nxv16i32(<vscale x 4 x i32>, <vscale x 16 x i32>)
+declare <vscale x 8 x i32> @llvm.experimental.vector.partial.reduce.add.nxv8i32.nxv8i32.nxv16i32(<vscale x 8 x i32>, <vscale x 16 x i32>)
+
+declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>)
+declare i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32>)
+
+attributes #0 = { "target-features"="+sve2" }
>From f58930f705884dfac3bd8c481c827d027a6068cb Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Thu, 4 Jul 2024 14:41:12 +0200
Subject: [PATCH 241/246] [Mem2Reg] Don't use single store optimization for
potentially poison value (#97711)
If there is a single store, then loads must yield either the stored value
or uninitialized memory (undef). If the stored value may be poison, then
replacing a load of uninitialized memory with it would be incorrect. Fall
back to the generic code in that case.
This PR only fixes the case where there is a literal poison store -- the
case where the value is non-trivially poison will still get miscompiled
by phi simplification later, see #96631.
Fixes https://github.com/llvm/llvm-project/issues/97702.
---
llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp | 10 ++++++++--
llvm/test/Transforms/Mem2Reg/single-store.ll | 3 +--
2 files changed, 9 insertions(+), 4 deletions(-)
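In outline, the fix gates the single-store fast path on ValueTracking's
isGuaranteedNotToBePoison. A simplified sketch of just that decision follows;
the helper function is illustrative, not the full rewriteSingleStoreAlloca
logic:

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  // The single-store promotion may substitute the stored value for loads of
  // uninitialized memory (undef). That substitution is only a valid
  // refinement if the stored value cannot itself be poison; otherwise bail
  // out and let the generic phi-based promotion handle the alloca.
  bool canUseSingleStoreFastPath(const StoreInst *OnlyStore) {
    return isGuaranteedNotToBePoison(OnlyStore->getValueOperand());
  }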
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 6e021a5e2d05a..cd5ab55c2122f 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -525,7 +525,14 @@ rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI,
SmallSet<DbgAssignIntrinsic *, 8> *DbgAssignsToDelete,
SmallSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
StoreInst *OnlyStore = Info.OnlyStore;
- bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
+ Value *ReplVal = OnlyStore->getOperand(0);
+ // Loads may either load the stored value or uninitialized memory (undef).
+ // If the stored value may be poison, then replacing an uninitialized memory
+ // load with it would be incorrect.
+ if (!isGuaranteedNotToBePoison(ReplVal))
+ return false;
+
+ bool StoringGlobalVal = !isa<Instruction>(ReplVal);
BasicBlock *StoreBB = OnlyStore->getParent();
int StoreIndex = -1;
@@ -565,7 +572,6 @@ rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI,
}
// Otherwise, we *can* safely rewrite this load.
- Value *ReplVal = OnlyStore->getOperand(0);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
diff --git a/llvm/test/Transforms/Mem2Reg/single-store.ll b/llvm/test/Transforms/Mem2Reg/single-store.ll
index b82e26158a361..f864227c49145 100644
--- a/llvm/test/Transforms/Mem2Reg/single-store.ll
+++ b/llvm/test/Transforms/Mem2Reg/single-store.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=mem2reg < %s | FileCheck %s
-; FIXME: This is a miscompile.
define i8 @single_store_literal_poison(i1 %cond) {
; CHECK-LABEL: define i8 @single_store_literal_poison(
; CHECK-SAME: i1 [[COND:%.*]]) {
@@ -9,7 +8,7 @@ define i8 @single_store_literal_poison(i1 %cond) {
; CHECK: [[IF]]:
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: ret i8 poison
+; CHECK-NEXT: ret i8 undef
;
%a = alloca i8, align 1
br i1 %cond, label %if, label %exit
>From bbc6504b3d2f237ed7e84dcaecb228bf2124f72e Mon Sep 17 00:00:00 2001
From: Ariel-Burton <arielburton at yahoo.com>
Date: Thu, 4 Jul 2024 08:43:45 -0400
Subject: [PATCH 242/246] [NFC] [APFloat] Refactor IEEEFloat::toString (#97117)
This PR lifts the body of IEEEFloat::toString out to a standalone
function. We do this to facilitate code sharing with other floating
point types, e.g., the forthcoming support for HexFloat.
There is no change in functionality.
---
llvm/lib/Support/APFloat.cpp | 375 ++++++++++++++++++-----------------
1 file changed, 195 insertions(+), 180 deletions(-)
diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp
index 3017a9b976658..3664de71d06df 100644
--- a/llvm/lib/Support/APFloat.cpp
+++ b/llvm/lib/Support/APFloat.cpp
@@ -4118,6 +4118,199 @@ namespace {
exp += FirstSignificant;
buffer.erase(&buffer[0], &buffer[FirstSignificant]);
}
+
+ void toStringImpl(SmallVectorImpl<char> &Str, const bool isNeg, int exp,
+ APInt significand, unsigned FormatPrecision,
+ unsigned FormatMaxPadding, bool TruncateZero) {
+ const int semanticsPrecision = significand.getBitWidth();
+
+ if (isNeg)
+ Str.push_back('-');
+
+ // Set FormatPrecision if zero. We want to do this before we
+ // truncate trailing zeros, as those are part of the precision.
+ if (!FormatPrecision) {
+ // We use enough digits so the number can be round-tripped back to an
+ // APFloat. The formula comes from "How to Print Floating-Point Numbers
+ // Accurately" by Steele and White.
+ // FIXME: Using a formula based purely on the precision is conservative;
+ // we can print fewer digits depending on the actual value being printed.
+
+ // FormatPrecision = 2 + floor(significandBits / lg_2(10))
+ FormatPrecision = 2 + semanticsPrecision * 59 / 196;
+ }
+
+ // Ignore trailing binary zeros.
+ int trailingZeros = significand.countr_zero();
+ exp += trailingZeros;
+ significand.lshrInPlace(trailingZeros);
+
+ // Change the exponent from 2^e to 10^e.
+ if (exp == 0) {
+ // Nothing to do.
+ } else if (exp > 0) {
+ // Just shift left.
+ significand = significand.zext(semanticsPrecision + exp);
+ significand <<= exp;
+ exp = 0;
+ } else { /* exp < 0 */
+ int texp = -exp;
+
+ // We transform this using the identity:
+ // (N)(2^-e) == (N)(5^e)(10^-e)
+ // This means we have to multiply N (the significand) by 5^e.
+ // To avoid overflow, we have to operate on numbers large
+ // enough to store N * 5^e:
+ // log2(N * 5^e) == log2(N) + e * log2(5)
+ // <= semantics->precision + e * 137 / 59
+ // (log_2(5) ~ 2.321928 < 2.322034 ~ 137/59)
+
+ unsigned precision = semanticsPrecision + (137 * texp + 136) / 59;
+
+ // Multiply significand by 5^e.
+ // N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8)
+ significand = significand.zext(precision);
+ APInt five_to_the_i(precision, 5);
+ while (true) {
+ if (texp & 1)
+ significand *= five_to_the_i;
+
+ texp >>= 1;
+ if (!texp)
+ break;
+ five_to_the_i *= five_to_the_i;
+ }
+ }
+
+ AdjustToPrecision(significand, exp, FormatPrecision);
+
+ SmallVector<char, 256> buffer;
+
+ // Fill the buffer.
+ unsigned precision = significand.getBitWidth();
+ if (precision < 4) {
+ // We need enough precision to store the value 10.
+ precision = 4;
+ significand = significand.zext(precision);
+ }
+ APInt ten(precision, 10);
+ APInt digit(precision, 0);
+
+ bool inTrail = true;
+ while (significand != 0) {
+ // digit <- significand % 10
+ // significand <- significand / 10
+ APInt::udivrem(significand, ten, significand, digit);
+
+ unsigned d = digit.getZExtValue();
+
+ // Drop trailing zeros.
+ if (inTrail && !d)
+ exp++;
+ else {
+ buffer.push_back((char) ('0' + d));
+ inTrail = false;
+ }
+ }
+
+ assert(!buffer.empty() && "no characters in buffer!");
+
+ // Drop down to FormatPrecision.
+ // TODO: don't do more precise calculations above than are required.
+ AdjustToPrecision(buffer, exp, FormatPrecision);
+
+ unsigned NDigits = buffer.size();
+
+ // Check whether we should use scientific notation.
+ bool FormatScientific;
+ if (!FormatMaxPadding)
+ FormatScientific = true;
+ else {
+ if (exp >= 0) {
+ // 765e3 --> 765000
+ // ^^^
+ // But we shouldn't make the number look more precise than it is.
+ FormatScientific = ((unsigned) exp > FormatMaxPadding ||
+ NDigits + (unsigned) exp > FormatPrecision);
+ } else {
+ // Power of the most significant digit.
+ int MSD = exp + (int) (NDigits - 1);
+ if (MSD >= 0) {
+ // 765e-2 == 7.65
+ FormatScientific = false;
+ } else {
+ // 765e-5 == 0.00765
+ // ^ ^^
+ FormatScientific = ((unsigned) -MSD) > FormatMaxPadding;
+ }
+ }
+ }
+
+ // Scientific formatting is pretty straightforward.
+ if (FormatScientific) {
+ exp += (NDigits - 1);
+
+ Str.push_back(buffer[NDigits-1]);
+ Str.push_back('.');
+ if (NDigits == 1 && TruncateZero)
+ Str.push_back('0');
+ else
+ for (unsigned I = 1; I != NDigits; ++I)
+ Str.push_back(buffer[NDigits-1-I]);
+ // Fill with zeros up to FormatPrecision.
+ if (!TruncateZero && FormatPrecision > NDigits - 1)
+ Str.append(FormatPrecision - NDigits + 1, '0');
+ // For !TruncateZero we use lower 'e'.
+ Str.push_back(TruncateZero ? 'E' : 'e');
+
+ Str.push_back(exp >= 0 ? '+' : '-');
+ if (exp < 0)
+ exp = -exp;
+ SmallVector<char, 6> expbuf;
+ do {
+ expbuf.push_back((char) ('0' + (exp % 10)));
+ exp /= 10;
+ } while (exp);
+ // Exponent always at least two digits if we do not truncate zeros.
+ if (!TruncateZero && expbuf.size() < 2)
+ expbuf.push_back('0');
+ for (unsigned I = 0, E = expbuf.size(); I != E; ++I)
+ Str.push_back(expbuf[E-1-I]);
+ return;
+ }
+
+ // Non-scientific, positive exponents.
+ if (exp >= 0) {
+ for (unsigned I = 0; I != NDigits; ++I)
+ Str.push_back(buffer[NDigits-1-I]);
+ for (unsigned I = 0; I != (unsigned) exp; ++I)
+ Str.push_back('0');
+ return;
+ }
+
+ // Non-scientific, negative exponents.
+
+ // The number of digits to the left of the decimal point.
+ int NWholeDigits = exp + (int) NDigits;
+
+ unsigned I = 0;
+ if (NWholeDigits > 0) {
+ for (; I != (unsigned) NWholeDigits; ++I)
+ Str.push_back(buffer[NDigits-I-1]);
+ Str.push_back('.');
+ } else {
+ unsigned NZeros = 1 + (unsigned) -NWholeDigits;
+
+ Str.push_back('0');
+ Str.push_back('.');
+ for (unsigned Z = 1; Z != NZeros; ++Z)
+ Str.push_back('0');
+ }
+
+ for (; I != NDigits; ++I)
+ Str.push_back(buffer[NDigits-I-1]);
+
+ }
} // namespace
void IEEEFloat::toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
@@ -4152,193 +4345,15 @@ void IEEEFloat::toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
break;
}
- if (isNegative())
- Str.push_back('-');
-
// Decompose the number into an APInt and an exponent.
int exp = exponent - ((int) semantics->precision - 1);
APInt significand(
semantics->precision,
ArrayRef(significandParts(), partCountForBits(semantics->precision)));
- // Set FormatPrecision if zero. We want to do this before we
- // truncate trailing zeros, as those are part of the precision.
- if (!FormatPrecision) {
- // We use enough digits so the number can be round-tripped back to an
- // APFloat. The formula comes from "How to Print Floating-Point Numbers
- // Accurately" by Steele and White.
- // FIXME: Using a formula based purely on the precision is conservative;
- // we can print fewer digits depending on the actual value being printed.
-
- // FormatPrecision = 2 + floor(significandBits / lg_2(10))
- FormatPrecision = 2 + semantics->precision * 59 / 196;
- }
-
- // Ignore trailing binary zeros.
- int trailingZeros = significand.countr_zero();
- exp += trailingZeros;
- significand.lshrInPlace(trailingZeros);
-
- // Change the exponent from 2^e to 10^e.
- if (exp == 0) {
- // Nothing to do.
- } else if (exp > 0) {
- // Just shift left.
- significand = significand.zext(semantics->precision + exp);
- significand <<= exp;
- exp = 0;
- } else { /* exp < 0 */
- int texp = -exp;
-
- // We transform this using the identity:
- // (N)(2^-e) == (N)(5^e)(10^-e)
- // This means we have to multiply N (the significand) by 5^e.
- // To avoid overflow, we have to operate on numbers large
- // enough to store N * 5^e:
- // log2(N * 5^e) == log2(N) + e * log2(5)
- // <= semantics->precision + e * 137 / 59
- // (log_2(5) ~ 2.321928 < 2.322034 ~ 137/59)
-
- unsigned precision = semantics->precision + (137 * texp + 136) / 59;
-
- // Multiply significand by 5^e.
- // N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8)
- significand = significand.zext(precision);
- APInt five_to_the_i(precision, 5);
- while (true) {
- if (texp & 1) significand *= five_to_the_i;
-
- texp >>= 1;
- if (!texp) break;
- five_to_the_i *= five_to_the_i;
- }
- }
-
- AdjustToPrecision(significand, exp, FormatPrecision);
-
- SmallVector<char, 256> buffer;
-
- // Fill the buffer.
- unsigned precision = significand.getBitWidth();
- if (precision < 4) {
- // We need enough precision to store the value 10.
- precision = 4;
- significand = significand.zext(precision);
- }
- APInt ten(precision, 10);
- APInt digit(precision, 0);
-
- bool inTrail = true;
- while (significand != 0) {
- // digit <- significand % 10
- // significand <- significand / 10
- APInt::udivrem(significand, ten, significand, digit);
-
- unsigned d = digit.getZExtValue();
-
- // Drop trailing zeros.
- if (inTrail && !d) exp++;
- else {
- buffer.push_back((char) ('0' + d));
- inTrail = false;
- }
- }
-
- assert(!buffer.empty() && "no characters in buffer!");
-
- // Drop down to FormatPrecision.
- // TODO: don't do more precise calculations above than are required.
- AdjustToPrecision(buffer, exp, FormatPrecision);
-
- unsigned NDigits = buffer.size();
-
- // Check whether we should use scientific notation.
- bool FormatScientific;
- if (!FormatMaxPadding)
- FormatScientific = true;
- else {
- if (exp >= 0) {
- // 765e3 --> 765000
- // ^^^
- // But we shouldn't make the number look more precise than it is.
- FormatScientific = ((unsigned) exp > FormatMaxPadding ||
- NDigits + (unsigned) exp > FormatPrecision);
- } else {
- // Power of the most significant digit.
- int MSD = exp + (int) (NDigits - 1);
- if (MSD >= 0) {
- // 765e-2 == 7.65
- FormatScientific = false;
- } else {
- // 765e-5 == 0.00765
- // ^ ^^
- FormatScientific = ((unsigned) -MSD) > FormatMaxPadding;
- }
- }
- }
-
- // Scientific formatting is pretty straightforward.
- if (FormatScientific) {
- exp += (NDigits - 1);
-
- Str.push_back(buffer[NDigits-1]);
- Str.push_back('.');
- if (NDigits == 1 && TruncateZero)
- Str.push_back('0');
- else
- for (unsigned I = 1; I != NDigits; ++I)
- Str.push_back(buffer[NDigits-1-I]);
- // Fill with zeros up to FormatPrecision.
- if (!TruncateZero && FormatPrecision > NDigits - 1)
- Str.append(FormatPrecision - NDigits + 1, '0');
- // For !TruncateZero we use lower 'e'.
- Str.push_back(TruncateZero ? 'E' : 'e');
-
- Str.push_back(exp >= 0 ? '+' : '-');
- if (exp < 0) exp = -exp;
- SmallVector<char, 6> expbuf;
- do {
- expbuf.push_back((char) ('0' + (exp % 10)));
- exp /= 10;
- } while (exp);
- // Exponent always at least two digits if we do not truncate zeros.
- if (!TruncateZero && expbuf.size() < 2)
- expbuf.push_back('0');
- for (unsigned I = 0, E = expbuf.size(); I != E; ++I)
- Str.push_back(expbuf[E-1-I]);
- return;
- }
-
- // Non-scientific, positive exponents.
- if (exp >= 0) {
- for (unsigned I = 0; I != NDigits; ++I)
- Str.push_back(buffer[NDigits-1-I]);
- for (unsigned I = 0; I != (unsigned) exp; ++I)
- Str.push_back('0');
- return;
- }
-
- // Non-scientific, negative exponents.
-
- // The number of digits to the left of the decimal point.
- int NWholeDigits = exp + (int) NDigits;
-
- unsigned I = 0;
- if (NWholeDigits > 0) {
- for (; I != (unsigned) NWholeDigits; ++I)
- Str.push_back(buffer[NDigits-I-1]);
- Str.push_back('.');
- } else {
- unsigned NZeros = 1 + (unsigned) -NWholeDigits;
-
- Str.push_back('0');
- Str.push_back('.');
- for (unsigned Z = 1; Z != NZeros; ++Z)
- Str.push_back('0');
- }
+ toStringImpl(Str, isNegative(), exp, significand, FormatPrecision,
+ FormatMaxPadding, TruncateZero);
- for (; I != NDigits; ++I)
- Str.push_back(buffer[NDigits-I-1]);
}
bool IEEEFloat::getExactInverse(APFloat *inv) const {
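For reference, the public entry point and its behavior are unchanged by this
refactor. A small usage example of the knobs toStringImpl now receives,
assuming the standard APFloat API (the output comments reflect the padding
heuristic documented in the code above):

  #include "llvm/ADT/APFloat.h"
  #include "llvm/ADT/SmallString.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  int main() {
    APFloat F(765000.0);
    SmallString<32> Buf;
    // FormatPrecision = 0 picks a round-trippable digit count;
    // FormatMaxPadding = 3 permits up to three zeros of padding, so this
    // should stay positional: "765000".
    F.toString(Buf, /*FormatPrecision=*/0, /*FormatMaxPadding=*/3);
    outs() << Buf << "\n";
    Buf.clear();
    // With FormatMaxPadding = 2 the same value should switch to
    // scientific notation: "7.65E+5".
    F.toString(Buf, /*FormatPrecision=*/0, /*FormatMaxPadding=*/2);
    outs() << Buf << "\n";
  }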
>From 3cab132e94d3c63dbcf20d2acc4879b2b98a0de9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Endre=20F=C3=BCl=C3=B6p?= <endre.fulop at sigmatechnology.com>
Date: Thu, 4 Jul 2024 15:02:28 +0200
Subject: [PATCH 243/246] [clang][analyzer][doc] Migrate checkers-related docs
from HTML to RST (#97032)
Documentation for the checkers is kept up to date in RST files.
This patch removes duplication by replacing the HTML docs with links to
docs generated from the RST.
---
clang/www/analyzer/alpha_checks.html | 939 +-----------
clang/www/analyzer/available_checks.html | 1743 +---------------------
2 files changed, 14 insertions(+), 2668 deletions(-)
diff --git a/clang/www/analyzer/alpha_checks.html b/clang/www/analyzer/alpha_checks.html
index 501a9bcbc82a9..1ee44c7d355ba 100644
--- a/clang/www/analyzer/alpha_checks.html
+++ b/clang/www/analyzer/alpha_checks.html
@@ -2,7 +2,9 @@
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
- <title>Alpha Checks</title>
+ <title>Alpha Checkers documentation has moved to clang.llvm.org</title>
+ <link rel="canonical" href="https://clang.llvm.org/docs/analyzer/checkers.html#experimental-checkers"/>
+ <meta http-equiv="refresh" content="0;url=https://clang.llvm.org/docs/analyzer/checkers.html#experimental-checkers" />
<link type="text/css" rel="stylesheet" href="menu.css">
<link type="text/css" rel="stylesheet" href="content.css">
<script type="text/javascript" src="scripts/menu.js"></script>
@@ -17,938 +19,11 @@
<!--#include virtual="menu.html.incl"-->
<div id="content">
-<h1>Alpha Checkers</h1>
-Experimental checkers in addition to the <a href = "available_checks.html">
-Default Checkers</a>. These are checkers with known issues or limitations that
-keep them from being on by default. They are likely to have false positives.
-Bug reports are welcome but will likely not be investigated for some time.
-Patches welcome!
-<ul>
-<li><a href="#clone_alpha_checkers">Clone Alpha Checkers</a></li>
-<li><a href="#core_alpha_checkers">Core Alpha Checkers</a></li>
-<li><a href="#cplusplus_alpha_checkers">C++ Alpha Checkers</a></li>
-<li><a href="#llvm_alpha_checkers">LLVM Checkers</a></li>
-<li><a href="#valist_alpha_checkers">Variable Argument Alpha Checkers</a></li>
-<li><a href="#deadcode_alpha_checkers">Dead Code Alpha Checkers</a></li>
-<li><a href="#osx_alpha_checkers">OS X Alpha Checkers</a></li>
-<li><a href="#security_alpha_checkers">Security Alpha Checkers</a></li>
-<li><a href="#unix_alpha_checkers">Unix Alpha Checkers</a></li>
-<li><a href="#nondeterminism_alpha_checkers">Non-determinism Alpha Checkers</a></li>
-</ul>
-<!-- ============================= clone alpha ============================= -->
-
-<h3 id="clone_alpha_checkers">Clone Alpha Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="alpha.clone.CloneChecker"><div class="namedescr expandable"><span class="name">
-alpha.clone.CloneChecker</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Reports similar pieces of code.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void log();
-
-int max(int a, int b) { // warn
- log();
- if (a > b)
- return a;
- return b;
-}
-
-int maxClone(int x, int y) { // similar code here
- log();
- if (x > y)
- return x;
- return y;
-}
-</pre></div></div></td></tr>
-</tbody></table>
-
-<!-- ============================= core alpha ============================= -->
-<h3 id="core_alpha_checkers">Core Alpha Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="alpha.core.BoolAssignment"><div class="namedescr expandable"><span class="name">
-alpha.core.BoolAssignment</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warn about assigning non-{0,1} values to boolean variables.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- BOOL b = -1; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.CastSize"><div class="namedescr expandable"><span class="name">
-alpha.core.CastSize</span><span class="lang">
-(C)</span><div class="descr">
-Check when casting a malloc'ed type T, whether the size is a multiple of the
-size of T (Works only with <span class="name">unix.Malloc</span>
-or <span class="name">alpha.unix.MallocWithAnnotations</span>
-checks enabled).</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int *x = (int *)malloc(11); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.CastToStruct"><div class="namedescr expandable"><span class="name">
-alpha.core.CastToStruct</span><span class="lang">
-(C, C++)</span><div class="descr">
-Check for cast from non-struct pointer to struct pointer.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// C
-struct s {};
-
-void test(int *p) {
- struct s *ps = (struct s *) p; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// C++
-class c {};
-
-void test(int *p) {
- c *pc = (c *) p; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.Conversion"><div class="namedescr expandable"><span class="name">
-alpha.core.Conversion</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Loss of sign or precision in implicit conversions</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(unsigned U, signed S) {
- if (S > 10) {
- if (U < S) {
- }
- }
- if (S < -10) {
- if (U < S) { // warn (loss of sign)
- }
- }
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-void test() {
- long long A = 1LL << 60;
- short X = A; // warn (loss of precision)
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.DynamicTypeChecker"><div class="namedescr expandable"><span class="name">
-alpha.core.DynamicTypeChecker</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for cases where the dynamic and the static type of an
-object are unrelated.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-id date = [NSDate date];
-
-// Warning: Object has a dynamic type 'NSDate *' which is
-// incompatible with static type 'NSNumber *'"
-NSNumber *number = date;
-[number doubleValue];
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.FixedAddr"><div class="namedescr expandable"><span class="name">
-alpha.core.FixedAddr</span><span class="lang">
-(C)</span><div class="descr">
-Check for assignment of a fixed address to a pointer.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int *p;
- p = (int *) 0x10000; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.IdenticalExpr"><div class="namedescr expandable"><span class="name">
-alpha.core.IdenticalExpr</span><span class="lang">
-(C, C++)</span><div class="descr">
-Warn about suspicious uses of identical expressions.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// C
-void test() {
- int a = 5;
- int b = a | 4 | a; // warn: identical expr on both sides
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// C++
-bool f(void);
-
-void test(bool b) {
- int i = 10;
- if (f()) { // warn: true and false branches are identical
- do {
- i--;
- } while (f());
- } else {
- do {
- i--;
- } while (f());
- }
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.PointerArithm"><div class="namedescr expandable"><span class="name">
-alpha.core.PointerArithm</span><span class="lang">
-(C)</span><div class="descr">
-Check for pointer arithmetic on locations other than array
-elements.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x;
- int *p;
- p = &x + 1; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.PointerSub"><div class="namedescr expandable"><span class="name">
-alpha.core.PointerSub</span><span class="lang">
-(C)</span><div class="descr">
-Check for pointer subtractions on two pointers pointing to different memory
-chunks.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x, y;
- int d = &y - &x; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.StackAddressAsyncEscape"><div class="namedescr expandable"><span class="name">
-alpha.core.StackAddressAsyncEscape</span><span class="lang">
-(C)</span><div class="descr">
-Check that addresses to stack memory do not escape the function that involves
-<code>dispatch_after</code> or <code>dispatch_async</code>. This checker is
-a part of core.StackAddressEscape, but is
-<a href=https://reviews.llvm.org/D41042>temporarily disabled</a> until some
-false positives are fixed.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-dispatch_block_t test_block_inside_block_async_leak() {
- int x = 123;
- void (^inner)(void) = ^void(void) {
- int y = x;
- ++y;
- };
- void (^outer)(void) = ^void(void) {
- int z = x;
- ++z;
- inner();
- };
- return outer; // warn: address of stack-allocated block is captured by a
- // returned block
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.core.TestAfterDivZero"><div class="namedescr expandable"><span class="name">
-alpha.core.TestAfterDivZero</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Check for division by variable that is later compared against 0.
-Either the comparison is useless or there is division by zero.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(int x) {
- var = 77 / x;
- if (x == 0) { } // warn
-}
-</pre></div></div></td></tr>
-
-
-</tbody></table>
-
-<!-- =========================== cplusplus alpha =========================== -->
-<h3 id="cplusplus_alpha_checkers">C++ Alpha Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-<tbody>
-
-
-<tr><td><a id="alpha.cplusplus.DeleteWithNonVirtualDtor"><div class="namedescr expandable"><span class="name">
-alpha.cplusplus.DeleteWithNonVirtualDtor</span><span class="lang">
-(C++)</span><div class="descr">
-Reports destructions of polymorphic objects with a non-virtual destructor in
-their base class
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-NonVirtual *create() {
- NonVirtual *x = new NVDerived(); // note: Casting from 'NVDerived' to
- // 'NonVirtual' here
- return x;
-}
-
-void sink(NonVirtual *x) {
- delete x; // warn: destruction of a polymorphic object with no virtual
- // destructor
-}
-</pre></div></div></td></tr>
-
-<tr><td><a id="alpha.cplusplus.InvalidatedIterator"><div class="namedescr expandable"><span class="name">
-alpha.cplusplus.InvalidatedIterator</span><span class="lang">
-(C++)</span><div class="descr">
-Check for use of invalidated iterators.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void bad_copy_assign_operator_list1(std::list<int> &L1,
- const std::list<int> &L2) {
- auto i0 = L1.cbegin();
- L1 = L2;
- *i0; // warn: invalidated iterator accessed
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.cplusplus.IteratorRange"><div class="namedescr expandable"><span class="name">
-alpha.cplusplus.IteratorRange</span><span class="lang">
-(C++)</span><div class="descr">
-Check for iterators used outside their valid ranges.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void simple_bad_end(const std::vector<int> &v) {
- auto i = v.end();
- *i; // warn: iterator accessed outside of its range
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.cplusplus.MismatchedIterator"><div class="namedescr expandable"><span class="name">
-alpha.cplusplus.MismatchedIterator</span><span class="lang">
-(C++)</span><div class="descr">
-Check for use of iterators of different containers where iterators of the same
-container are expected.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void bad_insert3(std::vector<int> &v1, std::vector<int> &v2) {
- v2.insert(v1.cbegin(), v2.cbegin(), v2.cend()); // warn: container accessed
- // using foreign
- // iterator argument
- v1.insert(v1.cbegin(), v1.cbegin(), v2.cend()); // warn: iterators of
- // different containers
- // used where the same
- // container is
- // expected
- v1.insert(v1.cbegin(), v2.cbegin(), v1.cend()); // warn: iterators of
- // different containers
- // used where the same
- // container is
- // expected
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.cplusplus.Move"><div class="namedescr expandable"><span class="name">
-alpha.cplusplus.Move</span><span class="lang">
-(C++)</span><div class="descr">
-Method calls on a moved-from object and copying a moved-from object will be
-reported.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-struct A {
- void foo() {}
-};
-
-void f() {
- A a;
- A b = std::move(a); // note: 'a' became 'moved-from' here
- a.foo(); // warn: method call on a 'moved-from' object 'a'
-}
-</pre></div></div></td></tr>
-
-
-</tbody></table>
-
-
-<!-- =========================== dead code alpha =========================== -->
-<h3 id="deadcode_alpha_checkers">Dead Code Alpha Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="alpha.deadcode.UnreachableCode"><div class="namedescr expandable"><span class="name">
-alpha.deadcode.UnreachableCode</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Check unreachable code.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// C
-int test() {
- int x = 1;
- while(x);
- return x; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// C++
-void test() {
- int a = 2;
-
- while (a > 1)
- a--;
-
- if (a > 1)
- a++; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// Objective-C
-void test(id x) {
- return;
- [x retain]; // warn
-}
-</pre></div></div></td></tr>
-</tbody></table>
-
-<!-- =========================== llvm alpha =========================== -->
-<h3 id="llvm_alpha_checkers">LLVM Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="alpha.llvm.Conventions"><div class="namedescr expandable"><span class="name">
-alpha.llvm.Conventions</span><span class="lang">
-(C)</span><div class="descr">
-Check code for LLVM codebase conventions:
-<ul>
- <li>A <code>StringRef</code> should not be bound to a temporary std::string
- whose lifetime is shorter than the <code>StringRef</code>'s.</li>
- <li>Clang AST nodes should not have fields that can allocate memory.</li>
-</ul>
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-<!-- TODO: Add examples, as currently it's hard to get this checker working. -->
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-
-<!-- ============================== OS X alpha ============================== -->
-<h3 id="osx_alpha_checkers">OS X Alpha Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="alpha.osx.cocoa.DirectIvarAssignment"><div class="namedescr expandable"><span class="name">
-alpha.osx.cocoa.DirectIvarAssignment</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check that Objective C properties follow the following rule: the property
-should be set with the setter, not though a direct assignment.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface MyClass : NSObject {}
-@property (readonly) id A;
-- (void) foo;
-@end
-
-@implementation MyClass
-- (void) foo {
- _A = 0; // warn
-}
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.osx.cocoa.DirectIvarAssignmentForAnnotatedFunctions"><div class="namedescr expandable"><span class="name">
-alpha.osx.cocoa.DirectIvarAssignmentForAnnotatedFunctions</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for direct assignments to instance variables in the methods annotated
-with <code>objc_no_direct_instance_variable_assignment</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface MyClass : NSObject {}
-@property (readonly) id A;
-- (void) fAnnotated __attribute__((
- annotate("objc_no_direct_instance_variable_assignment")));
-- (void) fNotAnnotated;
-@end
-
-@implementation MyClass
-- (void) fAnnotated {
- _A = 0; // warn
-}
-- (void) fNotAnnotated {
- _A = 0; // no warn
-}
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.osx.cocoa.InstanceVariableInvalidation"><div class="namedescr expandable"><span class="name">
-alpha.osx.cocoa.InstanceVariableInvalidation</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check that the invalidatable instance variables are invalidated in the methods
-annotated with <code>objc_instance_variable_invalidator</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@protocol Invalidation <NSObject>
-- (void) invalidate
- __attribute__((annotate("objc_instance_variable_invalidator")));
-@end
-
-@interface InvalidationImpObj : NSObject <Invalidation>
-@end
-
-@interface SubclassInvalidationImpObj : InvalidationImpObj {
- InvalidationImpObj *var;
-}
-- (void)invalidate;
-@end
-
-@implementation SubclassInvalidationImpObj
-- (void) invalidate {}
-@end
-// warn: var needs to be invalidated or set to nil
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.osx.cocoa.MissingInvalidationMethod"><div class="namedescr expandable"><span class="name">
-alpha.osx.cocoa.MissingInvalidationMethod</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check that the invalidation methods are present in classes that contain
-invalidatable instance variables.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@protocol Invalidation <NSObject>
-- (void)invalidate
- __attribute__((annotate("objc_instance_variable_invalidator")));
-@end
-
-@interface NeedInvalidation : NSObject <Invalidation>
-@end
-
-@interface MissingInvalidationMethodDecl : NSObject {
- NeedInvalidation *Var; // warn
-}
-@end
-
-@implementation MissingInvalidationMethodDecl
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.osx.cocoa.localizability.PluralMisuseChecker"><div class="namedescr expandable"><span class="name">
-alpha.osx.cocoa.localizability.PluralMisuseChecker</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warns against using one vs. many plural pattern in code
-when generating localized strings.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-NSString *reminderText =
- NSLocalizedString(@"None", @"Indicates no reminders");
-if (reminderCount == 1) {
- // Warning: Plural cases are not supported across all languages.
- // Use a .stringsdict file instead
- reminderText =
- NSLocalizedString(@"1 Reminder", @"Indicates single reminder");
-} else if (reminderCount >= 2) {
- // Warning: Plural cases are not supported across all languages.
- // Use a .stringsdict file instead
- reminderText =
- [NSString stringWithFormat:
- NSLocalizedString(@"%@ Reminders", @"Indicates multiple reminders"),
- reminderCount];
-}
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== security alpha =========================== -->
-<h3 id="security_alpha_checkers">Security Alpha Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="alpha.security.ArrayBound"><div class="namedescr expandable"><span class="name">
-alpha.security.ArrayBound</span><span class="lang">
-(C)</span><div class="descr">
-Warn about buffer overflows (older checker).</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- char *s = "";
- char c = s[1]; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-struct seven_words {
- int c[7];
-};
-
-void test() {
- struct seven_words a, *p;
- p = &a;
- p[0] = a;
- p[1] = a;
- p[2] = a; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// note: requires unix.Malloc or
-// alpha.unix.MallocWithAnnotations checks enabled.
-void test() {
- int *p = malloc(12);
- p[3] = 4; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-void test() {
- char a[2];
- int *b = (int*)a;
- b[1] = 3; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.security.ArrayBoundV2"><div class="namedescr expandable"><span class="name">
-alpha.security.ArrayBoundV2</span><span class="lang">
-(C)</span><div class="descr">
-Warn about buffer overflows (newer checker).</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- char *s = "";
- char c = s[1]; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-void test() {
- int buf[100];
- int *p = buf;
- p = p + 99;
- p[1] = 1; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// note: compiler has internal check for this.
-// Use -Wno-array-bounds to suppress compiler warning.
-void test() {
- int buf[100][100];
- buf[0][-1] = 1; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// note: requires alpha.security.taint check turned on.
-void test() {
- char s[] = "abc";
- int x = getchar();
- char c = s[x]; // warn: index is tainted
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.security.MallocOverflow"><div class="namedescr expandable"><span class="name">
-alpha.security.MallocOverflow</span><span class="lang">
-(C)</span><div class="descr">
-Check for overflows in the arguments to <code>malloc()</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(int n) {
- void *p = malloc(n * sizeof(int)); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.security.MmapWriteExec"><div class="namedescr expandable"><span class="name">
-alpha.security.MmapWriteExec</span><span class="lang">
-(C)</span><div class="descr">
-Warn on <code>mmap()<code> calls that are both writable and executable.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(int n) {
- void *c = mmap(NULL, 32, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANON, -1, 0);
- // warn: Both PROT_WRITE and PROT_EXEC flags are set. This can lead to
- // exploitable memory regions, which could be overwritten with malicious
- // code
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.security.ReturnPtrRange"><div class="namedescr expandable"><span class="name">
-alpha.security.ReturnPtrRange</span><span class="lang">
-(C)</span><div class="descr">
-Check for an out-of-bound pointer being returned to callers.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-static int A[10];
-
-int *test() {
- int *p = A + 10;
- return p; // warn
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-int test(void) {
- int x;
- return x; // warn: undefined or garbage returned
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.security.taint.TaintPropagation"><div class="namedescr expandable"><span class="name">
-alpha.security.taint.TaintPropagation</span><span class="lang">
-(C)</span><div class="descr">
-Generate taint information used by other checkers.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- char x = getchar(); // 'x' marked as tainted
- system(&x); // warn: untrusted data is passed to a system call
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// note: compiler internally checks if the second param to
-// sprintf is a string literal or not.
-// Use -Wno-format-security to suppress compiler warning.
-void test() {
- char s[10], buf[10];
- fscanf(stdin, "%s", s); // 's' marked as tainted
-
- sprintf(buf, s); // warn: untrusted data as a format string
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-void test() {
- size_t ts;
- scanf("%zd", &ts); // 'ts' marked as tainted
- int *p = (int *)malloc(ts * sizeof(int));
- // warn: untrusted data as buffer size
-}
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- ============================= unix alpha ============================= -->
-<h3 id="unix_alpha_checkers">Unix Alpha Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-<tbody>
-
-
-<tr><td><a id="alpha.unix.Chroot"><div class="namedescr expandable"><span class="name">
-alpha.unix.Chroot</span><span class="lang">
-(C)</span><div class="descr">
-Check improper use of <code>chroot</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void f();
-
-void test() {
- chroot("/usr/local");
- f(); // warn: no call of chdir("/") immediately after chroot
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.unix.PthreadLock"><div class="namedescr expandable"><span class="name">
-alpha.unix.PthreadLock</span><span class="lang">
-(C)</span><div class="descr">
-Simple lock -> unlock checker; applies to:<div class=functions>
-pthread_mutex_lock<br>
-pthread_rwlock_rdlock<br>
-pthread_rwlock_wrlock<br>
-lck_mtx_lock<br>
-lck_rw_lock_exclusive<br>
-lck_rw_lock_shared<br>
-pthread_mutex_trylock<br>
-pthread_rwlock_tryrdlock<br>
-pthread_rwlock_tryrwlock<br>
-lck_mtx_try_lock<br>
-lck_rw_try_lock_exclusive<br>
-lck_rw_try_lock_shared<br>
-pthread_mutex_unlock<br>
-pthread_rwlock_unlock<br>
-lck_mtx_unlock<br>
-lck_rw_done</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-pthread_mutex_t mtx;
-
-void test() {
- pthread_mutex_lock(&mtx);
- pthread_mutex_lock(&mtx);
- // warn: this lock has already been acquired
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-lck_mtx_t lck1, lck2;
-
-void test() {
- lck_mtx_lock(&lck1);
- lck_mtx_lock(&lck2);
- lck_mtx_unlock(&lck1);
- // warn: this was not the most recently acquired lock
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-lck_mtx_t lck1, lck2;
-
-void test() {
- if (lck_mtx_try_lock(&lck1) == 0)
- return;
-
- lck_mtx_lock(&lck2);
- lck_mtx_unlock(&lck1);
- // warn: this was not the most recently acquired lock
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.unix.SimpleStream"><div class="namedescr expandable"><span class="name">
-alpha.unix.SimpleStream</span><span class="lang">
-(C)</span><div class="descr">
-Check for misuses of stream APIs:<div class=functions>
-fopen<br>
-fclose</div>(demo checker, the subject of the demo
-(<a href="https://llvm.org/devmtg/2012-11/Zaks-Rose-Checker24Hours.pdf">Slides</a>
-,<a href="https://youtu.be/kdxlsP5QVPw">Video</a>)
-by Anna Zaks and Jordan Rose presented at the <a href="https://llvm.org/devmtg/2012-11/">
-2012 LLVM Developers' Meeting).</a></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- FILE *F = fopen("myfile.txt", "w");
-} // warn: opened file is never closed
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-void test() {
- FILE *F = fopen("myfile.txt", "w");
-
- if (F)
- fclose(F);
-
- fclose(F); // warn: closing a previously closed file stream
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.unix.cstring.BufferOverlap"><div class="namedescr expandable"><span class="name">
-alpha.unix.cstring.BufferOverlap</span><span class="lang">
-(C)</span><div class="descr">
-Checks for overlap in two buffer arguments; applies to:<div class=functions>
-memcpy<br>
-mempcpy</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int a[4] = {0};
- memcpy(a + 2, a + 1, 8); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="alpha.unix.cstring.NotNullTerminated"><div class="namedescr expandable"><span class="name">
-alpha.unix.cstring.NotNullTerminated</span><span class="lang">
-(C)</span><div class="descr">
-Check for arguments which are not null-terminated strings; applies
-to:<div class=functions>
-strlen<br>
-strnlen<br>
-strcpy<br>
-strncpy<br>
-strcat<br>
-strncat</div></div></div></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int y = strlen((char *)&test); // warn
-}
-</pre></div></div></a></td></tr>
-
-
-<tr><td><a id="alpha.unix.cstring.OutOfBounds"><div class="namedescr expandable"><span class="name">
-alpha.unix.cstring.OutOfBounds</span><span class="lang">
-(C)</span><div class="descr">
-Check for out-of-bounds access in string functions; applies
-to:<div class=functions>
-strncopy<br>
-strncat</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(char *y) {
- char x[4];
- if (strlen(y) == 4)
- strncpy(x, y, 5); // warn
-}
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== nondeterminism alpha =========================== -->
-<h3 id="nondeterminism_alpha_checkers">Non-determinism Alpha Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="alpha.nondeterminism.PointerIteration"><div class="namedescr expandable"><span class="name">
-alpha.nondeterminism.PointerIteration</span><span class="lang">
-(C++)</span><div class="descr">
-Check for non-determinism caused by iterating unordered containers of pointers.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// C++
-void test() {
- int a = 1, b = 2;
- std::unordered_set<int *> UnorderedPtrSet = {&a, &b};
-
- for (auto i : UnorderedPtrSet) // warn
- f(i);
-}
-</pre></div></div></td></tr>
-<tr><td><a id="alpha.nondeterminism.PointerSorting"><div class="namedescr expandable"><span class="name">
-alpha.nondeterminism.PointerSorting</span><span class="lang">
-(C++)</span><div class="descr">
-Check for non-determinism caused by sorting of pointers.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// C++
-void test() {
- int a = 1, b = 2;
- std::vector<int *> V = {&a, &b};
- std::sort(V.begin(), V.end()); // warn
-}
-</pre></div></div></td></tr>
-</tbody></table>
+<h1>The analyzer checkers documentation has moved to clang.llvm.org</h1>
+<p style="color:red; font-size:200%">This page is deprecated and will be removed in release 21.0</p>
+<a href="https://clang.llvm.org/docs/analyzer/checkers.html#experimental-checkers">The new site</a>
+<script>window.location='https://clang.llvm.org/docs/analyzer/checkers.html#experimental-checkers'</script>
</div> <!-- page -->
</div> <!-- content -->
diff --git a/clang/www/analyzer/available_checks.html b/clang/www/analyzer/available_checks.html
index c23865e57e87d..7be155a5854e8 100644
--- a/clang/www/analyzer/available_checks.html
+++ b/clang/www/analyzer/available_checks.html
@@ -2,7 +2,9 @@
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
- <title>Available Checkers</title>
+ <title>Available Checkers documentation has moved to clang.llvm.org</title>
+ <link rel="canonical" href="https://clang.llvm.org/docs/analyzer/checkers.html"/>
+ <meta http-equiv="refresh" content="0;url=https://clang.llvm.org/docs/analyzer/checkers.html" />
<link type="text/css" rel="stylesheet" href="menu.css">
<link type="text/css" rel="stylesheet" href="content.css">
<script type="text/javascript" src="scripts/menu.js"></script>
@@ -17,1742 +19,11 @@
<!--#include virtual="menu.html.incl"-->
<div id="content">
-<h1>Available Checkers</h1>
-The analyzer performs checks that are categorized into families or "checkers". The
-default set of checkers covers a variety of checks targeted at finding security
-and API usage bugs, dead code, and other logic errors. See the
-<a href = "#default_checkers">Default Checkers</a> list below. In addition to
-these, the analyzer contains a number of <a href = "alpha_checks.html">
-Experimental (Alpha) Checkers</a>.
-<h3>Writeups with examples of some of the bugs that the analyzer finds</h3>
-<ul>
-<li><a href="http://www.mobileorchard.com/bug-finding-with-clang-5-resources-to-get-you-started/">Bug Finding With Clang: 5 Resources To Get You Started</a></li>
-<li><a href="https://fruitstandsoftware.mrrooni.com/blog/blog/2008/08/04/finding-memory-leaks-with-the-llvmclang-static-analyzer/">Finding Memory Leaks With The LLVM/Clang Static Analyzer</a></li>
-<li><a href="https://weblog.rogueamoeba.com/2008/07/14/the-clang-static-analyzer/">Under the Microscope - The Clang Static Analyzer</a></li>
-<li><a href="https://www.mikeash.com/pyblog/friday-qa-2009-03-06-using-the-clang-static-analyzer.html">Mike Ash - Using the Clang Static Analyzer</a></li>
-</ul>
-
-<h2 id="default_checkers">Default Checkers</h2>
-<ul>
-<li><a href="#core_checkers">Core Checkers</a> model core language features and perform general-purpose checks such as division by zero, null pointer dereference, usage of uninitialized values, etc.</li>
-<li><a href="#cplusplus_checkers">C++ Checkers</a> perform C++-specific checks</li>
-<li><a href="#deadcode_checkers">Dead Code Checkers</a> check for unused code</li>
-<li><a href="#nullability_checkers">Nullability Checkers</a> </li>
-<li><a href="#optin_checkers">Optin Checkers</a> </li>
-<li><a href="#osx_checkers">OS X Checkers</a> perform Objective-C-specific checks and check the use of Apple's SDKs (OS X and iOS)</li>
-<li><a href="#security_checkers">Security Checkers</a> check for insecure API usage and perform checks based on the CERT Secure Coding Standards</li>
-<li><a href="#unix_checkers">Unix Checkers</a> check the use of Unix and POSIX APIs</li>
-</ul>
-
-<!-- =========================== core =========================== -->
-<h3 id="core_checkers">Core Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="core.CallAndMessage"><div class="namedescr expandable"><span class="name">
-core.CallAndMessage</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Check for logical errors for function calls and Objective-C message expressions
-(e.g., uninitialized arguments, null function pointers).</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// C
-struct S {
- int x;
-};
-
-void f(struct S s);
-
-void test() {
- struct S s;
- f(s); // warn: passed-by-value arg contains uninitialized data
-}
-</pre></div>
-<div class="example"><pre>
-// C
-void test() {
- void (*foo)(void);
- foo(); // warn: function pointer is uninitialized
-}
-</pre></div>
-<div class="example"><pre>
-// C
-void test() {
- void (*foo)(void);
- foo = 0;
- foo(); // warn: function pointer is null
-}
-</pre></div>
-<div class="example"><pre>
-// C++
-class C {
-public:
- void f();
-};
-
-void test() {
- C *pc;
- pc->f(); // warn: object pointer is uninitialized
-}
-</pre></div>
-<div class="example"><pre>
-// C++
-class C {
-public:
- void f();
-};
-
-void test() {
- C *pc = 0;
- pc->f(); // warn: object pointer is null
-}
-</pre></div>
-<div class="example"><pre>
-// Objective-C
-@interface MyClass : NSObject
-@property (readwrite,assign) id x;
-- (long double)longDoubleM;
-@end
-
-void test() {
- MyClass *obj1;
- long double ld1 = [obj1 longDoubleM];
- // warn: receiver is uninitialized
-}
-</pre></div>
-<div class="example"><pre>
-// Objective-C
-@interface MyClass : NSObject
-@property (readwrite,assign) id x;
-- (long double)longDoubleM;
-@end
-
-void test() {
- MyClass *obj1;
- id i = obj1.x; // warn: uninitialized object pointer
-}
-</pre></div>
-<div class="example"><pre>
-// Objective-C
-@interface Subscriptable : NSObject
-- (id)objectAtIndexedSubscript:(unsigned int)index;
-@end
-
-@interface MyClass : Subscriptable
-@property (readwrite,assign) id x;
-- (long double)longDoubleM;
-@end
-
-void test() {
- MyClass *obj1;
- id i = obj1[0]; // warn: uninitialized object pointer
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.DivideZero"><div class="namedescr expandable"><span class="name">
-core.DivideZero</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Check for division by zero.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(int z) {
- if (z == 0)
- int x = 1 / z; // warn
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int x = 1;
- int y = x % 0; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.NonNullParamChecker"><div class="namedescr expandable"><span class="name">
-core.NonNullParamChecker</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Check for null pointers passed as arguments to a function whose arguments are
-marked with the <code>nonnull</code> attribute.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-int f(int *p) __attribute__((nonnull));
-
-void test(int *p) {
- if (!p)
- f(p); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.NullDereference"><div class="namedescr expandable"><span class="name">
-core.NullDereference</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Check for dereferences of null pointers.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// C
-void test(int *p) {
- if (p)
- return;
-
- int x = p[0]; // warn
-}
-</pre></div>
-<div class="example"><pre>
-// C
-void test(int *p) {
- if (!p)
- *p = 0; // warn
-}
-</pre></div>
-<div class="example"><pre>
-// C++
-class C {
-public:
- int x;
-};
-
-void test() {
- C *pc = 0;
- int k = pc->x; // warn
-}
-</pre></div>
-<div class="example"><pre>
-// Objective-C
-@interface MyClass {
-@public
- int x;
-}
-@end
-
-void test() {
- MyClass *obj = 0;
- obj->x = 1; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.StackAddressEscape"><div class="namedescr expandable"><span class="name">
-core.StackAddressEscape</span><span class="lang">
-(C)</span><div class="descr">
-Check that addresses of stack memory do not escape the function.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-char const *p;
-
-void test() {
- char const str[] = "string";
- p = str; // warn
-}
-</pre></div>
-<div class="example"><pre>
-void* test() {
- return __builtin_alloca(12); // warn
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- static int *x;
- int y;
- x = &y; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.UndefinedBinaryOperatorResult"><div class="namedescr expandable"><span class="name">
-core.UndefinedBinaryOperatorResult</span><span class="lang">
-(C)</span><div class="descr">
-Check for undefined results of binary operators.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x;
- int y = x + 1; // warn: left operand is garbage
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.VLASize"><div class="namedescr expandable"><span class="name">
-core.VLASize</span><span class="lang">
-(C)</span><div class="descr">
-Check for declarations of variable-length arrays (VLA) of undefined or zero size.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x;
- int vla1[x]; // warn: garbage as size
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int x = 0;
- int vla2[x]; // warn: zero size
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.uninitialized.ArraySubscript"><div class="namedescr expandable"><span class="name">
-core.uninitialized.ArraySubscript</span><span class="lang">
-(C)</span><div class="descr">
-Check for uninitialized values used as array subscripts.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int i, a[10];
- int x = a[i]; // warn: array subscript is undefined
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.uninitialized.Assign"><div class="namedescr expandable"><span class="name">
-core.uninitialized.Assign</span><span class="lang">
-(C)</span><div class="descr">
-Check for assigning uninitialized values.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x;
- x |= 1; // warn: left expression is uninitialized
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.uninitialized.Branch"><div class="namedescr expandable"><span class="name">
-core.uninitialized.Branch</span><span class="lang">
-(C)</span><div class="descr">
-Check for uninitialized values used as branch conditions.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x;
- if (x) // warn
- return;
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.uninitialized.CapturedBlockVariable"><div class="namedescr expandable"><span class="name">
-core.uninitialized.CapturedBlockVariable</span><span class="lang">
-(C)</span><div class="descr">
-Check for blocks that capture uninitialized values.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x;
- ^{ int y = x; }(); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="core.uninitialized.UndefReturn"><div class="namedescr expandable"><span class="name">
-core.uninitialized.UndefReturn</span><span class="lang">
-(C)</span><div class="descr">
-Check for uninitialized values being returned to the caller.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-int test() {
- int x;
- return x; // warn
-}
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== C++ =========================== -->
-<h3 id="cplusplus_checkers">C++ Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-
-<tr><td><a id="cplusplus.ArrayDelete"><div class="namedescr expandable"><span class="name">
-cplusplus.ArrayDelete</span><span class="lang">
-(C++)</span><div class="descr">
-Reports destruction of arrays of polymorphic objects through a pointer to
-their base class. If the static type of the array pointer differs from the
-dynamic type of its underlying objects, calling <code>delete[]</code> is undefined.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-class Base {
-public:
- virtual ~Base() {}
-};
-class Derived : public Base {};
-
-Base *create() {
- Base *x = new Derived[10]; // note: Casting from 'Derived' to 'Base' here
- return x;
-}
-
-void sink(Base *x) {
- delete[] x; // warn: Deleting an array of 'Derived' objects as their base class 'Base' is undefined
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="cplusplus.NewDelete"><div class="namedescr expandable"><span class="name">
-cplusplus.NewDelete</span><span class="lang">
-(C++)</span><div class="descr">
-Check for double-free, use-after-free and offset problems involving C++ <code>
-delete</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void f(int *p);
-
-void testUseMiddleArgAfterDelete(int *p) {
- delete p;
- f(p); // warn: use after free
-}
-</pre></div>
-<div class="example"><pre>
-class SomeClass {
-public:
- void f();
-};
-
-void test() {
- SomeClass *c = new SomeClass;
- delete c;
- c->f(); // warn: use after free
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int *p = (int *)__builtin_alloca(sizeof(int));
- delete p; // warn: deleting memory allocated by alloca
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int *p = new int;
- delete p;
- delete p; // warn: attempt to free released memory
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int i;
- delete &i; // warn: delete address of local
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int *p = new int[1];
- delete[] (++p);
- // warn: argument to 'delete[]' is offset by 4 bytes
- // from the start of memory allocated by 'new[]'
-}
-</pre></div></div></td></tr>
-
-<tr><td><a id="cplusplus.NewDeleteLeaks"><div class="namedescr expandable"><span class="name">
-cplusplus.NewDeleteLeaks</span><span class="lang">
-(C++)</span><div class="descr">
-Check for memory leaks. Traces memory managed by <code>new</code>/<code>
-delete</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int *p = new int;
-} // warn
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== dead code =========================== -->
-<h3 id="deadcode_checkers">Dead Code Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="deadcode.DeadStores"><div class="namedescr expandable"><span class="name">
-deadcode.DeadStores</span><span class="lang">
-(C)</span><div class="descr">
-Check for values stored to variables that are never read afterwards.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x;
- x = 1; // warn
-}
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== nullability =========================== -->
-<h3 id="nullability_checkers">Nullability Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="nullability.NullPassedToNonnull"><div class="namedescr expandable"><span class="name">
-nullability.NullPassedToNonnull</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warns when a null pointer is passed as an argument to a pointer that has a
-_Nonnull type.
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-if (name != nil)
- return;
-// Warning: nil passed to a callee that requires a non-null 1st parameter
-NSString *greeting = [@"Hello " stringByAppendingString:name];
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="nullability.NullReturnedFromNonnull"><div class="namedescr expandable"><span class="name">
-nullability.NullReturnedFromNonnull</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warns when a null pointer is returned from a function that has a
-_Nonnull return type.
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-- (nonnull id)firstChild {
- id result = nil;
- if ([_children count] > 0)
- result = _children[0];
-
- // Warning: nil returned from a method that is expected
- // to return a non-null value
- return result;
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="nullability.NullableDereferenced"><div class="namedescr expandable"><span class="name">
-nullability.NullableDereferenced</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warns when a nullable pointer is dereferenced.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-struct LinkedList {
- int data;
- struct LinkedList *next;
-};
-
-struct LinkedList * _Nullable getNext(struct LinkedList *l);
-
-void updateNextData(struct LinkedList *list, int newData) {
- struct LinkedList *next = getNext(list);
- // Warning: Nullable pointer is dereferenced
- next->data = 7;
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="nullability.NullablePassedToNonnull"><div class="namedescr expandable"><span class="name">
-nullability.NullablePassedToNonnull</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warns when a nullable pointer is passed as an argument to a pointer that has a _Nonnull type.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-typedef struct Dummy { int val; } Dummy;
-Dummy *_Nullable returnsNullable();
-void takesNonnull(Dummy *_Nonnull);
-
-void test() {
- Dummy *p = returnsNullable();
- takesNonnull(p); // warn
-}
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== optin =========================== -->
-<h3 id="optin_checkers">Optin Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tr><td><a id="cplusplus.UninitializedObject"><div class="namedescr expandable"><span class="name">
-cplusplus.UninitializedObject</span><span class="lang">
-(C++)</span><div class="descr">
-This checker reports uninitialized fields in objects created after a constructor
-call. It doesn't only find direct uninitialized fields, but rather makes a deep
-inspection of the object, analyzing all of its fields and subfields. <br>
-The checker regards inherited fields as direct fields, so one will receive
-warnings for uninitialized inherited data members as well. <br>
-<br>
-It has several options:
-<ul>
- <li>
- "<code>Pedantic</code>" (boolean). If its not set or is set to false, the
- checker won't emit warnings for objects that don't have at least one
- initialized field. This may be set with <br>
- <code>-analyzer-config cplusplus.UninitializedObject:Pedantic=true</code>.
- </li>
- <li>
- "<code>NotesAsWarnings</code>" (boolean). If set to true, the checker will
- emit a warning for each uninitialized field, as opposed to emitting one
- warning per constructor call and listing the uninitialized fields that
- belong to it in notes. Defaults to false. <br>
- <code>-analyzer-config cplusplus.UninitializedObject:NotesAsWarnings=true</code>.
- </li>
- <li>
- "<code>CheckPointeeInitialization</code>" (boolean). If set to false, the
- checker will not analyze the pointee of pointer/reference fields, and will
- only check whether the object itself is initialized. Defaults to false. <br>
- <code>-analyzer-config cplusplus.UninitializedObject:CheckPointeeInitialization=true</code>.
- </li>
- <li>
- "<code>IgnoreRecordsWithField</code>" (string). If supplied, the checker
- will not analyze structures that have a field with a name or type name that
- matches the given pattern. Defaults to <code>""</code>.
-
- <code>-analyzer-config cplusplus.UninitializedObject:IgnoreRecordsWithField="[Tt]ag|[Kk]ind"</code>.
- </li>
-</ul></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// With Pedantic and CheckPointeeInitialization set to true
-
-struct A {
- struct B {
- int x; // note: uninitialized field 'this->b.x'
- // note: uninitialized field 'this->bptr->x'
- int y; // note: uninitialized field 'this->b.y'
- // note: uninitialized field 'this->bptr->y'
- };
- int *iptr; // note: uninitialized pointer 'this->iptr'
- B b;
- B *bptr;
- char *cptr; // note: uninitialized pointee 'this->cptr'
-
- A (B *bptr, char *cptr) : bptr(bptr), cptr(cptr) {}
-};
-
-void f() {
- A::B b;
- char c;
- A a(&b, &c); // warning: 6 uninitialized fields
- // after the constructor call
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// With Pedantic set to false and
-// CheckPointeeInitialization set to true
-// (every field is uninitialized)
-
-struct A {
- struct B {
- int x;
- int y;
- };
- int *iptr;
- B b;
- B *bptr;
- char *cptr;
-
- A (B *bptr, char *cptr) : bptr(bptr), cptr(cptr) {}
-};
-
-void f() {
- A::B b;
- char c;
- A a(&b, &c); // no warning
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-// With Pedantic and CheckPointeeInitialization set to false
-// (pointees are regarded as initialized)
-
-struct A {
- struct B {
- int x; // note: uninitialized field 'this->b.x'
- int y; // note: uninitialized field 'this->b.y'
- };
- int *iptr; // note: uninitialized pointer 'this->iptr'
- B b;
- B *bptr;
- char *cptr;
-
- A (B *bptr, char *cptr) : bptr(bptr), cptr(cptr) {}
-};
-
-void f() {
- A::B b;
- char c;
- A a(&b, &c); // warning: 3 uninitialized fields
- // after the constructor call
-}
-</pre></div></div></td></tr>
-
-
-<tbody>
-<tr><td><a id="optin.cplusplus.VirtualCall"><div class="namedescr expandable"><span class="name">
-optin.cplusplus.VirtualCall</span><span class="lang">
-(C++)</span><div class="descr">
-Check virtual member function calls during construction or
-destruction.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-class A {
-public:
- A() {
- f(); // warn
- }
- virtual void f();
-};
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-class A {
-public:
- ~A() {
- this->f(); // warn
- }
- virtual void f();
-};
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="optin.mpi.MPI-Checker"><div class="namedescr expandable"><span class="name">
-optin.mpi.MPI-Checker</span><span class="lang">
-(C)</span><div class="descr">
-Checks MPI code for misuse of nonblocking communication, e.g. a request with
-no matching wait or a wait with no preceding nonblocking call.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- double buf = 0;
- MPI_Request sendReq1;
- MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM,
- 0, MPI_COMM_WORLD, &sendReq1);
-} // warn: request 'sendReq1' has no matching wait.
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-void test() {
- double buf = 0;
- MPI_Request sendReq;
- MPI_Isend(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq);
- MPI_Irecv(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // warn
- MPI_Isend(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // warn
- MPI_Wait(&sendReq, MPI_STATUS_IGNORE);
-}
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-void missingNonBlocking() {
- int rank = 0;
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- MPI_Request sendReq1[10][10][10];
- MPI_Wait(&sendReq1[1][7][9], MPI_STATUS_IGNORE); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="optin.osx.cocoa.localizability.EmptyLocalizationContextChecker"><div class="namedescr expandable"><span class="name">
-optin.osx.cocoa.localizability.EmptyLocalizationContextChecker</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check that NSLocalizedString macros include a comment for context.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-- (void)test {
- NSString *string = NSLocalizedString(@"LocalizedString", nil); // warn
- NSString *string2 = NSLocalizedString(@"LocalizedString", @" "); // warn
- NSString *string3 = NSLocalizedStringWithDefaultValue(
- @"LocalizedString", nil, [[NSBundle alloc] init], nil,@""); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="optin.osx.cocoa.localizability.NonLocalizedStringChecker"><div class="namedescr expandable"><span class="name">
-optin.osx.cocoa.localizability.NonLocalizedStringChecker</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warns about uses of non-localized NSStrings passed to UI methods
-expecting localized NSStrings.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-NSString *alarmText =
- NSLocalizedString(@"Enabled", @"Indicates alarm is turned on");
-if (!isEnabled) {
- alarmText = @"Disabled";
-}
-UILabel *alarmStateLabel = [[UILabel alloc] init];
-
-// Warning: User-facing text should use localized string macro
-[alarmStateLabel setText:alarmText];
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== OS X =========================== -->
-<h3 id="osx_checkers">OS X Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="osx.API"><div class="namedescr expandable"><span class="name">
-osx.API</span><span class="lang">
-(C)</span><div class="descr">
-Check for proper uses of various Apple APIs:<div class=functions>
-dispatch_once</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- dispatch_once_t pred = 0;
- dispatch_once(&pred, ^(){}); // warn: dispatch_once uses local
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.NumberObjectConversion"><div class="namedescr expandable"><span class="name">
-osx.NumberObjectConversion</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Check for erroneous conversions of objects representing numbers
-into numbers.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-NSNumber *photoCount = [albumDescriptor objectForKey:@"PhotoCount"];
-// Warning: Comparing a pointer value of type 'NSNumber *'
-// to a scalar integer value
-if (photoCount > 0) {
- [self displayPhotos];
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.SecKeychainAPI"><div class="namedescr expandable"><span class="name">
-osx.SecKeychainAPI</span><span class="lang">
-(C)</span><div class="descr">
-Check for improper uses of the Security framework's Keychain APIs:<div class=functions>
-SecKeychainItemCopyContent<br>
-SecKeychainFindGenericPassword<br>
-SecKeychainFindInternetPassword<br>
-SecKeychainItemFreeContent<br>
-SecKeychainItemCopyAttributesAndData<br>
-SecKeychainItemFreeAttributesAndData</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- unsigned int *ptr = 0;
- UInt32 length;
-
- SecKeychainItemFreeContent(ptr, &length);
- // warn: trying to free data which has not been allocated
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- unsigned int *ptr = 0;
- UInt32 *length = 0;
- void *outData;
-
- OSStatus st =
- SecKeychainItemCopyContent(2, ptr, ptr, length, outData);
- // warn: data is not released
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- unsigned int *ptr = 0;
- UInt32 *length = 0;
- void *outData;
-
- OSStatus st =
- SecKeychainItemCopyContent(2, ptr, ptr, length, &outData);
-
- SecKeychainItemFreeContent(ptr, outData);
- // warn: only call free if a non-NULL buffer was returned
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- unsigned int *ptr = 0;
- UInt32 *length = 0;
- void *outData;
-
- OSStatus st =
- SecKeychainItemCopyContent(2, ptr, ptr, length, &outData);
-
- st = SecKeychainItemCopyContent(2, ptr, ptr, length, &outData);
- // warn: release data before another call to the allocator
-
- if (st == noErr)
- SecKeychainItemFreeContent(ptr, outData);
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- SecKeychainItemRef itemRef = 0;
- SecKeychainAttributeInfo *info = 0;
- SecItemClass *itemClass = 0;
- SecKeychainAttributeList *attrList = 0;
- UInt32 *length = 0;
- void *outData = 0;
-
- OSStatus st =
- SecKeychainItemCopyAttributesAndData(itemRef, info,
- itemClass, &attrList,
- length, &outData);
-
- SecKeychainItemFreeContent(attrList, outData);
- // warn: deallocator doesn't match the allocator
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.AtSync"><div class="namedescr expandable"><span class="name">
-osx.cocoa.AtSync</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for nil pointers used as mutexes for <code>@synchronized</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(id x) {
- if (!x)
- @synchronized(x) {} // warn: nil value used as mutex
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- id y;
- @synchronized(y) {} // warn: uninitialized value used as mutex
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.ClassRelease"><div class="namedescr expandable"><span class="name">
-osx.cocoa.ClassRelease</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for sending <code>retain</code>, <code>release</code>, or <code>
-autorelease</code> directly to a class.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface MyClass : NSObject
-@end
-
-void test(void) {
- [MyClass release]; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.Dealloc"><div class="namedescr expandable"><span class="name">
-osx.cocoa.Dealloc</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warn about Objective-C classes that lack a correct implementation
-of <code>-dealloc</code>.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface MyObject : NSObject {
- id _myproperty;
-}
-@end
-
-@implementation MyObject // warn: lacks 'dealloc'
-@end
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-@interface MyObject : NSObject {}
-@property(assign) id myproperty;
-@end
-
-@implementation MyObject // warn: does not send 'dealloc' to super
-- (void)dealloc {
- self.myproperty = 0;
-}
-@end
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-@interface MyObject : NSObject {
- id _myproperty;
-}
-@property(retain) id myproperty;
-@end
-
-@implementation MyObject
-@synthesize myproperty = _myproperty;
- // warn: var was retained but wasn't released
-- (void)dealloc {
- [super dealloc];
-}
-@end
-</pre></div><div class="separator"></div>
-<div class="example"><pre>
-@interface MyObject : NSObject {
- id _myproperty;
-}
-@property(assign) id myproperty;
-@end
-
-@implementation MyObject
-@synthesize myproperty = _myproperty;
- // warn: var wasn't retained but was released
-- (void)dealloc {
- [_myproperty release];
- [super dealloc];
-}
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.IncompatibleMethodTypes"><div class="namedescr expandable"><span class="name">
-osx.cocoa.IncompatibleMethodTypes</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for an incompatible type signature when overriding an Objective-C method.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface MyClass1 : NSObject
-- (int)foo;
-@end
-
-@implementation MyClass1
-- (int)foo { return 1; }
-@end
-
-@interface MyClass2 : MyClass1
-- (float)foo;
-@end
-
-@implementation MyClass2
-- (float)foo { return 1.0; } // warn
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.MissingSuperCall"><div class="namedescr expandable"><span class="name">
-osx.cocoa.MissingSuperCall</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warn about Objective-C methods that lack a necessary call to super. (Note: the
-compiler now has a warning for methods annotated with the <code>objc_requires_super</code>
-attribute. The checker exists to check methods in the Cocoa frameworks
-that haven't yet adopted this attribute.)</div></div></a></td>
-<td><div class="example"><pre>
-@interface Test : UIViewController
-@end
-@implementation Test
-- (void)viewDidLoad {} // warn
-@end
-</pre></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.NSAutoreleasePool"><div class="namedescr expandable"><span class="name">
-osx.cocoa.NSAutoreleasePool</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warn for suboptimal uses of NSAutoreleasePool in Objective-C
-GC mode (<code>-fobjc-gc</code> compiler option).</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
- [pool release]; // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.NSError"><div class="namedescr expandable"><span class="name">
-osx.cocoa.NSError</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check usage of <code>NSError**</code> parameters.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface A : NSObject
-- (void)foo:(NSError **)error;
-@end
-
-@implementation A
-- (void)foo:(NSError **)error {
- // warn: method accepting NSError** should have a non-void
- // return value
-}
-@end
-</pre></div>
-<div class="example"><pre>
-@interface A : NSObject
-- (BOOL)foo:(NSError **)error;
-@end
-
-@implementation A
-- (BOOL)foo:(NSError **)error {
- *error = 0; // warn: potential null dereference
- return 0;
-}
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.NilArg"><div class="namedescr expandable"><span class="name">
-osx.cocoa.NilArg</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for prohibited nil arguments in specific Objective-C method calls:<div class=functions>
-- caseInsensitiveCompare:<br>
-- compare:<br>
-- compare:options:<br>
-- compare:options:range:<br>
-- compare:options:range:locale:<br>
-- componentsSeparatedByCharactersInSet:<br>
-- initWithFormat:</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-NSComparisonResult test(NSString *s) {
- NSString *aString = nil;
- return [s caseInsensitiveCompare:aString];
- // warn: argument to 'NSString' method
- // 'caseInsensitiveCompare:' cannot be nil
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.ObjCGenerics"><div class="namedescr expandable"><span class="name">
-osx.cocoa.ObjCGenerics</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for type errors when using Objective-C generics.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-NSMutableArray<NSString *> *names = [NSMutableArray array];
-NSMutableArray *birthDates = names;
-
-// Warning: Conversion from value of type 'NSDate *'
-// to incompatible type 'NSString *'
-[birthDates addObject: [NSDate date]];
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.RetainCount"><div class="namedescr expandable"><span class="name">
-osx.cocoa.RetainCount</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for leaks and violations of the Cocoa Memory Management rules.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- NSString *s = [[NSString alloc] init]; // warn
-}
-</pre></div>
-<div class="example"><pre>
-CFStringRef test(char *bytes) {
- return CFStringCreateWithCStringNoCopy(
- 0, bytes, NSNEXTSTEPStringEncoding, 0); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.SelfInit"><div class="namedescr expandable"><span class="name">
-osx.cocoa.SelfInit</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check that <code>self</code> is properly initialized inside an initializer
-method.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface MyObj : NSObject {
- id x;
-}
-- (id)init;
-@end
-
-@implementation MyObj
-- (id)init {
- [super init];
- x = 0; // warn: instance variable used while 'self' is not
- // initialized
- return 0;
-}
-@end
-</pre></div>
-<div class="example"><pre>
-@interface MyObj : NSObject
-- (id)init;
-@end
-
-@implementation MyObj
-- (id)init {
- [super init];
- return self; // warn: returning uninitialized 'self'
-}
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.SuperDealloc"><div class="namedescr expandable"><span class="name">
-osx.cocoa.SuperDealloc</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warn about improper use of <code>[super dealloc]</code> in Objective-C.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface SuperDeallocThenReleaseIvarClass : NSObject {
- NSObject *_ivar;
-}
-@end
-
-@implementation SuperDeallocThenReleaseIvarClass
-- (void)dealloc {
- [super dealloc];
- [_ivar release]; // warn
-}
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.UnusedIvars"><div class="namedescr expandable"><span class="name">
-osx.cocoa.UnusedIvars</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warn about private ivars that are never used.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-@interface MyObj : NSObject {
-@private
- id x; // warn
-}
-@end
-
-@implementation MyObj
-@end
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.cocoa.VariadicMethodTypes"><div class="namedescr expandable"><span class="name">
-osx.cocoa.VariadicMethodTypes</span><span class="lang">
-(ObjC)</span><div class="descr">
-Check for passing non-Objective-C types to variadic collection initialization
-methods that expect only Objective-C types.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- [NSSet setWithObjects:@"Foo", "Bar", nil];
- // warn: argument should be an ObjC pointer type, not 'char *'
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.coreFoundation.CFError"><div class="namedescr expandable"><span class="name">
-osx.coreFoundation.CFError</span><span class="lang">
-(C)</span><div class="descr">
-Check usage of <code>CFErrorRef*</code> parameters.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(CFErrorRef *error) {
- // warn: function accepting CFErrorRef* should have a
- // non-void return
-}
-</pre></div>
-<div class="example"><pre>
-int foo(CFErrorRef *error) {
- *error = 0; // warn: potential null dereference
- return 0;
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.coreFoundation.CFNumber"><div class="namedescr expandable"><span class="name">
-osx.coreFoundation.CFNumber</span><span class="lang">
-(C)</span><div class="descr">
-Check for improper uses of <code>CFNumberCreate</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-CFNumberRef test(unsigned char x) {
- return CFNumberCreate(0, kCFNumberSInt16Type, &x);
- // warn: an 8-bit integer is used to initialize a 16-bit integer
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.coreFoundation.CFRetainRelease"><div class="namedescr expandable"><span class="name">
-osx.coreFoundation.CFRetainRelease</span><span class="lang">
-(C)</span><div class="descr">
-Check for null arguments to <code>CFRetain</code>, <code>CFRelease</code>,
-<code>CFMakeCollectable</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(CFTypeRef p) {
- if (!p)
- CFRetain(p); // warn
-}
-</pre></div>
-<div class="example"><pre>
-void test(int x, CFTypeRef p) {
- if (p)
- return;
-
- CFRelease(p); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.coreFoundation.containers.OutOfBounds"><div class="namedescr expandable"><span class="name">
-osx.coreFoundation.containers.OutOfBounds</span><span class="lang">
-(C)</span><div class="descr">
-Checks for out-of-bounds indexing when using the <code>CFArray</code> API.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- CFArrayRef A = CFArrayCreate(0, 0, 0, &kCFTypeArrayCallBacks);
- CFArrayGetValueAtIndex(A, 0); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="osx.coreFoundation.containers.PointerSizedValues"><div class="namedescr expandable"><span class="name">
-osx.coreFoundation.containers.PointerSizedValues</span><span class="lang">
-(C)</span><div class="descr">
-Warns if a <code>CFArray</code>, <code>CFDictionary</code>, or <code>CFSet</code> is
-created with non-pointer-size values.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int x[] = { 1 };
- CFArrayRef A = CFArrayCreate(0, (const void **)x, 1,
- &kCFTypeArrayCallBacks); // warn
-}
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== security =========================== -->
-<h3 id="security_checkers">Security Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="security.FloatLoopCounter"><div class="namedescr expandable"><span class="name">
-security.FloatLoopCounter</span><span class="lang">
-(C)</span><div class="descr">
-Warn on using a floating point value as a loop counter (CERT: FLP30-C,
-FLP30-CPP).</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- for (float x = 0.1f; x <= 1.0f; x += 0.1f) {} // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.UncheckedReturn"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.UncheckedReturn</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of functions whose return values must always be checked:<div class=functions>
-setuid<br>
-setgid<br>
-seteuid<br>
-setegid<br>
-setreuid<br>
-setregid</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- setuid(1); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.bcmp"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.bcmp</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of the <code>bcmp</code> function.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- bcmp(ptr0, ptr1, n); // warn
-}
-</pre></div></div></td></tr>
-
-<tr><td><a id="security.insecureAPI.bcopy"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.bcopy</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of the <code>bcopy</code> function.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- bcopy(src, dst, n); // warn
-}
-</pre></div></div></td></tr>
-
-<tr><td><a id="security.insecureAPI.bzero"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.bzero</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of the <code>bzero</code> function.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- bzero(ptr, n); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.getpw"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.getpw</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of the <code>getpw</code> function.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- char buff[1024];
- getpw(2, buff); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.gets"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.gets</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of the <code>gets</code> function.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- char buff[1024];
- gets(buff); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.mkstemp"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.mkstemp</span><span class="lang">
-(C)</span><div class="descr">
-Warn when <code>mktemp</code>, <code>mkstemp</code>, <code>mkstemps</code> or
-<code>mkdtemp</code> is passed fewer than 6
-X's in the format string.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- mkstemp("XX"); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.mktemp"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.mktemp</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of the <code>mktemp</code> function.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- char *x = mktemp("/tmp/zxcv"); // warn: insecure, use mkstemp
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.rand"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.rand</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of inferior random number generating functions (only if the <code>arc4random</code>
-function is available):<div class=functions>
-drand48<br>
-erand48<br>
-jrand48<br>
-lcong48<br>
-lrand48<br>
-mrand48<br>
-nrand48<br>
-random<br>
-rand_r</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- random(); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.strcpy"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.strcpy</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of the <code>strcpy</code> and <code>strcat</code> functions.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- char x[4];
- char *y = "abcd";
-
- strcpy(x, y); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.vfork"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.vfork</span><span class="lang">
-(C)</span><div class="descr">
-Warn on uses of the <code>vfork</code> function.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- vfork(); // warn
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="security.insecureAPI.decodeValueOfObjCType"><div class="namedescr expandable"><span class="name">
-security.insecureAPI.decodeValueOfObjCType</span><span class="lang">
-(ObjC)</span><div class="descr">
-Warn on uses of the <code>-[NSCoder decodeValueOfObjCType:at:]</code> method.
-The safe alternative is <code>-[NSCoder decodeValueOfObjCType:at:size:]</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test(NSCoder *decoder) {
- // This would be a vulnerability on 64-bit platforms
- // but not on 32-bit platforms.
- NSUInteger x;
- [decoder decodeValueOfObjCType:"I" at:&x]; // warn
-}
-</pre></div></div></td></tr>
-
-</tbody></table>
-
-<!-- =========================== unix =========================== -->
-<h3 id="unix_checkers">Unix Checkers</h3>
-<table class="checkers">
-<colgroup><col class="namedescr"><col class="example"></colgroup>
-<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
-
-<tbody>
-<tr><td><a id="unix.API"><div class="namedescr expandable"><span class="name">
-unix.API</span><span class="lang">
-(C)</span><div class="descr">
-Check calls to various UNIX/POSIX functions:<div class=functions>
-open<br>
-pthread_once<br>
-calloc<br>
-malloc<br>
-realloc<br>
-alloca</div></div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// Currently the check is performed for Apple targets only.
-void test(const char *path) {
- int fd = open(path, O_CREAT);
- // warn: call to 'open' requires a third argument when the
- // 'O_CREAT' flag is set
-}
-</pre></div>
-<div class="example"><pre>
-void f();
-
-void test() {
- pthread_once_t pred = {0x30B1BCBA, {0}};
- pthread_once(&pred, f);
- // warn: call to 'pthread_once' uses the local variable
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- void *p = malloc(0); // warn: allocation size of 0 bytes
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- void *p = calloc(0, 42); // warn: allocation size of 0 bytes
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- void *p = malloc(1);
- p = realloc(p, 0); // warn: allocation size of 0 bytes
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- void *p = alloca(0); // warn: allocation size of 0 bytes
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- void *p = valloc(0); // warn: allocation size of 0 bytes
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="unix.Malloc"><div class="namedescr expandable"><span class="name">
-unix.Malloc</span><span class="lang">
-(C)</span><div class="descr">
-Check for memory leaks, double-free, use-after-free, and offset problems
-involving <code>malloc</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- int *p = malloc(1);
- free(p);
- free(p); // warn: attempt to free released memory
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int *p = malloc(sizeof(int));
- free(p);
- *p = 1; // warn: use after free
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int *p = malloc(1);
- if (p)
- return; // warn: memory is never released
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int a[] = { 1 };
- free(a); // warn: argument is not allocated by malloc
-}
-</pre></div>
-<div class="example"><pre>
-void test() {
- int *p = malloc(sizeof(char));
- p = p - 1;
- free(p); // warn: argument to free() is offset by -4 bytes
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="unix.MallocSizeof"><div class="namedescr expandable"><span class="name">
-unix.MallocSizeof</span><span class="lang">
-(C)</span><div class="descr">
-Check for dubious <code>malloc</code>, <code>calloc</code> or
-<code>realloc</code> arguments involving <code>sizeof</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- long *p = malloc(sizeof(short));
- // warn: result is converted to 'long *', which is
- // incompatible with operand type 'short'
- free(p);
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="unix.MismatchedDeallocator"><div class="namedescr expandable"><span class="name">
-unix.MismatchedDeallocator</span><span class="lang">
-(C, C++, ObjC)</span><div class="descr">
-Check for mismatched deallocators (e.g. passing a pointer allocated
-with <code>new</code> to <code>free()</code>).</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-// C, C++
-void test() {
- int *p = (int *)malloc(sizeof(int));
- delete p; // warn
-}
-</pre></div>
-<div class="example"><pre>
-// C, C++
-void __attribute((ownership_returns(malloc))) *user_malloc(size_t);
-
-void test() {
- int *p = (int *)user_malloc(sizeof(int));
- delete p; // warn
-}
-</pre></div>
-<div class="example"><pre>
-// C, C++
-void test() {
- int *p = new int;
- free(p); // warn
-}
-</pre></div>
-<div class="example"><pre>
-// C, C++
-void test() {
- int *p = new int[1];
- realloc(p, sizeof(long)); // warn
-}
-</pre></div>
-<div class="example"><pre>
-// C, C++
-template <typename T>
-struct SimpleSmartPointer {
- T *ptr;
-
- explicit SimpleSmartPointer(T *p = 0) : ptr(p) {}
- ~SimpleSmartPointer() {
- delete ptr; // warn
- }
-};
-
-void test() {
- SimpleSmartPointer<int> a((int *)malloc(4));
-}
-</pre></div>
-<div class="example"><pre>
-// C++
-void test() {
- int *p = (int *)operator new(0);
- delete[] p; // warn
-}
-</pre></div>
-<div class="example"><pre>
-// Objective-C, C++
-void test(NSUInteger dataLength) {
- int *p = new int;
- NSData *d = [NSData dataWithBytesNoCopy:p
- length:sizeof(int) freeWhenDone:1];
- // warn: +dataWithBytesNoCopy:length:freeWhenDone: cannot take
- // ownership of memory allocated by 'new'
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="unix.Vfork"><div class="namedescr expandable"><span class="name">
-unix.Vfork</span><span class="lang">
-(C)</span><div class="descr">
-Check for proper usage of <code>vfork</code>.</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-int test(int x) {
- pid_t pid = vfork(); // warn
- if (pid != 0)
- return 0;
-
- switch (x) {
- case 0:
- pid = 1;
- execl("", "", 0);
- _exit(1);
- break;
- case 1:
- x = 0; // warn: this assignment is prohibited
- break;
- case 2:
- foo(); // warn: this function call is prohibited
- break;
- default:
- return 0; // warn: return is prohibited
- }
-
- while(1);
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="unix.cstring.BadSizeArg"><div class="namedescr expandable"><span class="name">
-unix.cstring.BadSizeArg</span><span class="lang">
-(C)</span><div class="descr">
-Check the size argument passed to <code>strncat</code> for common erroneous
-patterns. Use the <code>-Wno-strncat-size</code> compiler option to mute other
-<code>strncat</code>-related compiler warnings.
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-void test() {
- char dest[3];
- strncat(dest, "***", sizeof(dest));
- // warn: potential buffer overflow
-}
-</pre></div></div></td></tr>
-
-
-<tr><td><a id="unix.cstring.NullArg"><div class="namedescr expandable"><span class="name">
-unix.cstring.NullArg</span><span class="lang">
-(C)</span><div class="descr">
-Check for null pointers being passed as arguments to C string functions:<div class=functions>
-strlen<br>
-strnlen<br>
-strcpy<br>
-strncpy<br>
-strcat<br>
-strncat<br>
-strcmp<br>
-strncmp<br>
-strcasecmp<br>
-strncasecmp</div></div></div></a></td>
-<td><div class="example"><pre>
-int test() {
- return strlen(0); // warn
-}
-</pre></div></td></tr>
-
-</tbody></table>
+<h1>The Available Checkers documentation has moved to clang.llvm.org</h1>
+<p style="color:red; font-size:200%">This page is deprecated and will be removed in release 21.0</p>
+<a href="https://clang.llvm.org/docs/analyzer/checkers.html">The new site</a>
+<script>window.location='https://clang.llvm.org/docs/analyzer/checkers.html'</script>
</div> <!-- page -->
</div> <!-- content -->
>From 218f71d43283c2916aac6923edf4df6c0024676a Mon Sep 17 00:00:00 2001
From: Shan Huang <52285902006 at stu.ecnu.edu.cn>
Date: Thu, 4 Jul 2024 21:06:25 +0800
Subject: [PATCH 244/246] [DebugInfo][LowerConstantIntrinsics] Fix the missing
debug location of new branch instruction (#97145)
Fix #97142.
---
.../Scalar/LowerConstantIntrinsics.cpp | 5 +-
.../preserving-debugloc-br.ll | 48 +++++++++++++++++++
2 files changed, 52 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/Transforms/LowerConstantIntrinsics/preserving-debugloc-br.ll
diff --git a/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
index 939c36164f781..bd7895feb64a7 100644
--- a/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
@@ -85,8 +85,11 @@ static bool replaceConditionalBranchesOnConstant(Instruction *II,
if (Target && Target != Other) {
BasicBlock *Source = BI->getParent();
Other->removePredecessor(Source);
+
+ Instruction *NewBI = BranchInst::Create(Target, Source);
+ NewBI->setDebugLoc(BI->getDebugLoc());
BI->eraseFromParent();
- BranchInst::Create(Target, Source);
+
if (DTU)
DTU->applyUpdates({{DominatorTree::Delete, Source, Other}});
if (pred_empty(Other))
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/preserving-debugloc-br.ll b/llvm/test/Transforms/LowerConstantIntrinsics/preserving-debugloc-br.ll
new file mode 100644
index 0000000000000..0302c9d6435ca
--- /dev/null
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/preserving-debugloc-br.ll
@@ -0,0 +1,48 @@
+; RUN: opt -S -passes=lower-constant-intrinsics < %s | FileCheck %s
+
+; Check that LowerConstantIntrinsics's replaceConditionalBranchesOnConstant() correctly
+; propagates the debug location from the old br instruction to the new one.
+
+; Function Attrs: nounwind
+define i32 @test_branch(i32 %in) !dbg !5 {
+; CHECK-LABEL: define i32 @test_branch(
+; CHECK: br label %[[FALSE:.*]], !dbg [[DBG8:![0-9]+]]
+; CHECK: [[FALSE]]:
+;
+ %v = call i1 @llvm.is.constant.i32(i32 %in), !dbg !8
+ br i1 %v, label %True, label %False, !dbg !9
+
+True: ; preds = %0
+ %call1 = tail call i32 @subfun_1(), !dbg !10
+ ret i32 %call1, !dbg !11
+
+False: ; preds = %0
+ %call2 = tail call i32 @subfun_2(), !dbg !12
+ ret i32 %call2, !dbg !13
+}
+
+declare i32 @subfun_1()
+declare i32 @subfun_2()
+
+declare i1 @llvm.is.constant.i32(i32)
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+; CHECK: [[DBG8]] = !DILocation(line: 2,
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "main.ll", directory: "/")
+!2 = !{i32 6}
+!3 = !{i32 0}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "test_branch", linkageName: "test_branch", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !DILocation(line: 1, column: 1, scope: !5)
+!9 = !DILocation(line: 2, column: 1, scope: !5)
+!10 = !DILocation(line: 3, column: 1, scope: !5)
+!11 = !DILocation(line: 4, column: 1, scope: !5)
+!12 = !DILocation(line: 5, column: 1, scope: !5)
+!13 = !DILocation(line: 6, column: 1, scope: !5)
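This patch and the LoopFlatten one that follows apply the same idiom: build
the replacement terminator first, copy the DebugLoc across, and only then
erase the old instruction. A minimal sketch of that ordering as a standalone
helper (the helper name is hypothetical; BranchInst::Create and setDebugLoc
are the actual LLVM APIs the diff uses):

  // Hypothetical helper showing the debug-location-preserving replacement
  // the patch performs inline. The block briefly holds two terminators
  // between Create() and eraseFromParent(), which is fine mid-pass.
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  static BranchInst *replaceWithUncondBranch(BasicBlock *BB,
                                             BasicBlock *Target) {
    Instruction *OldTerm = BB->getTerminator();
    BranchInst *NewBI = BranchInst::Create(Target, BB); // append to BB
    NewBI->setDebugLoc(OldTerm->getDebugLoc());         // keep the line info
    OldTerm->eraseFromParent();
    return NewBI;
  }

The only behavioral change from the removed code is the order of Create()
relative to eraseFromParent(), plus the explicit setDebugLoc() call.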
>From e0f4d27a50c6b524d6cf24c347038a18112ce517 Mon Sep 17 00:00:00 2001
From: Shan Huang <52285902006 at stu.ecnu.edu.cn>
Date: Thu, 4 Jul 2024 21:06:44 +0800
Subject: [PATCH 245/246] [DebugInfo][LoopFlatten] Fix missing debug location
update for new br instruction (#97085)
Fix #97084.
---
llvm/lib/Transforms/Scalar/LoopFlatten.cpp | 6 +-
.../LoopFlatten/preserving_debugloc_br.ll | 74 +++++++++++++++++++
2 files changed, 78 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/Transforms/LoopFlatten/preserving_debugloc_br.ll
diff --git a/llvm/lib/Transforms/Scalar/LoopFlatten.cpp b/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
index 715f63b836c68..d5e91d3c1decf 100644
--- a/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
@@ -783,8 +783,10 @@ static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
// Replace the inner loop backedge with an unconditional branch to the exit.
BasicBlock *InnerExitBlock = FI.InnerLoop->getExitBlock();
BasicBlock *InnerExitingBlock = FI.InnerLoop->getExitingBlock();
- InnerExitingBlock->getTerminator()->eraseFromParent();
- BranchInst::Create(InnerExitBlock, InnerExitingBlock);
+ Instruction *Term = InnerExitingBlock->getTerminator();
+ Instruction *BI = BranchInst::Create(InnerExitBlock, InnerExitingBlock);
+ BI->setDebugLoc(Term->getDebugLoc());
+ Term->eraseFromParent();
// Update the DomTree and MemorySSA.
DT->deleteEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
diff --git a/llvm/test/Transforms/LoopFlatten/preserving_debugloc_br.ll b/llvm/test/Transforms/LoopFlatten/preserving_debugloc_br.ll
new file mode 100644
index 0000000000000..eb15c3eecf7da
--- /dev/null
+++ b/llvm/test/Transforms/LoopFlatten/preserving_debugloc_br.ll
@@ -0,0 +1,74 @@
+; RUN: opt -S -passes="loop(loop-flatten)" < %s | FileCheck %s
+
+; Check that LoopFlatten's DoFlattenLoopPair() propagates the debug location of the
+; original terminator to the new branch instruction.
+
+define i32 @test1(i32 %val, ptr nocapture %A) !dbg !5 {
+; CHECK-LABEL: define i32 @test1(
+; CHECK-LABEL: for.body3:
+; CHECK: br label %for.inc6, !dbg [[DBG22:![0-9]+]]
+; CHECK-LABEL: for.inc6:
+;
+entry:
+ br label %for.body, !dbg !8
+
+for.body: ; preds = %for.inc6, %entry
+ %i.018 = phi i32 [ 0, %entry ], [ %inc7, %for.inc6 ], !dbg !9
+ %mul = mul nuw nsw i32 %i.018, 20, !dbg !10
+ br label %for.body3, !dbg !11
+
+for.body3: ; preds = %for.body3, %for.body
+ %j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.body3 ], !dbg !12
+ %add = add nuw nsw i32 %j.017, %mul, !dbg !13
+ %arrayidx = getelementptr inbounds i16, ptr %A, i32 %add, !dbg !14
+ %0 = load i16, ptr %arrayidx, align 2, !dbg !15
+ %conv16 = zext i16 %0 to i32, !dbg !16
+ %add4 = add i32 %conv16, %val, !dbg !17
+ %conv5 = trunc i32 %add4 to i16, !dbg !18
+ store i16 %conv5, ptr %arrayidx, align 2, !dbg !19
+ %inc = add nuw nsw i32 %j.017, 1, !dbg !20
+ %exitcond = icmp ne i32 %inc, 20, !dbg !21
+ br i1 %exitcond, label %for.body3, label %for.inc6, !dbg !22
+
+for.inc6: ; preds = %for.body3
+ %inc7 = add nuw nsw i32 %i.018, 1, !dbg !23
+ %exitcond19 = icmp ne i32 %inc7, 10, !dbg !24
+ br i1 %exitcond19, label %for.body, label %for.end8, !dbg !25
+
+for.end8: ; preds = %for.inc6
+ ret i32 10, !dbg !26
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+; CHECK: [[DBG22]] = !DILocation(line: 15,
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "temp.ll", directory: "/")
+!2 = !{i32 19}
+!3 = !{i32 0}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "test1", linkageName: "test1", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !DILocation(line: 1, column: 1, scope: !5)
+!9 = !DILocation(line: 2, column: 1, scope: !5)
+!10 = !DILocation(line: 3, column: 1, scope: !5)
+!11 = !DILocation(line: 4, column: 1, scope: !5)
+!12 = !DILocation(line: 5, column: 1, scope: !5)
+!13 = !DILocation(line: 6, column: 1, scope: !5)
+!14 = !DILocation(line: 7, column: 1, scope: !5)
+!15 = !DILocation(line: 8, column: 1, scope: !5)
+!16 = !DILocation(line: 9, column: 1, scope: !5)
+!17 = !DILocation(line: 10, column: 1, scope: !5)
+!18 = !DILocation(line: 11, column: 1, scope: !5)
+!19 = !DILocation(line: 12, column: 1, scope: !5)
+!20 = !DILocation(line: 13, column: 1, scope: !5)
+!21 = !DILocation(line: 14, column: 1, scope: !5)
+!22 = !DILocation(line: 15, column: 1, scope: !5)
+!23 = !DILocation(line: 16, column: 1, scope: !5)
+!24 = !DILocation(line: 17, column: 1, scope: !5)
+!25 = !DILocation(line: 18, column: 1, scope: !5)
+!26 = !DILocation(line: 19, column: 1, scope: !5)
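(Aside, not part of the patch: when run outside of lit, the RUN line in
the test above expands to an opt | FileCheck pipeline along these lines,
with the test path spelled out explicitly:

  opt -S -passes='loop(loop-flatten)' \
      llvm/test/Transforms/LoopFlatten/preserving_debugloc_br.ll \
    | FileCheck llvm/test/Transforms/LoopFlatten/preserving_debugloc_br.ll

FileCheck reads the transformed IR on stdin and matches it against the
CHECK lines embedded in the same test file, including the [[DBG22]]
capture that pins the new branch to line 15.)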
From d26d2f06826d3a92499980bf818ccd18cbdc89fe Mon Sep 17 00:00:00 2001
From: Sergio Afonso <safonsof at amd.com>
Date: Wed, 3 Jul 2024 11:59:23 +0100
Subject: [PATCH 246/246] [Flang][OpenMP] NFC: Remove unused argument for
omp.target lowering
This patch removes the `outerCombined` argument from `genTargetOp()` and the
`processReduction` argument from `genTargetClauses()`, as they aren't used.
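As a standalone illustration (simplified types, not flang's actual
interfaces), this is the routine cleanup of a defaulted parameter that no
callee ever reads: drop it from the declaration and delete the dead
argument at each call site.

#include <iostream>

struct TargetOp {};

// Before: static TargetOp genTargetOp(int item, bool outerCombined = false);
// After: the unused flag is gone, so call sites shrink accordingly.
static TargetOp genTargetOp(int item) {
  std::cout << "lowering target construct for item " << item << '\n';
  return TargetOp{};
}

int main() {
  genTargetOp(42); // previously: genTargetOp(42, /*outerCombined=*/false);
  return 0;
}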
---
flang/lib/Lower/OpenMP/OpenMP.cpp | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 9ad092a1a00bd..5b2cf7930e4a3 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -1136,7 +1136,7 @@ static void genSingleClauses(lower::AbstractConverter &converter,
static void genTargetClauses(
lower::AbstractConverter &converter, semantics::SemanticsContext &semaCtx,
lower::StatementContext &stmtCtx, const List<Clause> &clauses,
- mlir::Location loc, bool processHostOnlyClauses, bool processReduction,
+ mlir::Location loc, bool processHostOnlyClauses,
mlir::omp::TargetClauseOps &clauseOps,
llvm::SmallVectorImpl<const semantics::Symbol *> &mapSyms,
llvm::SmallVectorImpl<mlir::Location> &mapLocs,
@@ -1678,7 +1678,7 @@ static mlir::omp::TargetOp
genTargetOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval,
mlir::Location loc, const ConstructQueue &queue,
- ConstructQueue::iterator item, bool outerCombined = false) {
+ ConstructQueue::iterator item) {
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
lower::StatementContext stmtCtx;
@@ -1692,10 +1692,9 @@ genTargetOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
llvm::SmallVector<mlir::Location> mapLocs, devicePtrLocs, deviceAddrLocs;
llvm::SmallVector<mlir::Type> mapTypes, devicePtrTypes, deviceAddrTypes;
genTargetClauses(converter, semaCtx, stmtCtx, item->clauses, loc,
- processHostOnlyClauses, /*processReduction=*/outerCombined,
- clauseOps, mapSyms, mapLocs, mapTypes, deviceAddrSyms,
- deviceAddrLocs, deviceAddrTypes, devicePtrSyms,
- devicePtrLocs, devicePtrTypes);
+ processHostOnlyClauses, clauseOps, mapSyms, mapLocs,
+ mapTypes, deviceAddrSyms, deviceAddrLocs, deviceAddrTypes,
+ devicePtrSyms, devicePtrLocs, devicePtrTypes);
llvm::SmallVector<const semantics::Symbol *> privateSyms;
DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval,
@@ -2101,8 +2100,7 @@ static void genOMPDispatch(lower::AbstractConverter &converter,
genSingleOp(converter, symTable, semaCtx, eval, loc, queue, item);
break;
case llvm::omp::Directive::OMPD_target:
- genTargetOp(converter, symTable, semaCtx, eval, loc, queue, item,
- /*outerCombined=*/false);
+ genTargetOp(converter, symTable, semaCtx, eval, loc, queue, item);
break;
case llvm::omp::Directive::OMPD_target_data:
genTargetDataOp(converter, symTable, semaCtx, eval, loc, queue, item);