[llvm] [NVPTX] Add NVPTXIncreaseAlignmentPass to improve vectorization (PR #144958)
Alex MacLean via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 25 07:52:38 PDT 2025
https://github.com/AlexMaclean updated https://github.com/llvm/llvm-project/pull/144958
From 0e92b678df767c463fb5e7f78aaf2b6d26088b49 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Thu, 19 Jun 2025 15:29:12 +0000
Subject: [PATCH 1/3] [NVPTX] Add NVPTXIncreaseAlignmentPass to improve
vectorization
---
llvm/lib/Target/NVPTX/CMakeLists.txt | 1 +
llvm/lib/Target/NVPTX/NVPTX.h | 7 +
.../Target/NVPTX/NVPTXIncreaseAlignment.cpp | 131 ++++++++++++++++++
llvm/lib/Target/NVPTX/NVPTXPassRegistry.def | 1 +
llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp | 2 +
.../CodeGen/NVPTX/call-with-alloca-buffer.ll | 2 +-
.../CodeGen/NVPTX/increase-local-align.ll | 85 ++++++++++++
7 files changed, 228 insertions(+), 1 deletion(-)
create mode 100644 llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
create mode 100644 llvm/test/CodeGen/NVPTX/increase-local-align.ll
diff --git a/llvm/lib/Target/NVPTX/CMakeLists.txt b/llvm/lib/Target/NVPTX/CMakeLists.txt
index 693f0d0b35edc..9d91100d35b3a 100644
--- a/llvm/lib/Target/NVPTX/CMakeLists.txt
+++ b/llvm/lib/Target/NVPTX/CMakeLists.txt
@@ -26,6 +26,7 @@ set(NVPTXCodeGen_sources
NVPTXISelLowering.cpp
NVPTXLowerAggrCopies.cpp
NVPTXLowerAlloca.cpp
+ NVPTXIncreaseAlignment.cpp
NVPTXLowerArgs.cpp
NVPTXLowerUnreachable.cpp
NVPTXMCExpr.cpp
diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h
index b7c5a0a5c9983..a0d5cc5fd8e87 100644
--- a/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/llvm/lib/Target/NVPTX/NVPTX.h
@@ -55,6 +55,7 @@ FunctionPass *createNVPTXTagInvariantLoadsPass();
MachineFunctionPass *createNVPTXPeephole();
MachineFunctionPass *createNVPTXProxyRegErasurePass();
MachineFunctionPass *createNVPTXForwardParamsPass();
+FunctionPass *createNVPTXIncreaseLocalAlignmentPass();
void initializeNVVMReflectLegacyPassPass(PassRegistry &);
void initializeGenericToNVVMLegacyPassPass(PassRegistry &);
@@ -76,6 +77,7 @@ void initializeNVPTXAAWrapperPassPass(PassRegistry &);
void initializeNVPTXExternalAAWrapperPass(PassRegistry &);
void initializeNVPTXPeepholePass(PassRegistry &);
void initializeNVPTXTagInvariantLoadLegacyPassPass(PassRegistry &);
+void initializeNVPTXIncreaseLocalAlignmentLegacyPassPass(PassRegistry &);
struct NVVMIntrRangePass : PassInfoMixin<NVVMIntrRangePass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
@@ -111,6 +113,11 @@ struct NVPTXTagInvariantLoadsPass : PassInfoMixin<NVPTXTagInvariantLoadsPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
+struct NVPTXIncreaseLocalAlignmentPass
+ : PassInfoMixin<NVPTXIncreaseLocalAlignmentPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
namespace NVPTX {
enum DrvInterface {
NVCL,
diff --git a/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp b/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
new file mode 100644
index 0000000000000..4078ef340970f
--- /dev/null
+++ b/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
@@ -0,0 +1,131 @@
+//===-- NVPTXIncreaseAlignment.cpp - Increase alignment for local arrays --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A simple pass that looks at local memory arrays that are statically
+// sized and sets an appropriate alignment for them. This enables vectorization
+// of loads/stores to these arrays if not explicitly specified by the client.
+//
+// TODO: Ideally we should do a bin-packing of local arrays to maximize
+// alignments while minimizing holes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+ MaxLocalArrayAlignment("nvptx-use-max-local-array-alignment",
+ cl::init(false), cl::Hidden,
+ cl::desc("Use maximum alignment for local memory"));
+
+static constexpr Align MaxPTXArrayAlignment = Align::Constant<16>();
+
+/// Get the maximum useful alignment for an array. This is more likely to
+/// produce holes in the local memory.
+///
+/// Choose an alignment large enough that the entire array could be loaded with
+/// a single vector load (if possible). Cap the alignment at
+/// MaxPTXArrayAlignment.
+static Align getAggressiveArrayAlignment(const unsigned ArraySize) {
+ return std::min(MaxPTXArrayAlignment, Align(PowerOf2Ceil(ArraySize)));
+}
+
+/// Get the alignment of arrays that reduces the chances of leaving holes when
+/// arrays are allocated within a contiguous memory buffer (like shared memory
+/// and stack). Holes are still possible before and after the array allocation.
+///
+/// Choose the largest alignment such that the array size is a multiple of the
+/// alignment. If all elements of the buffer are allocated in order of
+/// alignment (higher to lower) no holes will be left.
+static Align getConservativeArrayAlignment(const unsigned ArraySize) {
+ return commonAlignment(MaxPTXArrayAlignment, ArraySize);
+}
+
+/// Find a better alignment for local arrays
+static bool updateAllocaAlignment(const DataLayout &DL, AllocaInst *Alloca) {
+ // Looking for statically sized local arrays
+ if (!Alloca->isStaticAlloca())
+ return false;
+
+ // For now, we only support array allocas
+ if (!(Alloca->isArrayAllocation() || Alloca->getAllocatedType()->isArrayTy()))
+ return false;
+
+ const auto ArraySize = Alloca->getAllocationSize(DL);
+ if (!(ArraySize && ArraySize->isFixed()))
+ return false;
+
+ const auto ArraySizeValue = ArraySize->getFixedValue();
+ const Align PreferredAlignment =
+ MaxLocalArrayAlignment ? getAggressiveArrayAlignment(ArraySizeValue)
+ : getConservativeArrayAlignment(ArraySizeValue);
+
+ if (PreferredAlignment > Alloca->getAlign()) {
+ Alloca->setAlignment(PreferredAlignment);
+ return true;
+ }
+
+ return false;
+}
+
+static bool runSetLocalArrayAlignment(Function &F) {
+ bool Changed = false;
+ const DataLayout &DL = F.getParent()->getDataLayout();
+
+ BasicBlock &EntryBB = F.getEntryBlock();
+ for (Instruction &I : EntryBB)
+ if (AllocaInst *Alloca = dyn_cast<AllocaInst>(&I))
+ Changed |= updateAllocaAlignment(DL, Alloca);
+
+ return Changed;
+}
+
+namespace {
+struct NVPTXIncreaseLocalAlignmentLegacyPass : public FunctionPass {
+ static char ID;
+ NVPTXIncreaseLocalAlignmentLegacyPass() : FunctionPass(ID) {}
+
+ bool runOnFunction(Function &F) override;
+ StringRef getPassName() const override {
+ return "NVPTX Increase Local Alignment";
+ }
+};
+} // namespace
+
+char NVPTXIncreaseLocalAlignmentLegacyPass::ID = 0;
+INITIALIZE_PASS(NVPTXIncreaseLocalAlignmentLegacyPass,
+ "nvptx-increase-local-alignment",
+ "Increase alignment for statically sized alloca arrays", false,
+ false)
+
+FunctionPass *llvm::createNVPTXIncreaseLocalAlignmentPass() {
+ return new NVPTXIncreaseLocalAlignmentLegacyPass();
+}
+
+bool NVPTXIncreaseLocalAlignmentLegacyPass::runOnFunction(Function &F) {
+ return runSetLocalArrayAlignment(F);
+}
+
+PreservedAnalyses
+NVPTXIncreaseLocalAlignmentPass::run(Function &F, FunctionAnalysisManager &AM) {
+ bool Changed = runSetLocalArrayAlignment(F);
+
+ if (!Changed)
+ return PreservedAnalyses::all();
+
+ PreservedAnalyses PA;
+ PA.preserveSet<CFGAnalyses>();
+ return PA;
+}
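
An aside that may help reviewers: the two strategies are easy to sanity-check in isolation. Below is a minimal standalone sketch (not part of the patch; plain C++ that re-derives the LLVM helpers, with the 16-byte MaxPTXArrayAlignment cap assumed as the default):

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  // Smallest power of two >= Size (for Size > 0), like llvm::PowerOf2Ceil.
  static uint64_t powerOf2Ceil(uint64_t Size) {
    uint64_t A = 1;
    while (A < Size)
      A <<= 1;
    return A;
  }

  // Aggressive: large enough that a single vector access could cover the
  // whole array, capped at MaxAlign (getAggressiveArrayAlignment above).
  static uint64_t aggressiveAlign(uint64_t Size, uint64_t MaxAlign = 16) {
    return std::min(MaxAlign, powerOf2Ceil(Size));
  }

  // Conservative: largest power of two dividing Size, capped at MaxAlign,
  // like llvm::commonAlignment (getConservativeArrayAlignment above).
  static uint64_t conservativeAlign(uint64_t Size, uint64_t MaxAlign = 16) {
    return std::min(MaxAlign, Size & ~(Size - 1)); // lowest set bit of Size
  }

  int main() {
    for (uint64_t Size : {1, 8, 12, 63, 64, 100})
      std::printf("size %3llu -> aggressive %2llu, conservative %2llu\n",
                  (unsigned long long)Size,
                  (unsigned long long)aggressiveAlign(Size),
                  (unsigned long long)conservativeAlign(Size));
  }

For example, a 63-byte array is raised to align 16 only aggressively (PowerOf2Ceil(63) = 64, capped at 16), while conservatively it stays at align 1 because 63 is odd; that is the same DEFAULT vs. MAX split exercised by increase-local-align.ll below.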
diff --git a/llvm/lib/Target/NVPTX/NVPTXPassRegistry.def b/llvm/lib/Target/NVPTX/NVPTXPassRegistry.def
index ee37c9826012c..827cb7bba7018 100644
--- a/llvm/lib/Target/NVPTX/NVPTXPassRegistry.def
+++ b/llvm/lib/Target/NVPTX/NVPTXPassRegistry.def
@@ -40,4 +40,5 @@ FUNCTION_PASS("nvvm-intr-range", NVVMIntrRangePass())
FUNCTION_PASS("nvptx-copy-byval-args", NVPTXCopyByValArgsPass())
FUNCTION_PASS("nvptx-lower-args", NVPTXLowerArgsPass(*this))
FUNCTION_PASS("nvptx-tag-invariant-loads", NVPTXTagInvariantLoadsPass())
+FUNCTION_PASS("nvptx-increase-local-alignment", NVPTXIncreaseLocalAlignmentPass())
#undef FUNCTION_PASS
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index ef310e5828f22..c4b629514087e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -392,6 +392,8 @@ void NVPTXPassConfig::addIRPasses() {
// but EarlyCSE can do neither of them.
if (getOptLevel() != CodeGenOptLevel::None) {
addEarlyCSEOrGVNPass();
+ // Increase alignment for local arrays to improve vectorization.
+ addPass(createNVPTXIncreaseLocalAlignmentPass());
if (!DisableLoadStoreVectorizer)
addPass(createLoadStoreVectorizerPass());
addPass(createSROAPass());
diff --git a/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll b/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
index a2175dd009f5f..799016390424a 100644
--- a/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
+++ b/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
@@ -20,7 +20,7 @@ define ptx_kernel void @kernel_func(ptr %a) {
entry:
%buf = alloca [16 x i8], align 4
-; CHECK: .local .align 4 .b8 __local_depot0[16]
+; CHECK: .local .align 16 .b8 __local_depot0[16]
; CHECK: mov.b64 %SPL
; CHECK: ld.param.b64 %rd[[A_REG:[0-9]+]], [kernel_func_param_0]
diff --git a/llvm/test/CodeGen/NVPTX/increase-local-align.ll b/llvm/test/CodeGen/NVPTX/increase-local-align.ll
new file mode 100644
index 0000000000000..605c4b5b2b77d
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/increase-local-align.ll
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=nvptx-increase-local-alignment < %s | FileCheck %s --check-prefixes=COMMON,DEFAULT
+; RUN: opt -S -passes=nvptx-increase-local-alignment -nvptx-use-max-local-array-alignment < %s | FileCheck %s --check-prefixes=COMMON,MAX
+target triple = "nvptx64-nvidia-cuda"
+
+define void @test1() {
+; COMMON-LABEL: define void @test1() {
+; COMMON-NEXT: [[A:%.*]] = alloca i8, align 1
+; COMMON-NEXT: ret void
+;
+ %a = alloca i8, align 1
+ ret void
+}
+
+define void @test2() {
+; DEFAULT-LABEL: define void @test2() {
+; DEFAULT-NEXT: [[A:%.*]] = alloca [63 x i8], align 1
+; DEFAULT-NEXT: ret void
+;
+; MAX-LABEL: define void @test2() {
+; MAX-NEXT: [[A:%.*]] = alloca [63 x i8], align 16
+; MAX-NEXT: ret void
+;
+ %a = alloca [63 x i8], align 1
+ ret void
+}
+
+define void @test3() {
+; COMMON-LABEL: define void @test3() {
+; COMMON-NEXT: [[A:%.*]] = alloca [64 x i8], align 16
+; COMMON-NEXT: ret void
+;
+ %a = alloca [64 x i8], align 1
+ ret void
+}
+
+define void @test4() {
+; DEFAULT-LABEL: define void @test4() {
+; DEFAULT-NEXT: [[A:%.*]] = alloca i8, i32 63, align 1
+; DEFAULT-NEXT: ret void
+;
+; MAX-LABEL: define void @test4() {
+; MAX-NEXT: [[A:%.*]] = alloca i8, i32 63, align 16
+; MAX-NEXT: ret void
+;
+ %a = alloca i8, i32 63, align 1
+ ret void
+}
+
+define void @test5() {
+; COMMON-LABEL: define void @test5() {
+; COMMON-NEXT: [[A:%.*]] = alloca i8, i32 64, align 16
+; COMMON-NEXT: ret void
+;
+ %a = alloca i8, i32 64, align 1
+ ret void
+}
+
+define void @test6() {
+; COMMON-LABEL: define void @test6() {
+; COMMON-NEXT: [[A:%.*]] = alloca i8, align 32
+; COMMON-NEXT: ret void
+;
+ %a = alloca i8, align 32
+ ret void
+}
+
+define void @test7() {
+; COMMON-LABEL: define void @test7() {
+; COMMON-NEXT: [[A:%.*]] = alloca i32, align 2
+; COMMON-NEXT: ret void
+;
+ %a = alloca i32, align 2
+ ret void
+}
+
+define void @test8() {
+; COMMON-LABEL: define void @test8() {
+; COMMON-NEXT: [[A:%.*]] = alloca [2 x i32], align 8
+; COMMON-NEXT: ret void
+;
+ %a = alloca [2 x i32], align 2
+ ret void
+}
+
From 791bb1d48770527ec3d3a3979c0c29d1e8776824 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Wed, 25 Jun 2025 03:05:39 +0000
Subject: [PATCH 2/3] address comments
---
.../Target/NVPTX/NVPTXIncreaseAlignment.cpp | 52 +++++++++++++------
1 file changed, 35 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp b/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
index 4078ef340970f..f6048b25db91e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
// A simple pass that looks at local memory arrays that are statically
-// sized and sets an appropriate alignment for them. This enables vectorization
+// sized and potentially increases their alignment. This enables vectorization
// of loads/stores to these arrays if not explicitly specified by the client.
//
// TODO: Ideally we should do a bin-packing of local arrays to maximize
@@ -16,12 +16,15 @@
//===----------------------------------------------------------------------===//
#include "NVPTX.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/NVPTXAddrSpace.h"
using namespace llvm;
@@ -30,7 +33,20 @@ static cl::opt<bool>
cl::init(false), cl::Hidden,
cl::desc("Use maximum alignment for local memory"));
-static constexpr Align MaxPTXArrayAlignment = Align::Constant<16>();
+namespace {
+struct NVPTXIncreaseLocalAlignment {
+ const Align MaxAlign;
+
+ NVPTXIncreaseLocalAlignment(const TargetTransformInfo &TTI)
+ : MaxAlign(TTI.getLoadStoreVecRegBitWidth(NVPTXAS::ADDRESS_SPACE_LOCAL)) {
+ }
+
+ bool run(Function &F);
+ bool updateAllocaAlignment(AllocaInst *Alloca, const DataLayout &DL);
+ Align getAggressiveArrayAlignment(unsigned ArraySize);
+ Align getConservativeArrayAlignment(unsigned ArraySize);
+};
+} // namespace
/// Get the maximum useful alignment for an array. This is more likely to
/// produce holes in the local memory.
@@ -38,8 +54,9 @@ static constexpr Align MaxPTXArrayAlignment = Align::Constant<16>();
/// Choose an alignment large enough that the entire array could be loaded with
/// a single vector load (if possible). Cap the alignment at
/// MaxPTXArrayAlignment.
-static Align getAggressiveArrayAlignment(const unsigned ArraySize) {
- return std::min(MaxPTXArrayAlignment, Align(PowerOf2Ceil(ArraySize)));
+Align NVPTXIncreaseLocalAlignment::getAggressiveArrayAlignment(
+ const unsigned ArraySize) {
+ return std::min(MaxAlign, Align(PowerOf2Ceil(ArraySize)));
}
/// Get the alignment of arrays that reduces the chances of leaving holes when
@@ -49,20 +66,18 @@ static Align getAggressiveArrayAlignment(const unsigned ArraySize) {
/// Choose the largest alignment such that the array size is a multiple of the
/// alignment. If all elements of the buffer are allocated in order of
/// alignment (higher to lower) no holes will be left.
-static Align getConservativeArrayAlignment(const unsigned ArraySize) {
- return commonAlignment(MaxPTXArrayAlignment, ArraySize);
+Align NVPTXIncreaseLocalAlignment::getConservativeArrayAlignment(
+ const unsigned ArraySize) {
+ return commonAlignment(MaxAlign, ArraySize);
}
/// Find a better alignment for local arrays
-static bool updateAllocaAlignment(const DataLayout &DL, AllocaInst *Alloca) {
+bool NVPTXIncreaseLocalAlignment::updateAllocaAlignment(AllocaInst *Alloca,
+ const DataLayout &DL) {
// Looking for statically sized local arrays
if (!Alloca->isStaticAlloca())
return false;
- // For now, we only support array allocas
- if (!(Alloca->isArrayAllocation() || Alloca->getAllocatedType()->isArrayTy()))
- return false;
-
const auto ArraySize = Alloca->getAllocationSize(DL);
if (!(ArraySize && ArraySize->isFixed()))
return false;
@@ -80,14 +95,14 @@ static bool updateAllocaAlignment(const DataLayout &DL, AllocaInst *Alloca) {
return false;
}
-static bool runSetLocalArrayAlignment(Function &F) {
+bool NVPTXIncreaseLocalAlignment::run(Function &F) {
bool Changed = false;
- const DataLayout &DL = F.getParent()->getDataLayout();
+ const auto &DL = F.getParent()->getDataLayout();
BasicBlock &EntryBB = F.getEntryBlock();
for (Instruction &I : EntryBB)
if (AllocaInst *Alloca = dyn_cast<AllocaInst>(&I))
- Changed |= updateAllocaAlignment(DL, Alloca);
+ Changed |= updateAllocaAlignment(Alloca, DL);
return Changed;
}
@@ -115,12 +130,15 @@ FunctionPass *llvm::createNVPTXIncreaseLocalAlignmentPass() {
}
bool NVPTXIncreaseLocalAlignmentLegacyPass::runOnFunction(Function &F) {
- return runSetLocalArrayAlignment(F);
+ const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ return NVPTXIncreaseLocalAlignment(TTI).run(F);
}
PreservedAnalyses
-NVPTXIncreaseLocalAlignmentPass::run(Function &F, FunctionAnalysisManager &AM) {
- bool Changed = runSetLocalArrayAlignment(F);
+NVPTXIncreaseLocalAlignmentPass::run(Function &F,
+ FunctionAnalysisManager &FAM) {
+ const auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
+ bool Changed = NVPTXIncreaseLocalAlignment(TTI).run(F);
if (!Changed)
return PreservedAnalyses::all();
From 755ef490d3b7c2879c74c4f82c27a3476481f327 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Wed, 25 Jun 2025 14:52:23 +0000
Subject: [PATCH 3/3] address comments
---
.../Target/NVPTX/NVPTXIncreaseAlignment.cpp | 12 +-
.../CodeGen/NVPTX/increase-local-align.ll | 2 +-
llvm/test/CodeGen/NVPTX/lower-byval-args.ll | 14 +-
llvm/test/CodeGen/NVPTX/variadics-backend.ll | 12 +-
4 files changed, 24 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp b/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
index f6048b25db91e..1fb1e578994e9 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXIncreaseAlignment.cpp
@@ -33,13 +33,18 @@ static cl::opt<bool>
cl::init(false), cl::Hidden,
cl::desc("Use maximum alignment for local memory"));
+static Align getMaxLocalArrayAlignment(const TargetTransformInfo &TTI) {
+ const unsigned MaxBitWidth =
+ TTI.getLoadStoreVecRegBitWidth(NVPTXAS::ADDRESS_SPACE_LOCAL);
+ return Align(MaxBitWidth / 8);
+}
+
namespace {
struct NVPTXIncreaseLocalAlignment {
const Align MaxAlign;
NVPTXIncreaseLocalAlignment(const TargetTransformInfo &TTI)
- : MaxAlign(TTI.getLoadStoreVecRegBitWidth(NVPTXAS::ADDRESS_SPACE_LOCAL)) {
- }
+ : MaxAlign(getMaxLocalArrayAlignment(TTI)) {}
bool run(Function &F);
bool updateAllocaAlignment(AllocaInst *Alloca, const DataLayout &DL);
@@ -113,6 +118,9 @@ struct NVPTXIncreaseLocalAlignmentLegacyPass : public FunctionPass {
NVPTXIncreaseLocalAlignmentLegacyPass() : FunctionPass(ID) {}
bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ }
StringRef getPassName() const override {
return "NVPTX Increase Local Alignment";
}
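
One detail worth calling out: TTI's getLoadStoreVecRegBitWidth reports a width in bits, hence the division by 8 above. A tiny sketch of the conversion (standalone C++, not part of the patch; the 128-bit input is an assumption, matching the 16-byte cap the first revision hard-coded):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // Width in bits reported by the target -> byte alignment for an alloca.
  static uint64_t maxLocalArrayAlignment(uint64_t VecRegBitWidth) {
    assert(VecRegBitWidth % 8 == 0 && "expected a whole number of bytes");
    return VecRegBitWidth / 8;
  }

  int main() {
    // e.g. 128-bit vector loads/stores imply a 16-byte alignment cap.
    std::printf("%llu\n", (unsigned long long)maxLocalArrayAlignment(128));
  }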
diff --git a/llvm/test/CodeGen/NVPTX/increase-local-align.ll b/llvm/test/CodeGen/NVPTX/increase-local-align.ll
index 605c4b5b2b77d..3dddcf384b81c 100644
--- a/llvm/test/CodeGen/NVPTX/increase-local-align.ll
+++ b/llvm/test/CodeGen/NVPTX/increase-local-align.ll
@@ -67,7 +67,7 @@ define void @test6() {
define void @test7() {
; COMMON-LABEL: define void @test7() {
-; COMMON-NEXT: [[A:%.*]] = alloca i32, align 2
+; COMMON-NEXT: [[A:%.*]] = alloca i32, align 4
; COMMON-NEXT: ret void
;
%a = alloca i32, align 2
diff --git a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
index 54495cf0d61f3..5e5888aac3ceb 100644
--- a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
@@ -135,7 +135,7 @@ define dso_local ptx_kernel void @escape_ptr(ptr nocapture noundef readnone %out
;
; PTX-LABEL: escape_ptr(
; PTX: {
-; PTX-NEXT: .local .align 4 .b8 __local_depot2[8];
+; PTX-NEXT: .local .align 8 .b8 __local_depot2[8];
; PTX-NEXT: .reg .b64 %SP;
; PTX-NEXT: .reg .b64 %SPL;
; PTX-NEXT: .reg .b32 %r<3>;
@@ -179,7 +179,7 @@ define dso_local ptx_kernel void @escape_ptr_gep(ptr nocapture noundef readnone
;
; PTX-LABEL: escape_ptr_gep(
; PTX: {
-; PTX-NEXT: .local .align 4 .b8 __local_depot3[8];
+; PTX-NEXT: .local .align 8 .b8 __local_depot3[8];
; PTX-NEXT: .reg .b64 %SP;
; PTX-NEXT: .reg .b64 %SPL;
; PTX-NEXT: .reg .b32 %r<3>;
@@ -194,7 +194,7 @@ define dso_local ptx_kernel void @escape_ptr_gep(ptr nocapture noundef readnone
; PTX-NEXT: st.local.b32 [%rd2+4], %r1;
; PTX-NEXT: ld.param.b32 %r2, [escape_ptr_gep_param_1];
; PTX-NEXT: st.local.b32 [%rd2], %r2;
-; PTX-NEXT: add.s64 %rd3, %rd1, 4;
+; PTX-NEXT: or.b64 %rd3, %rd1, 4;
; PTX-NEXT: { // callseq 1, 0
; PTX-NEXT: .param .b64 param0;
; PTX-NEXT: st.param.b64 [param0], %rd3;
@@ -224,7 +224,7 @@ define dso_local ptx_kernel void @escape_ptr_store(ptr nocapture noundef writeon
;
; PTX-LABEL: escape_ptr_store(
; PTX: {
-; PTX-NEXT: .local .align 4 .b8 __local_depot4[8];
+; PTX-NEXT: .local .align 8 .b8 __local_depot4[8];
; PTX-NEXT: .reg .b64 %SP;
; PTX-NEXT: .reg .b64 %SPL;
; PTX-NEXT: .reg .b32 %r<3>;
@@ -262,7 +262,7 @@ define dso_local ptx_kernel void @escape_ptr_gep_store(ptr nocapture noundef wri
;
; PTX-LABEL: escape_ptr_gep_store(
; PTX: {
-; PTX-NEXT: .local .align 4 .b8 __local_depot5[8];
+; PTX-NEXT: .local .align 8 .b8 __local_depot5[8];
; PTX-NEXT: .reg .b64 %SP;
; PTX-NEXT: .reg .b64 %SPL;
; PTX-NEXT: .reg .b32 %r<3>;
@@ -279,7 +279,7 @@ define dso_local ptx_kernel void @escape_ptr_gep_store(ptr nocapture noundef wri
; PTX-NEXT: st.local.b32 [%rd4+4], %r1;
; PTX-NEXT: ld.param.b32 %r2, [escape_ptr_gep_store_param_1];
; PTX-NEXT: st.local.b32 [%rd4], %r2;
-; PTX-NEXT: add.s64 %rd5, %rd3, 4;
+; PTX-NEXT: or.b64 %rd5, %rd3, 4;
; PTX-NEXT: st.global.b64 [%rd2], %rd5;
; PTX-NEXT: ret;
entry:
@@ -302,7 +302,7 @@ define dso_local ptx_kernel void @escape_ptrtoint(ptr nocapture noundef writeonl
;
; PTX-LABEL: escape_ptrtoint(
; PTX: {
-; PTX-NEXT: .local .align 4 .b8 __local_depot6[8];
+; PTX-NEXT: .local .align 8 .b8 __local_depot6[8];
; PTX-NEXT: .reg .b64 %SP;
; PTX-NEXT: .reg .b64 %SPL;
; PTX-NEXT: .reg .b32 %r<3>;
diff --git a/llvm/test/CodeGen/NVPTX/variadics-backend.ll b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
index ddaa9fd831af7..d83b303d1f0db 100644
--- a/llvm/test/CodeGen/NVPTX/variadics-backend.ll
+++ b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
@@ -208,7 +208,7 @@ declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias
define dso_local i32 @bar() {
; CHECK-PTX-LABEL: bar(
; CHECK-PTX: {
-; CHECK-PTX-NEXT: .local .align 8 .b8 __local_depot3[24];
+; CHECK-PTX-NEXT: .local .align 16 .b8 __local_depot3[32];
; CHECK-PTX-NEXT: .reg .b64 %SP;
; CHECK-PTX-NEXT: .reg .b64 %SPL;
; CHECK-PTX-NEXT: .reg .b16 %rs<5>;
@@ -226,12 +226,12 @@ define dso_local i32 @bar() {
; CHECK-PTX-NEXT: ld.global.nc.b8 %rs3, [__const_$_bar_$_s1+5];
; CHECK-PTX-NEXT: st.local.b8 [%rd2], %rs3;
; CHECK-PTX-NEXT: mov.b32 %r1, 1;
-; CHECK-PTX-NEXT: st.b32 [%SP+8], %r1;
+; CHECK-PTX-NEXT: st.b32 [%SP+16], %r1;
; CHECK-PTX-NEXT: mov.b16 %rs4, 1;
-; CHECK-PTX-NEXT: st.b8 [%SP+12], %rs4;
+; CHECK-PTX-NEXT: st.b8 [%SP+20], %rs4;
; CHECK-PTX-NEXT: mov.b64 %rd3, 1;
-; CHECK-PTX-NEXT: st.b64 [%SP+16], %rd3;
-; CHECK-PTX-NEXT: add.u64 %rd4, %SP, 8;
+; CHECK-PTX-NEXT: st.b64 [%SP+24], %rd3;
+; CHECK-PTX-NEXT: add.u64 %rd4, %SP, 16;
; CHECK-PTX-NEXT: { // callseq 1, 0
; CHECK-PTX-NEXT: .param .b32 param0;
; CHECK-PTX-NEXT: st.param.b32 [param0], 1;
@@ -371,7 +371,7 @@ entry:
define dso_local void @qux() {
; CHECK-PTX-LABEL: qux(
; CHECK-PTX: {
-; CHECK-PTX-NEXT: .local .align 8 .b8 __local_depot7[24];
+; CHECK-PTX-NEXT: .local .align 16 .b8 __local_depot7[32];
; CHECK-PTX-NEXT: .reg .b64 %SP;
; CHECK-PTX-NEXT: .reg .b64 %SPL;
; CHECK-PTX-NEXT: .reg .b32 %r<3>;