[llvm] [NVPTX] support packed f32 instructions for sm_100+ (PR #126337)
Princeton Ferro via llvm-commits
llvm-commits at lists.llvm.org
Thu May 22 03:07:03 PDT 2025
https://github.com/Prince781 updated https://github.com/llvm/llvm-project/pull/126337
>From 361134975757432981a897106f13272a6a8deb44 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 15:52:25 -0800
Subject: [PATCH 01/32] legalize v2f32 as i64 reg and add test cases
---
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 1 +
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 12 +-
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 6 +
llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td | 4 +-
llvm/lib/Target/NVPTX/NVPTXSubtarget.h | 4 +
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 390 ++++++++++++++++++
6 files changed, 414 insertions(+), 3 deletions(-)
create mode 100644 llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index b05a4713e6340..b5263121aa28c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1029,6 +1029,7 @@ static std::optional<unsigned> pickOpcodeForVT(
case MVT::i32:
return Opcode_i32;
case MVT::i64:
+ case MVT::v2f32:
return Opcode_i64;
case MVT::f16:
case MVT::bf16:
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 51f4682c5ba15..3279b33dfeb03 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -331,8 +331,8 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
// TargetLoweringBase::getVectorTypeBreakdown() which is invoked in
// ComputePTXValueVTs() cannot currently break down non-power-of-2 sized
// vectors.
- if ((Is16bitsType(EltVT.getSimpleVT())) && NumElts % 2 == 0 &&
- isPowerOf2_32(NumElts)) {
+ if ((Is16bitsType(EltVT.getSimpleVT()) || EltVT == MVT::f32) &&
+ NumElts % 2 == 0 && isPowerOf2_32(NumElts)) {
// Vectors with an even number of f16 elements will be passed to
// us as an array of v2f16/v2bf16 elements. We must match this so we
// stay in sync with Ins/Outs.
@@ -346,6 +346,9 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
case MVT::i16:
EltVT = MVT::v2i16;
break;
+ case MVT::f32:
+ EltVT = MVT::v2f32;
+ break;
default:
llvm_unreachable("Unexpected type");
}
@@ -612,6 +615,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
addRegisterClass(MVT::v2f16, &NVPTX::Int32RegsRegClass);
addRegisterClass(MVT::bf16, &NVPTX::Int16RegsRegClass);
addRegisterClass(MVT::v2bf16, &NVPTX::Int32RegsRegClass);
+ addRegisterClass(MVT::v2f32, &NVPTX::Int64RegsRegClass);
// Conversion to/from FP16/FP16x2 is always legal.
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
@@ -877,6 +881,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
if (getOperationAction(Op, MVT::bf16) == Promote)
AddPromotedToType(Op, MVT::bf16, MVT::f32);
+ if (STI.hasF32x2Instructions())
+ setOperationAction(Op, MVT::v2f32, Legal);
}
// On SM80, we select add/mul/sub as fma to avoid promotion to float
@@ -3568,6 +3574,8 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
// vectors which contain v2f16 or v2bf16 elements. So we must load
// using i32 here and then bitcast back.
LoadVT = MVT::i32;
+ else if (EltVT == MVT::v2f32)
+ LoadVT = MVT::i64;
EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
SDValue VecAddr =
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 5234fb0806189..45ffc95b0b6eb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -158,6 +158,7 @@ def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
def hasDotInstructions : Predicate<"Subtarget->hasDotInstructions()">;
def hasTcgen05Instructions : Predicate<"Subtarget->hasTcgen05Instructions()">;
+def hasF32x2Instructions : Predicate<"Subtarget->hasF32x2Instructions()">;
def True : Predicate<"true">;
def False : Predicate<"false">;
@@ -2858,6 +2859,9 @@ let hasSideEffects = false in {
def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
(ins Float32Regs:$s1, Float32Regs:$s2),
"mov.b64 \t$d, {{$s1, $s2}};", []>;
+ def V2F32toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Float32Regs:$s1, Float32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
// unpack a larger int register to a set of smaller int registers
def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
@@ -2941,6 +2945,8 @@ def : Pat<(v2bf16 (build_vector bf16:$a, bf16:$b)),
(V2I16toI32 $a, $b)>;
def : Pat<(v2i16 (build_vector i16:$a, i16:$b)),
(V2I16toI32 $a, $b)>;
+def : Pat<(v2f32 (build_vector f32:$a, f32:$b)),
+ (V2F32toI64 $a, $b)>;
def: Pat<(v2i16 (scalar_to_vector i16:$a)),
(CVT_u32_u16 $a, CvtNONE)>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
index 2eea9e9721cdf..f73d4976dd12d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -60,7 +60,9 @@ def Int16Regs : NVPTXRegClass<[i16, f16, bf16], 16, (add (sequence "RS%u", 0, 4)
def Int32Regs : NVPTXRegClass<[i32, v2f16, v2bf16, v2i16, v4i8, f32], 32,
(add (sequence "R%u", 0, 4),
VRFrame32, VRFrameLocal32)>;
-def Int64Regs : NVPTXRegClass<[i64, f64], 64, (add (sequence "RL%u", 0, 4), VRFrame64, VRFrameLocal64)>;
+def Int64Regs : NVPTXRegClass<[i64, f64, v2f32], 64,
+ (add (sequence "RL%u", 0, 4),
+ VRFrame64, VRFrameLocal64)>;
// 128-bit regs are not defined as general regs in NVPTX. They are used for inlineASM only.
def Int128Regs : NVPTXRegClass<[i128], 128, (add (sequence "RQ%u", 0, 4))>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
index 5136b1ee28502..f533f9d7a7f78 100644
--- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -117,6 +117,10 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
return HasTcgen05 && PTXVersion >= 86;
}
+ bool hasF32x2Instructions() const {
+ return SmVersion >= 100 && PTXVersion >= 86;
+ }
+
// Prior to CUDA 12.3 ptxas did not recognize that the trap instruction
// terminates a basic block. Instead, it would assume that control flow
// continued to the next instruction. The next instruction could be in the
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
new file mode 100644
index 0000000000000..f449fefe5763e
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -0,0 +1,390 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; ## Full FP32x2 support enabled by default.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
+; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck --check-prefixes=CHECK-O0 %s
+; RUN: %if ptxas %{ \
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
+; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
+; RUN: -O3 -verify-machineinstrs \
+; RUN: | FileCheck --check-prefixes=CHECK-O3 %s
+; RUN: %if ptxas %{ \
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
+; RUN: -O3 -verify-machineinstrs \
+; RUN: | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "nvptx64-nvidia-cuda"
+
+define <2 x float> @test_ret_const() #0 {
+ ret <2 x float> <float 1.0, float 2.0>
+}
+
+define float @test_extract_0(<2 x float> %a) #0 {
+ %e = extractelement <2 x float> %a, i32 0
+ ret float %e
+}
+
+define float @test_extract_1(<2 x float> %a) #0 {
+ %e = extractelement <2 x float> %a, i32 1
+ ret float %e
+}
+
+; NOTE: disabled as -O3 miscompiles this into pointer arithmetic on
+; test_extract_i_param_0 where the symbol's address is not taken first (that
+; is, moved to a temporary)
+; define float @test_extract_i(<2 x float> %a, i64 %idx) #0 {
+; ; CHECK-LABEL: test_extract_i(
+; ; CHECK: {
+; ; CHECK-NEXT: .reg .pred %p<2>;
+; ; CHECK-NEXT: .reg .f32 %f<4>;
+; ; CHECK-NEXT: .reg .b64 %rd<2>;
+; ; CHECK-EMPTY:
+; ; CHECK-NEXT: // %bb.0:
+; ; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_extract_i_param_0];
+; ; CHECK-NEXT: ld.param.u64 %rd1, [test_extract_i_param_1];
+; ; CHECK-NEXT: setp.eq.s64 %p1, %rd1, 0;
+; ; CHECK-NEXT: selp.f32 %f3, %f1, %f2, %p1;
+; ; CHECK-NEXT: st.param.f32 [func_retval0], %f3;
+; ; CHECK-NEXT: ret;
+; %e = extractelement <2 x float> %a, i64 %idx
+; ret float %e
+; }
+
+define <2 x float> @test_fadd(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fadd <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_imm_0(<2 x float> %a) #0 {
+ %r = fadd <2 x float> <float 1.0, float 2.0>, %a
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_imm_1(<2 x float> %a) #0 {
+ %r = fadd <2 x float> %a, <float 1.0, float 2.0>
+ ret <2 x float> %r
+}
+
+define <4 x float> @test_fadd_v4(<4 x float> %a, <4 x float> %b) #0 {
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+define <4 x float> @test_fadd_imm_0_v4(<4 x float> %a) #0 {
+ %r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
+ ret <4 x float> %r
+}
+
+define <4 x float> @test_fadd_imm_1_v4(<4 x float> %a) #0 {
+ %r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
+ ret <4 x float> %r
+}
+
+define <2 x float> @test_fsub(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fsub <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fneg(<2 x float> %a) #0 {
+ %r = fsub <2 x float> <float 0.0, float 0.0>, %a
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fmul(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fmul <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fma(<2 x float> %a, <2 x float> %b, <2 x float> %c) #0 {
+ %r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fdiv <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
+ %r = frem <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_ftz(<2 x float> %a, <2 x float> %b) #2 {
+ %r = fadd <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_imm_0_ftz(<2 x float> %a) #2 {
+ %r = fadd <2 x float> <float 1.0, float 2.0>, %a
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_imm_1_ftz(<2 x float> %a) #2 {
+ %r = fadd <2 x float> %a, <float 1.0, float 2.0>
+ ret <2 x float> %r
+}
+
+define <4 x float> @test_fadd_v4_ftz(<4 x float> %a, <4 x float> %b) #2 {
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+define <4 x float> @test_fadd_imm_0_v4_ftz(<4 x float> %a) #2 {
+ %r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
+ ret <4 x float> %r
+}
+
+define <4 x float> @test_fadd_imm_1_v4_ftz(<4 x float> %a) #2 {
+ %r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
+ ret <4 x float> %r
+}
+
+define <2 x float> @test_fsub_ftz(<2 x float> %a, <2 x float> %b) #2 {
+ %r = fsub <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fneg_ftz(<2 x float> %a) #2 {
+ %r = fsub <2 x float> <float 0.0, float 0.0>, %a
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fmul_ftz(<2 x float> %a, <2 x float> %b) #2 {
+ %r = fmul <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fma_ftz(<2 x float> %a, <2 x float> %b, <2 x float> %c) #2 {
+ %r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
+ %r = fdiv <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
+ %r = frem <2 x float> %a, %b
+ ret <2 x float> %r
+}
+
+define void @test_ldst_v2f32(ptr %a, ptr %b) #0 {
+ %t1 = load <2 x float>, ptr %a
+ store <2 x float> %t1, ptr %b, align 32
+ ret void
+}
+
+define void @test_ldst_v3f32(ptr %a, ptr %b) #0 {
+ %t1 = load <3 x float>, ptr %a
+ store <3 x float> %t1, ptr %b, align 32
+ ret void
+}
+
+define void @test_ldst_v4f32(ptr %a, ptr %b) #0 {
+ %t1 = load <4 x float>, ptr %a
+ store <4 x float> %t1, ptr %b, align 32
+ ret void
+}
+
+define void @test_ldst_v8f32(ptr %a, ptr %b) #0 {
+ %t1 = load <8 x float>, ptr %a
+ store <8 x float> %t1, ptr %b, align 32
+ ret void
+}
+
+declare <2 x float> @test_callee(<2 x float> %a, <2 x float> %b) #0
+
+define <2 x float> @test_call(<2 x float> %a, <2 x float> %b) #0 {
+ %r = call <2 x float> @test_callee(<2 x float> %a, <2 x float> %b)
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_call_flipped(<2 x float> %a, <2 x float> %b) #0 {
+ %r = call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a)
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_tailcall_flipped(<2 x float> %a, <2 x float> %b) #0 {
+ %r = tail call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a)
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_select(<2 x float> %a, <2 x float> %b, i1 zeroext %c) #0 {
+ %r = select i1 %c, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d) #0 {
+ %cc = fcmp une <2 x float> %c, %d
+ %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %r
+}
+
+define <2 x double> @test_select_cc_f64_f32(<2 x double> %a, <2 x double> %b, <2 x float> %c, <2 x float> %d) #0 {
+ %cc = fcmp une <2 x float> %c, %d
+ %r = select <2 x i1> %cc, <2 x double> %a, <2 x double> %b
+ ret <2 x double> %r
+}
+
+define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x double> %c, <2 x double> %d) #0 {
+ %cc = fcmp une <2 x double> %c, %d
+ %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %r
+}
+
+define <2 x i1> @test_fcmp_une(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp une <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ueq(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp ueq <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ugt(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp ugt <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_uge(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp uge <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ult(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp ult <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ule(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp ule <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_uno(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp uno <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_one(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp one <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_oeq(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp oeq <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ogt(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp ogt <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_oge(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp oge <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_olt(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp olt <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ole(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp ole <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ord(<2 x float> %a, <2 x float> %b) #0 {
+ %r = fcmp ord <2 x float> %a, %b
+ ret <2 x i1> %r
+}
+
+define <2 x i32> @test_fptosi_i32(<2 x float> %a) #0 {
+ %r = fptosi <2 x float> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+define <2 x i64> @test_fptosi_i64(<2 x float> %a) #0 {
+ %r = fptosi <2 x float> %a to <2 x i64>
+ ret <2 x i64> %r
+}
+
+define <2 x i32> @test_fptoui_2xi32(<2 x float> %a) #0 {
+ %r = fptoui <2 x float> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 {
+ %r = fptoui <2 x float> %a to <2 x i64>
+ ret <2 x i64> %r
+}
+
+define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
+ %r = uitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
+ %r = uitofp <2 x i64> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
+ %r = sitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 {
+ %r = sitofp <2 x i64> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 {
+ %c = uitofp <2 x i32> %a to <2 x float>
+ %r = fadd <2 x float> %b, %c
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
+ %r = fptrunc <2 x double> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+define <2 x double> @test_fpext_2xdouble(<2 x float> %a) #0 {
+ %r = fpext <2 x float> %a to <2 x double>
+ ret <2 x double> %r
+}
+
+define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 {
+ %r = bitcast <2 x float> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 {
+ %r = bitcast <2 x i32> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+define <2 x float> @test_bitcast_double_to_2xfloat(double %a) #0 {
+ %r = bitcast double %a to <2 x float>
+ ret <2 x float> %r
+}
+
+define double @test_bitcast_2xfloat_to_double(<2 x float> %a) #0 {
+ %r = bitcast <2 x float> %a to double
+ ret double %r
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { "unsafe-fp-math" = "true" }
+attributes #2 = { "denormal-fp-math"="preserve-sign" }
>From f6b151dff2d4eb8cfad8a27aedc24078afb9c344 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 16:27:33 -0800
Subject: [PATCH 02/32] support fadd, fsub, fmul, fma and load on v2f32
---
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 45ffc95b0b6eb..17fd047ef6337 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -235,6 +235,7 @@ def F64RT : RegTyInfo<f64, Float64Regs, f64imm, fpimm>;
def F16RT : RegTyInfo<f16, Int16Regs, f16imm, fpimm, supports_imm = 0>;
def BF16RT : RegTyInfo<bf16, Int16Regs, bf16imm, fpimm, supports_imm = 0>;
+def F32X2RT : RegTyInfo<v2f32, Int64Regs, ?, ?, supports_imm = 0>;
def F16X2RT : RegTyInfo<v2f16, Int32Regs, ?, ?, supports_imm = 0>;
def BF16X2RT : RegTyInfo<v2bf16, Int32Regs, ?, ?, supports_imm = 0>;
@@ -446,7 +447,18 @@ multiclass F3<string op_str, SDPatternOperator op_pat> {
(ins Float32Regs:$a, f32imm:$b),
op_str # ".f32 \t$dst, $a, $b;",
[(set f32:$dst, (op_pat f32:$a, fpimm:$b))]>;
-
+ def f32x2rr_ftz :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b),
+ op_str # ".ftz.f32x2 \t$dst, $a, $b;",
+ [(set v2f32:$dst, (op_pat v2f32:$a, v2f32:$b))]>,
+ Requires<[doF32FTZ, hasF32x2Instructions]>;
+ def f32x2rr :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b),
+ op_str # ".f32x2 \t$dst, $a, $b;",
+ [(set v2f32:$dst, (op_pat v2f32:$a, v2f32:$b))]>,
+ Requires<[hasF32x2Instructions]>;
def f16rr_ftz :
NVPTXInst<(outs Int16Regs:$dst),
(ins Int16Regs:$a, Int16Regs:$b),
@@ -478,7 +490,6 @@ multiclass F3<string op_str, SDPatternOperator op_pat> {
op_str # ".bf16 \t$dst, $a, $b;",
[(set bf16:$dst, (op_pat bf16:$a, bf16:$b))]>,
Requires<[hasBF16Math]>;
-
def bf16x2rr :
NVPTXInst<(outs Int32Regs:$dst),
(ins Int32Regs:$a, Int32Regs:$b),
@@ -1416,6 +1427,8 @@ defm BFMA16 : FMA<"fma.rn.bf16", BF16RT, [hasBF16Math]>;
defm BFMA16x2 : FMA<"fma.rn.bf16x2", BF16X2RT, [hasBF16Math]>;
defm FMA32_ftz : FMA<"fma.rn.ftz.f32", F32RT, [doF32FTZ]>;
defm FMA32 : FMA<"fma.rn.f32", F32RT>;
+defm FMA32x2_ftz : FMA<"fma.rn.ftz.f32x2", F32X2RT, [doF32FTZ]>;
+defm FMA32x2 : FMA<"fma.rn.f32x2", F32X2RT>;
defm FMA64 : FMA<"fma.rn.f64", F64RT>;
// sin/cos
>From d0621569cd46c41f998d6fbc2905cbccfa23dfd6 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 16:49:15 -0800
Subject: [PATCH 03/32] set proxyreg for v2f32 = bitcast i64
---
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 17fd047ef6337..5b9446d66f232 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2452,6 +2452,8 @@ foreach vt = [v2f16, v2bf16, v2i16, v4i8] in {
def: Pat<(vt (ProxyReg vt:$src)), (ProxyRegI32 $src)>;
}
+def: Pat<(v2f32 (bitconvert i64:$src)), (ProxyRegI64 $src)>;
+
//
// Load / Store Handling
//
>From 9d4a12dc00a7db40c3ef835066b8e8073dc83f7e Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 17:21:43 -0800
Subject: [PATCH 04/32] handle fdiv and other instructions where v2f32 is
illegal
Requires us to lower EXTRACT_VECTOR_ELT as well.
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 1 +
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 14 ++++++++++++++
2 files changed, 15 insertions(+)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 3279b33dfeb03..5722895a895d2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -965,6 +965,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
{ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS}) {
setOperationAction(Op, MVT::f16, Promote);
setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::v2f32, Expand);
setOperationAction(Op, MVT::f64, Legal);
setOperationAction(Op, MVT::v2f16, Expand);
setOperationAction(Op, MVT::v2bf16, Expand);
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 5b9446d66f232..a13e421e429b9 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2912,6 +2912,14 @@ let hasSideEffects = false in {
(ins Int64Regs:$s),
"{{ .reg .b32 tmp; mov.b64 {$low, tmp}, $s; }}",
[]>;
+ def I64toF32H : NVPTXInst<(outs Float32Regs:$high),
+ (ins Int64Regs:$s),
+ "{{ .reg .b32 tmp; mov.b64 {tmp, $high}, $s; }}",
+ []>;
+ def I64toF32L : NVPTXInst<(outs Float32Regs:$low),
+ (ins Int64Regs:$s),
+ "{{ .reg .b32 tmp; mov.b64 {$low, tmp}, $s; }}",
+ []>;
// PTX 7.1 lets you avoid a temp register and just use _ as a "sink" for the
// unused high/low part.
@@ -2954,6 +2962,12 @@ foreach vt = [v2f16, v2bf16, v2i16] in {
def : Pat<(extractelt vt:$src, 0), (I32toI16L $src)>;
def : Pat<(extractelt vt:$src, 1), (I32toI16H $src)>;
}
+
+def : Pat<(extractelt v2f32:$src, 0),
+ (I64toF32L $src)>;
+def : Pat<(extractelt v2f32:$src, 1),
+ (I64toF32H $src)>;
+
def : Pat<(v2f16 (build_vector f16:$a, f16:$b)),
(V2I16toI32 $a, $b)>;
def : Pat<(v2bf16 (build_vector bf16:$a, bf16:$b)),
>From f5721035d420a9d59acf017750bce5d8a1ef9292 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 17:58:19 -0800
Subject: [PATCH 05/32] ProxyReg v2f32 -> ProxyRegI64
---
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index a13e421e429b9..ef21412596e0b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2452,6 +2452,8 @@ foreach vt = [v2f16, v2bf16, v2i16, v4i8] in {
def: Pat<(vt (ProxyReg vt:$src)), (ProxyRegI32 $src)>;
}
+def: Pat<(v2f32 (ProxyReg v2f32:$src)), (ProxyRegI64 $src)>;
+
def: Pat<(v2f32 (bitconvert i64:$src)), (ProxyRegI64 $src)>;
//
>From cc603d810a36ea19871cf8e62a2012ccc7806fa1 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 18:15:52 -0800
Subject: [PATCH 06/32] support select v2f32
---
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 3 +++
1 file changed, 3 insertions(+)
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index ef21412596e0b..da0b7109e03a5 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -868,6 +868,9 @@ def : Pat<(vt (select i1:$p, vt:$a, vt:$b)),
(SELP_b32rr $a, $b, $p)>;
}
+def : Pat<(v2f32 (select i1:$p, v2f32:$a, v2f32:$b)),
+ (SELP_b64rr $a, $b, $p)>;
+
//-----------------------------------
// Test Instructions
//-----------------------------------
>From 6a4af7d96fc3f165f281e41719156ee2ff3f6b5b Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 18:22:15 -0800
Subject: [PATCH 07/32] support v2f32 = bitconvert f64
---
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index da0b7109e03a5..7b53c099d7e98 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2587,6 +2587,8 @@ def: Pat<(vt (bitconvert (f32 Float32Regs:$a))),
def: Pat<(f32 (bitconvert vt:$a)),
(BITCONVERT_32_I2F $a)>;
}
+def: Pat<(v2f32 (bitconvert (f64 Float64Regs:$a))),
+ (BITCONVERT_64_F2I $a)>;
foreach vt = [f16, bf16] in {
def: Pat<(vt (bitconvert i16:$a)),
(vt Int16Regs:$a)>;
>From 5479c5a0d10b57da2404e3fb48ea7bb913960496 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 18:40:20 -0800
Subject: [PATCH 08/32] support extract_vector_elt with dynamic indices
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 5 ++++-
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 21 ++++---------------
2 files changed, 8 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 5722895a895d2..9170a73ff65af 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -652,6 +652,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom);
+
// Custom conversions to/from v2i8.
setOperationAction(ISD::BITCAST, MVT::v2i8, Custom);
@@ -2318,7 +2320,8 @@ SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
return Op;
// Extract individual elements and select one of them.
- assert(Isv2x16VT(VectorVT) && "Unexpected vector type.");
+ assert((Isv2x16VT(VectorVT) || VectorVT == MVT::v2f32) &&
+ "Unexpected vector type.");
EVT EltVT = VectorVT.getVectorElementType();
SDLoc dl(Op.getNode());
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index f449fefe5763e..37833a7b9fca5 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -37,23 +37,10 @@ define float @test_extract_1(<2 x float> %a) #0 {
; NOTE: disabled as -O3 miscompiles this into pointer arithmetic on
; test_extract_i_param_0 where the symbol's address is not taken first (that
; is, moved to a temporary)
-; define float @test_extract_i(<2 x float> %a, i64 %idx) #0 {
-; ; CHECK-LABEL: test_extract_i(
-; ; CHECK: {
-; ; CHECK-NEXT: .reg .pred %p<2>;
-; ; CHECK-NEXT: .reg .f32 %f<4>;
-; ; CHECK-NEXT: .reg .b64 %rd<2>;
-; ; CHECK-EMPTY:
-; ; CHECK-NEXT: // %bb.0:
-; ; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_extract_i_param_0];
-; ; CHECK-NEXT: ld.param.u64 %rd1, [test_extract_i_param_1];
-; ; CHECK-NEXT: setp.eq.s64 %p1, %rd1, 0;
-; ; CHECK-NEXT: selp.f32 %f3, %f1, %f2, %p1;
-; ; CHECK-NEXT: st.param.f32 [func_retval0], %f3;
-; ; CHECK-NEXT: ret;
-; %e = extractelement <2 x float> %a, i64 %idx
-; ret float %e
-; }
+define float @test_extract_i(<2 x float> %a, i64 %idx) #0 {
+ %e = extractelement <2 x float> %a, i64 %idx
+ ret float %e
+}
define <2 x float> @test_fadd(<2 x float> %a, <2 x float> %b) #0 {
%r = fadd <2 x float> %a, %b
>From 0f3fdc2b6f1a26cce8fb0f98f1ccd29e3eb6fe2c Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 8 Feb 2025 20:24:54 -0800
Subject: [PATCH 09/32] promote extract_vector_elt nodes to unpacking mov
Also update the test cases.
---
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 21 +-
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 4 +-
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 3 +
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 2078 ++++++++++++++++-
4 files changed, 2093 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index b5263121aa28c..b4b2eca000097 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -468,10 +468,14 @@ bool NVPTXDAGToDAGISel::tryUNPACK_VECTOR(SDNode *N) {
bool NVPTXDAGToDAGISel::tryEXTRACT_VECTOR_ELEMENT(SDNode *N) {
SDValue Vector = N->getOperand(0);
- // We only care about 16x2 as it's the only real vector type we
- // need to deal with.
+ // We only care about packed vector types: 16x2 and 32x2.
MVT VT = Vector.getSimpleValueType();
- if (!Isv2x16VT(VT))
+ unsigned NewOpcode;
+ if (Isv2x16VT(VT))
+ NewOpcode = NVPTX::I32toV2I16;
+ else if (VT == MVT::v2f32)
+ NewOpcode = NVPTX::I64toV2F32;
+ else
return false;
// Find and record all uses of this vector that extract element 0 or 1.
SmallVector<SDNode *, 4> E0, E1;
@@ -491,16 +495,19 @@ bool NVPTXDAGToDAGISel::tryEXTRACT_VECTOR_ELEMENT(SDNode *N) {
}
}
- // There's no point scattering f16x2 if we only ever access one
+ // There's no point scattering f16x2 or f32x2 if we only ever access one
// element of it.
if (E0.empty() || E1.empty())
return false;
- // Merge (f16 extractelt(V, 0), f16 extractelt(V,1))
- // into f16,f16 SplitF16x2(V)
+ // Merge:
+ // (f16 extractelt(V, 0), f16 extractelt(V,1))
+ // -> f16,f16 SplitF16x2(V)
+ // (f32 extractelt(V, 0), f32 extractelt(V,1))
+ // -> f32,f32 SplitF32x2(V)
MVT EltVT = VT.getVectorElementType();
SDNode *ScatterOp =
- CurDAG->getMachineNode(NVPTX::I32toV2I16, SDLoc(N), EltVT, EltVT, Vector);
+ CurDAG->getMachineNode(NewOpcode, SDLoc(N), EltVT, EltVT, Vector);
for (auto *Node : E0)
ReplaceUses(SDValue(Node, 0), SDValue(ScatterOp, 0));
for (auto *Node : E1)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 9170a73ff65af..e409f03bc617b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5665,10 +5665,10 @@ static SDValue PerformEXTRACTCombine(SDNode *N,
IsPTXVectorType(VectorVT.getSimpleVT()))
return SDValue(); // Native vector loads already combine nicely w/
// extract_vector_elt.
- // Don't mess with singletons or v2*16, v4i8 and v8i8 types, we already
+ // Don't mess with singletons or v2*16, v2f32, v4i8 and v8i8 types, we already
// handle them OK.
if (VectorVT.getVectorNumElements() == 1 || Isv2x16VT(VectorVT) ||
- VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8)
+ VectorVT == MVT::v2f32 || VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8)
return SDValue();
// Don't mess with undef values as sra may be simplified to 0, not undef.
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 7b53c099d7e98..372e029fe0fa8 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2896,6 +2896,9 @@ let hasSideEffects = false in {
def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
(ins Int64Regs:$s),
"mov.b64 \t{{$d1, $d2}}, $s;", []>;
+ def I64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
def I128toV2I64: NVPTXInst<(outs Int64Regs:$d1, Int64Regs:$d2),
(ins Int128Regs:$s),
"mov.b128 \t{{$d1, $d2}}, $s;", []>;
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index 37833a7b9fca5..97cde07ed2003 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -21,15 +21,76 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "nvptx64-nvidia-cuda"
define <2 x float> @test_ret_const() #0 {
+; CHECK-O0-LABEL: test_ret_const(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_ret_const(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O3-NEXT: ret;
ret <2 x float> <float 1.0, float 2.0>
}
define float @test_extract_0(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_extract_0(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<2>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_extract_0_param_0];
+; CHECK-O0-NEXT: { .reg .b32 tmp; mov.b64 {%f1, tmp}, %rd1; }
+; CHECK-O0-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_extract_0(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f32 %f1, [test_extract_0_param_0];
+; CHECK-O3-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-O3-NEXT: ret;
%e = extractelement <2 x float> %a, i32 0
ret float %e
}
define float @test_extract_1(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_extract_1(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<2>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_extract_1_param_0];
+; CHECK-O0-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %f1}, %rd1; }
+; CHECK-O0-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_extract_1(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f32 %f1, [test_extract_1_param_0+4];
+; CHECK-O3-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-O3-NEXT: ret;
%e = extractelement <2 x float> %a, i32 1
ret float %e
}
@@ -37,150 +98,936 @@ define float @test_extract_1(<2 x float> %a) #0 {
; NOTE: disabled as -O3 miscompiles this into pointer arithmetic on
; test_extract_i_param_0 where the symbol's address is not taken first (that
; is, moved to a temporary)
-define float @test_extract_i(<2 x float> %a, i64 %idx) #0 {
- %e = extractelement <2 x float> %a, i64 %idx
- ret float %e
-}
+; define float @test_extract_i(<2 x float> %a, i64 %idx) #0 {
+; %e = extractelement <2 x float> %a, i64 %idx
+; ret float %e
+; }
define <2 x float> @test_fadd(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fadd(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fadd_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_param_0];
+; CHECK-O0-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fadd_param_0];
+; CHECK-O3-NEXT: add.rn.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fadd <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fadd_imm_0(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fadd_imm_0(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_imm_0_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_imm_0(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_imm_0_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fadd <2 x float> <float 1.0, float 2.0>, %a
ret <2 x float> %r
}
define <2 x float> @test_fadd_imm_1(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fadd_imm_1(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_imm_1_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_imm_1(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_imm_1_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fadd <2 x float> %a, <float 1.0, float 2.0>
ret <2 x float> %r
}
define <4 x float> @test_fadd_v4(<4 x float> %a, <4 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fadd_v4(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<11>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd5, %rd6}, [test_fadd_v4_param_1];
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd7, %rd8}, [test_fadd_v4_param_0];
+; CHECK-O0-NEXT: add.rn.f32x2 %rd9, %rd8, %rd6;
+; CHECK-O0-NEXT: add.rn.f32x2 %rd10, %rd7, %rd5;
+; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_v4(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<11>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_v4_param_1];
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd4, %rd5}, [test_fadd_v4_param_0];
+; CHECK-O3-NEXT: add.rn.f32x2 %rd9, %rd5, %rd2;
+; CHECK-O3-NEXT: add.rn.f32x2 %rd10, %rd4, %rd1;
+; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
+; CHECK-O3-NEXT: ret;
%r = fadd <4 x float> %a, %b
ret <4 x float> %r
}
define <4 x float> @test_fadd_imm_0_v4(<4 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fadd_imm_0_v4(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<9>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_0_v4_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-O0-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
+; CHECK-O0-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-O0-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
+; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_imm_0_v4(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<9>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_imm_0_v4_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-O3-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.f32x2 %rd6, %rd2, %rd5;
+; CHECK-O3-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-O3-NEXT: add.rn.f32x2 %rd8, %rd1, %rd7;
+; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-O3-NEXT: ret;
%r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
ret <4 x float> %r
}
define <4 x float> @test_fadd_imm_1_v4(<4 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fadd_imm_1_v4(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<9>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_1_v4_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-O0-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
+; CHECK-O0-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-O0-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
+; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_imm_1_v4(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<9>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_imm_1_v4_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-O3-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.f32x2 %rd6, %rd2, %rd5;
+; CHECK-O3-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-O3-NEXT: add.rn.f32x2 %rd8, %rd1, %rd7;
+; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-O3-NEXT: ret;
%r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
ret <4 x float> %r
}
define <2 x float> @test_fsub(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fsub(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fsub_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fsub_param_0];
+; CHECK-O0-NEXT: sub.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fsub(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fsub_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fsub_param_0];
+; CHECK-O3-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fsub <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fneg(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fneg(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<2>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fneg_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f00000000;
+; CHECK-O0-NEXT: mov.b64 %rd2, {%f1, %f1};
+; CHECK-O0-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fneg(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<2>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fneg_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f00000000;
+; CHECK-O3-NEXT: mov.b64 %rd2, {%f1, %f1};
+; CHECK-O3-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fsub <2 x float> <float 0.0, float 0.0>, %a
ret <2 x float> %r
}
define <2 x float> @test_fmul(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fmul(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fmul_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fmul_param_0];
+; CHECK-O0-NEXT: mul.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fmul(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fmul_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fmul_param_0];
+; CHECK-O3-NEXT: mul.rn.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fmul <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fma(<2 x float> %a, <2 x float> %b, <2 x float> %c) #0 {
+; CHECK-O0-LABEL: test_fma(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<5>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd3, [test_fma_param_2];
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fma_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fma_param_0];
+; CHECK-O0-NEXT: fma.rn.f32x2 %rd4, %rd1, %rd2, %rd3;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fma(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<5>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fma_param_2];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fma_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd3, [test_fma_param_0];
+; CHECK-O3-NEXT: fma.rn.f32x2 %rd4, %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-O3-NEXT: ret;
%r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c)
ret <2 x float> %r
}
define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fdiv(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<7>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fdiv_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fdiv_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: div.rn.f32 %f5, %f4, %f2;
+; CHECK-O0-NEXT: div.rn.f32 %f6, %f3, %f1;
+; CHECK-O0-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fdiv(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<7>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fdiv_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fdiv_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: div.rn.f32 %f5, %f4, %f2;
+; CHECK-O3-NEXT: div.rn.f32 %f6, %f3, %f1;
+; CHECK-O3-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_frem(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<15>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_frem_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_frem_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: div.rn.f32 %f5, %f4, %f2;
+; CHECK-O0-NEXT: cvt.rzi.f32.f32 %f6, %f5;
+; CHECK-O0-NEXT: mul.f32 %f7, %f6, %f2;
+; CHECK-O0-NEXT: sub.f32 %f8, %f4, %f7;
+; CHECK-O0-NEXT: testp.infinite.f32 %p1, %f2;
+; CHECK-O0-NEXT: selp.f32 %f9, %f4, %f8, %p1;
+; CHECK-O0-NEXT: div.rn.f32 %f10, %f3, %f1;
+; CHECK-O0-NEXT: cvt.rzi.f32.f32 %f11, %f10;
+; CHECK-O0-NEXT: mul.f32 %f12, %f11, %f1;
+; CHECK-O0-NEXT: sub.f32 %f13, %f3, %f12;
+; CHECK-O0-NEXT: testp.infinite.f32 %p2, %f1;
+; CHECK-O0-NEXT: selp.f32 %f14, %f3, %f13, %p2;
+; CHECK-O0-NEXT: mov.b64 %rd3, {%f14, %f9};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_frem(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<15>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_frem_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_frem_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: div.rn.f32 %f5, %f4, %f2;
+; CHECK-O3-NEXT: cvt.rzi.f32.f32 %f6, %f5;
+; CHECK-O3-NEXT: mul.f32 %f7, %f6, %f2;
+; CHECK-O3-NEXT: sub.f32 %f8, %f4, %f7;
+; CHECK-O3-NEXT: testp.infinite.f32 %p1, %f2;
+; CHECK-O3-NEXT: selp.f32 %f9, %f4, %f8, %p1;
+; CHECK-O3-NEXT: div.rn.f32 %f10, %f3, %f1;
+; CHECK-O3-NEXT: cvt.rzi.f32.f32 %f11, %f10;
+; CHECK-O3-NEXT: mul.f32 %f12, %f11, %f1;
+; CHECK-O3-NEXT: sub.f32 %f13, %f3, %f12;
+; CHECK-O3-NEXT: testp.infinite.f32 %p2, %f1;
+; CHECK-O3-NEXT: selp.f32 %f14, %f3, %f13, %p2;
+; CHECK-O3-NEXT: mov.b64 %rd3, {%f14, %f9};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fadd_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-O0-LABEL: test_fadd_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fadd_ftz_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_ftz_param_0];
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_ftz_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fadd_ftz_param_0];
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fadd <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fadd_imm_0_ftz(<2 x float> %a) #2 {
+; CHECK-O0-LABEL: test_fadd_imm_0_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_imm_0_ftz_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_imm_0_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_imm_0_ftz_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fadd <2 x float> <float 1.0, float 2.0>, %a
ret <2 x float> %r
}
define <2 x float> @test_fadd_imm_1_ftz(<2 x float> %a) #2 {
+; CHECK-O0-LABEL: test_fadd_imm_1_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_imm_1_ftz_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_imm_1_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_imm_1_ftz_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fadd <2 x float> %a, <float 1.0, float 2.0>
ret <2 x float> %r
}
define <4 x float> @test_fadd_v4_ftz(<4 x float> %a, <4 x float> %b) #2 {
+; CHECK-O0-LABEL: test_fadd_v4_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<11>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd5, %rd6}, [test_fadd_v4_ftz_param_1];
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd7, %rd8}, [test_fadd_v4_ftz_param_0];
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd9, %rd8, %rd6;
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd10, %rd7, %rd5;
+; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_v4_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<11>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_v4_ftz_param_1];
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd4, %rd5}, [test_fadd_v4_ftz_param_0];
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd9, %rd5, %rd2;
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd10, %rd4, %rd1;
+; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
+; CHECK-O3-NEXT: ret;
%r = fadd <4 x float> %a, %b
ret <4 x float> %r
}
define <4 x float> @test_fadd_imm_0_v4_ftz(<4 x float> %a) #2 {
+; CHECK-O0-LABEL: test_fadd_imm_0_v4_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<9>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_0_v4_ftz_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-O0-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
+; CHECK-O0-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
+; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_imm_0_v4_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<9>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_imm_0_v4_ftz_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-O3-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd6, %rd2, %rd5;
+; CHECK-O3-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd8, %rd1, %rd7;
+; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-O3-NEXT: ret;
%r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
ret <4 x float> %r
}
define <4 x float> @test_fadd_imm_1_v4_ftz(<4 x float> %a) #2 {
+; CHECK-O0-LABEL: test_fadd_imm_1_v4_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<9>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_1_v4_ftz_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-O0-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
+; CHECK-O0-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-O0-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
+; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd_imm_1_v4_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<9>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_imm_1_v4_ftz_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-O3-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd6, %rd2, %rd5;
+; CHECK-O3-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-O3-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd8, %rd1, %rd7;
+; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-O3-NEXT: ret;
%r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
ret <4 x float> %r
}
define <2 x float> @test_fsub_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-O0-LABEL: test_fsub_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fsub_ftz_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fsub_ftz_param_0];
+; CHECK-O0-NEXT: sub.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fsub_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fsub_ftz_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fsub_ftz_param_0];
+; CHECK-O3-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fsub <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fneg_ftz(<2 x float> %a) #2 {
+; CHECK-O0-LABEL: test_fneg_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<2>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fneg_ftz_param_0];
+; CHECK-O0-NEXT: mov.f32 %f1, 0f00000000;
+; CHECK-O0-NEXT: mov.b64 %rd2, {%f1, %f1};
+; CHECK-O0-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fneg_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<2>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fneg_ftz_param_0];
+; CHECK-O3-NEXT: mov.f32 %f1, 0f00000000;
+; CHECK-O3-NEXT: mov.b64 %rd2, {%f1, %f1};
+; CHECK-O3-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fsub <2 x float> <float 0.0, float 0.0>, %a
ret <2 x float> %r
}
define <2 x float> @test_fmul_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-O0-LABEL: test_fmul_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fmul_ftz_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fmul_ftz_param_0];
+; CHECK-O0-NEXT: mul.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fmul_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fmul_ftz_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fmul_ftz_param_0];
+; CHECK-O3-NEXT: mul.rn.ftz.f32x2 %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fmul <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fma_ftz(<2 x float> %a, <2 x float> %b, <2 x float> %c) #2 {
+; CHECK-O0-LABEL: test_fma_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<5>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd3, [test_fma_ftz_param_2];
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fma_ftz_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fma_ftz_param_0];
+; CHECK-O0-NEXT: fma.rn.ftz.f32x2 %rd4, %rd1, %rd2, %rd3;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fma_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<5>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fma_ftz_param_2];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fma_ftz_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd3, [test_fma_ftz_param_0];
+; CHECK-O3-NEXT: fma.rn.ftz.f32x2 %rd4, %rd3, %rd2, %rd1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-O3-NEXT: ret;
%r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c)
ret <2 x float> %r
}
define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-O0-LABEL: test_fdiv_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<7>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fdiv_ftz_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fdiv_ftz_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
+; CHECK-O0-NEXT: div.rn.ftz.f32 %f6, %f3, %f1;
+; CHECK-O0-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fdiv_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<7>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fdiv_ftz_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fdiv_ftz_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
+; CHECK-O3-NEXT: div.rn.ftz.f32 %f6, %f3, %f1;
+; CHECK-O3-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-O0-LABEL: test_frem_ftz(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<15>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_frem_ftz_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_frem_ftz_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
+; CHECK-O0-NEXT: cvt.rzi.ftz.f32.f32 %f6, %f5;
+; CHECK-O0-NEXT: mul.ftz.f32 %f7, %f6, %f2;
+; CHECK-O0-NEXT: sub.ftz.f32 %f8, %f4, %f7;
+; CHECK-O0-NEXT: testp.infinite.f32 %p1, %f2;
+; CHECK-O0-NEXT: selp.f32 %f9, %f4, %f8, %p1;
+; CHECK-O0-NEXT: div.rn.ftz.f32 %f10, %f3, %f1;
+; CHECK-O0-NEXT: cvt.rzi.ftz.f32.f32 %f11, %f10;
+; CHECK-O0-NEXT: mul.ftz.f32 %f12, %f11, %f1;
+; CHECK-O0-NEXT: sub.ftz.f32 %f13, %f3, %f12;
+; CHECK-O0-NEXT: testp.infinite.f32 %p2, %f1;
+; CHECK-O0-NEXT: selp.f32 %f14, %f3, %f13, %p2;
+; CHECK-O0-NEXT: mov.b64 %rd3, {%f14, %f9};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_frem_ftz(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<15>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_frem_ftz_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_frem_ftz_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
+; CHECK-O3-NEXT: cvt.rzi.ftz.f32.f32 %f6, %f5;
+; CHECK-O3-NEXT: mul.ftz.f32 %f7, %f6, %f2;
+; CHECK-O3-NEXT: sub.ftz.f32 %f8, %f4, %f7;
+; CHECK-O3-NEXT: testp.infinite.f32 %p1, %f2;
+; CHECK-O3-NEXT: selp.f32 %f9, %f4, %f8, %p1;
+; CHECK-O3-NEXT: div.rn.ftz.f32 %f10, %f3, %f1;
+; CHECK-O3-NEXT: cvt.rzi.ftz.f32.f32 %f11, %f10;
+; CHECK-O3-NEXT: mul.ftz.f32 %f12, %f11, %f1;
+; CHECK-O3-NEXT: sub.ftz.f32 %f13, %f3, %f12;
+; CHECK-O3-NEXT: testp.infinite.f32 %p2, %f1;
+; CHECK-O3-NEXT: selp.f32 %f14, %f3, %f13, %p2;
+; CHECK-O3-NEXT: mov.b64 %rd3, {%f14, %f9};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
}
define void @test_ldst_v2f32(ptr %a, ptr %b) #0 {
+; CHECK-O0-LABEL: test_ldst_v2f32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_ldst_v2f32_param_1];
+; CHECK-O0-NEXT: ld.param.u64 %rd1, [test_ldst_v2f32_param_0];
+; CHECK-O0-NEXT: ld.f64 %rd3, [%rd1];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd3;
+; CHECK-O0-NEXT: st.v2.f32 [%rd2], {%f1, %f2};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_ldst_v2f32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_ldst_v2f32_param_0];
+; CHECK-O3-NEXT: ld.f64 %rd2, [%rd1];
+; CHECK-O3-NEXT: ld.param.u64 %rd3, [test_ldst_v2f32_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: st.v2.f32 [%rd3], {%f1, %f2};
+; CHECK-O3-NEXT: ret;
%t1 = load <2 x float>, ptr %a
store <2 x float> %t1, ptr %b, align 32
ret void
}
define void @test_ldst_v3f32(ptr %a, ptr %b) #0 {
+; CHECK-O0-LABEL: test_ldst_v3f32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<2>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_ldst_v3f32_param_1];
+; CHECK-O0-NEXT: ld.param.u64 %rd1, [test_ldst_v3f32_param_0];
+; CHECK-O0-NEXT: ld.u64 %rd3, [%rd1];
+; CHECK-O0-NEXT: ld.f32 %f1, [%rd1+8];
+; CHECK-O0-NEXT: st.f32 [%rd2+8], %f1;
+; CHECK-O0-NEXT: st.u64 [%rd2], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_ldst_v3f32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<2>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_ldst_v3f32_param_0];
+; CHECK-O3-NEXT: ld.u64 %rd2, [%rd1];
+; CHECK-O3-NEXT: ld.f32 %f1, [%rd1+8];
+; CHECK-O3-NEXT: ld.param.u64 %rd3, [test_ldst_v3f32_param_1];
+; CHECK-O3-NEXT: st.f32 [%rd3+8], %f1;
+; CHECK-O3-NEXT: st.u64 [%rd3], %rd2;
+; CHECK-O3-NEXT: ret;
%t1 = load <3 x float>, ptr %a
store <3 x float> %t1, ptr %b, align 32
ret void
}
define void @test_ldst_v4f32(ptr %a, ptr %b) #0 {
+; CHECK-O0-LABEL: test_ldst_v4f32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_ldst_v4f32_param_1];
+; CHECK-O0-NEXT: ld.param.u64 %rd1, [test_ldst_v4f32_param_0];
+; CHECK-O0-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
+; CHECK-O0-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_ldst_v4f32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_ldst_v4f32_param_0];
+; CHECK-O3-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
+; CHECK-O3-NEXT: ld.param.u64 %rd2, [test_ldst_v4f32_param_1];
+; CHECK-O3-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
+; CHECK-O3-NEXT: ret;
%t1 = load <4 x float>, ptr %a
store <4 x float> %t1, ptr %b, align 32
ret void
}
define void @test_ldst_v8f32(ptr %a, ptr %b) #0 {
+; CHECK-O0-LABEL: test_ldst_v8f32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<9>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_ldst_v8f32_param_1];
+; CHECK-O0-NEXT: ld.param.u64 %rd1, [test_ldst_v8f32_param_0];
+; CHECK-O0-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
+; CHECK-O0-NEXT: ld.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1+16];
+; CHECK-O0-NEXT: st.v4.f32 [%rd2+16], {%f5, %f6, %f7, %f8};
+; CHECK-O0-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_ldst_v8f32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<9>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_ldst_v8f32_param_0];
+; CHECK-O3-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
+; CHECK-O3-NEXT: ld.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1+16];
+; CHECK-O3-NEXT: ld.param.u64 %rd2, [test_ldst_v8f32_param_1];
+; CHECK-O3-NEXT: st.v4.f32 [%rd2+16], {%f5, %f6, %f7, %f8};
+; CHECK-O3-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
+; CHECK-O3-NEXT: ret;
%t1 = load <8 x float>, ptr %a
store <8 x float> %t1, ptr %b, align 32
ret void
@@ -189,185 +1036,1408 @@ define void @test_ldst_v8f32(ptr %a, ptr %b) #0 {
declare <2 x float> @test_callee(<2 x float> %a, <2 x float> %b) #0
define <2 x float> @test_call(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_call(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<5>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_call_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_call_param_0];
+; CHECK-O0-NEXT: { // callseq 0, 0
+; CHECK-O0-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-O0-NEXT: st.param.b64 [param0], %rd1;
+; CHECK-O0-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-O0-NEXT: st.param.b64 [param1], %rd2;
+; CHECK-O0-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-O0-NEXT: call.uni (retval0),
+; CHECK-O0-NEXT: test_callee,
+; CHECK-O0-NEXT: (
+; CHECK-O0-NEXT: param0,
+; CHECK-O0-NEXT: param1
+; CHECK-O0-NEXT: );
+; CHECK-O0-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-O0-NEXT: } // callseq 0
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_call(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<5>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_call_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_call_param_1];
+; CHECK-O3-NEXT: { // callseq 0, 0
+; CHECK-O3-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-O3-NEXT: st.param.b64 [param0], %rd1;
+; CHECK-O3-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-O3-NEXT: st.param.b64 [param1], %rd2;
+; CHECK-O3-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-O3-NEXT: call.uni (retval0),
+; CHECK-O3-NEXT: test_callee,
+; CHECK-O3-NEXT: (
+; CHECK-O3-NEXT: param0,
+; CHECK-O3-NEXT: param1
+; CHECK-O3-NEXT: );
+; CHECK-O3-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-O3-NEXT: } // callseq 0
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = call <2 x float> @test_callee(<2 x float> %a, <2 x float> %b)
ret <2 x float> %r
}
define <2 x float> @test_call_flipped(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_call_flipped(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<5>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_call_flipped_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_call_flipped_param_0];
+; CHECK-O0-NEXT: { // callseq 1, 0
+; CHECK-O0-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-O0-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-O0-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-O0-NEXT: st.param.b64 [param1], %rd1;
+; CHECK-O0-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-O0-NEXT: call.uni (retval0),
+; CHECK-O0-NEXT: test_callee,
+; CHECK-O0-NEXT: (
+; CHECK-O0-NEXT: param0,
+; CHECK-O0-NEXT: param1
+; CHECK-O0-NEXT: );
+; CHECK-O0-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-O0-NEXT: } // callseq 1
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_call_flipped(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<5>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_call_flipped_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_call_flipped_param_0];
+; CHECK-O3-NEXT: { // callseq 1, 0
+; CHECK-O3-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-O3-NEXT: st.param.b64 [param0], %rd1;
+; CHECK-O3-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-O3-NEXT: st.param.b64 [param1], %rd2;
+; CHECK-O3-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-O3-NEXT: call.uni (retval0),
+; CHECK-O3-NEXT: test_callee,
+; CHECK-O3-NEXT: (
+; CHECK-O3-NEXT: param0,
+; CHECK-O3-NEXT: param1
+; CHECK-O3-NEXT: );
+; CHECK-O3-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-O3-NEXT: } // callseq 1
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a)
ret <2 x float> %r
}
define <2 x float> @test_tailcall_flipped(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_tailcall_flipped(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<5>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_tailcall_flipped_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_tailcall_flipped_param_0];
+; CHECK-O0-NEXT: { // callseq 2, 0
+; CHECK-O0-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-O0-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-O0-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-O0-NEXT: st.param.b64 [param1], %rd1;
+; CHECK-O0-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-O0-NEXT: call.uni (retval0),
+; CHECK-O0-NEXT: test_callee,
+; CHECK-O0-NEXT: (
+; CHECK-O0-NEXT: param0,
+; CHECK-O0-NEXT: param1
+; CHECK-O0-NEXT: );
+; CHECK-O0-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-O0-NEXT: } // callseq 2
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_tailcall_flipped(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<5>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_tailcall_flipped_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_tailcall_flipped_param_0];
+; CHECK-O3-NEXT: { // callseq 2, 0
+; CHECK-O3-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-O3-NEXT: st.param.b64 [param0], %rd1;
+; CHECK-O3-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-O3-NEXT: st.param.b64 [param1], %rd2;
+; CHECK-O3-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-O3-NEXT: call.uni (retval0),
+; CHECK-O3-NEXT: test_callee,
+; CHECK-O3-NEXT: (
+; CHECK-O3-NEXT: param0,
+; CHECK-O3-NEXT: param1
+; CHECK-O3-NEXT: );
+; CHECK-O3-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-O3-NEXT: } // callseq 2
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = tail call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a)
ret <2 x float> %r
}
define <2 x float> @test_select(<2 x float> %a, <2 x float> %b, i1 zeroext %c) #0 {
+; CHECK-O0-LABEL: test_select(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<2>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.u8 %rs1, [test_select_param_2];
+; CHECK-O0-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-O0-NEXT: setp.eq.b16 %p1, %rs2, 1;
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_select_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_select_param_0];
+; CHECK-O0-NEXT: selp.b64 %rd3, %rd1, %rd2, %p1;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_select(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<2>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.u8 %rs1, [test_select_param_2];
+; CHECK-O3-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-O3-NEXT: setp.eq.b16 %p1, %rs2, 1;
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_select_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_select_param_0];
+; CHECK-O3-NEXT: selp.b64 %rd3, %rd2, %rd1, %p1;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = select i1 %c, <2 x float> %a, <2 x float> %b
ret <2 x float> %r
}
define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d) #0 {
+; CHECK-O0-LABEL: test_select_cc(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<11>;
+; CHECK-O0-NEXT: .reg .b64 %rd<6>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd4, [test_select_cc_param_3];
+; CHECK-O0-NEXT: ld.param.f64 %rd3, [test_select_cc_param_2];
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_select_cc_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_select_cc_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd4;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd3;
+; CHECK-O0-NEXT: setp.neu.f32 %p1, %f3, %f1;
+; CHECK-O0-NEXT: setp.neu.f32 %p2, %f4, %f2;
+; CHECK-O0-NEXT: mov.b64 {%f5, %f6}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f7, %f8}, %rd1;
+; CHECK-O0-NEXT: selp.f32 %f9, %f8, %f6, %p2;
+; CHECK-O0-NEXT: selp.f32 %f10, %f7, %f5, %p1;
+; CHECK-O0-NEXT: mov.b64 %rd5, {%f10, %f9};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd5;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_select_cc(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<11>;
+; CHECK-O3-NEXT: .reg .b64 %rd<6>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_select_cc_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_select_cc_param_1];
+; CHECK-O3-NEXT: ld.param.f64 %rd3, [test_select_cc_param_2];
+; CHECK-O3-NEXT: ld.param.f64 %rd4, [test_select_cc_param_3];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd4;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd3;
+; CHECK-O3-NEXT: setp.neu.f32 %p1, %f3, %f1;
+; CHECK-O3-NEXT: setp.neu.f32 %p2, %f4, %f2;
+; CHECK-O3-NEXT: mov.b64 {%f5, %f6}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f7, %f8}, %rd1;
+; CHECK-O3-NEXT: selp.f32 %f9, %f8, %f6, %p2;
+; CHECK-O3-NEXT: selp.f32 %f10, %f7, %f5, %p1;
+; CHECK-O3-NEXT: mov.b64 %rd5, {%f10, %f9};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd5;
+; CHECK-O3-NEXT: ret;
%cc = fcmp une <2 x float> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
ret <2 x float> %r
}
define <2 x double> @test_select_cc_f64_f32(<2 x double> %a, <2 x double> %b, <2 x float> %c, <2 x float> %d) #0 {
+; CHECK-O0-LABEL: test_select_cc_f64_f32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-NEXT: .reg .f64 %fd<7>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f64_f32_param_1];
+; CHECK-O0-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f64_f32_param_0];
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_select_cc_f64_f32_param_3];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_select_cc_f64_f32_param_2];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.neu.f32 %p1, %f3, %f1;
+; CHECK-O0-NEXT: setp.neu.f32 %p2, %f4, %f2;
+; CHECK-O0-NEXT: selp.f64 %fd5, %fd2, %fd4, %p2;
+; CHECK-O0-NEXT: selp.f64 %fd6, %fd1, %fd3, %p1;
+; CHECK-O0-NEXT: st.param.v2.f64 [func_retval0], {%fd6, %fd5};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_select_cc_f64_f32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-NEXT: .reg .f64 %fd<7>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f64_f32_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_select_cc_f64_f32_param_2];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_select_cc_f64_f32_param_3];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.neu.f32 %p1, %f3, %f1;
+; CHECK-O3-NEXT: setp.neu.f32 %p2, %f4, %f2;
+; CHECK-O3-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f64_f32_param_1];
+; CHECK-O3-NEXT: selp.f64 %fd5, %fd2, %fd4, %p2;
+; CHECK-O3-NEXT: selp.f64 %fd6, %fd1, %fd3, %p1;
+; CHECK-O3-NEXT: st.param.v2.f64 [func_retval0], {%fd6, %fd5};
+; CHECK-O3-NEXT: ret;
%cc = fcmp une <2 x float> %c, %d
%r = select <2 x i1> %cc, <2 x double> %a, <2 x double> %b
ret <2 x double> %r
}
define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x double> %c, <2 x double> %d) #0 {
+; CHECK-O0-LABEL: test_select_cc_f32_f64(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<7>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-NEXT: .reg .f64 %fd<5>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f32_f64_param_3];
+; CHECK-O0-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f32_f64_param_2];
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_select_cc_f32_f64_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_select_cc_f32_f64_param_0];
+; CHECK-O0-NEXT: setp.neu.f64 %p1, %fd1, %fd3;
+; CHECK-O0-NEXT: setp.neu.f64 %p2, %fd2, %fd4;
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: selp.f32 %f5, %f4, %f2, %p2;
+; CHECK-O0-NEXT: selp.f32 %f6, %f3, %f1, %p1;
+; CHECK-O0-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_select_cc_f32_f64(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<7>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-NEXT: .reg .f64 %fd<5>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_select_cc_f32_f64_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_select_cc_f32_f64_param_1];
+; CHECK-O3-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f32_f64_param_2];
+; CHECK-O3-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f32_f64_param_3];
+; CHECK-O3-NEXT: setp.neu.f64 %p1, %fd1, %fd3;
+; CHECK-O3-NEXT: setp.neu.f64 %p2, %fd2, %fd4;
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: selp.f32 %f5, %f4, %f2, %p2;
+; CHECK-O3-NEXT: selp.f32 %f6, %f3, %f1, %p1;
+; CHECK-O3-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%cc = fcmp une <2 x double> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
ret <2 x float> %r
}
define <2 x i1> @test_fcmp_une(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_une(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_une_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_une_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.neu.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.neu.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_une(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_une_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_une_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.neu.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.neu.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp une <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ueq(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_ueq(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ueq_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ueq_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.equ.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.equ.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_ueq(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ueq_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ueq_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.equ.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.equ.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp ueq <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ugt(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_ugt(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ugt_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ugt_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.gtu.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.gtu.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_ugt(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ugt_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ugt_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.gtu.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.gtu.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp ugt <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_uge(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_uge(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_uge_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_uge_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.geu.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.geu.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_uge(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_uge_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_uge_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.geu.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.geu.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp uge <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ult(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_ult(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ult_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ult_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.ltu.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.ltu.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_ult(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ult_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ult_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.ltu.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.ltu.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp ult <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ule(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_ule(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ule_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ule_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.leu.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.leu.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_ule(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ule_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ule_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.leu.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.leu.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp ule <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_uno(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_uno(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_uno_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_uno_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.nan.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.nan.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_uno(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_uno_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_uno_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.nan.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.nan.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp uno <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_one(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_one(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_one_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_one_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.ne.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.ne.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_one(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_one_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_one_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.ne.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.ne.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp one <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_oeq(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_oeq(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_oeq_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_oeq_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.eq.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.eq.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_oeq(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_oeq_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_oeq_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.eq.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.eq.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp oeq <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ogt(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_ogt(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ogt_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ogt_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.gt.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.gt.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_ogt(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ogt_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ogt_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.gt.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.gt.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp ogt <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_oge(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_oge(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_oge_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_oge_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.ge.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.ge.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_oge(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_oge_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_oge_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.ge.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.ge.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp oge <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_olt(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_olt(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_olt_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_olt_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.lt.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.lt.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_olt(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_olt_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_olt_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.lt.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.lt.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp olt <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ole(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_ole(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ole_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ole_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.le.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.le.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_ole(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ole_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ole_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.le.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.le.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp ole <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ord(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_fcmp_ord(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .pred %p<3>;
+; CHECK-O0-NEXT: .reg .b16 %rs<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<5>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ord_param_1];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ord_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O0-NEXT: setp.num.f32 %p1, %f4, %f2;
+; CHECK-O0-NEXT: setp.num.f32 %p2, %f3, %f1;
+; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fcmp_ord(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .pred %p<3>;
+; CHECK-O3-NEXT: .reg .b16 %rs<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ord_param_0];
+; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ord_param_1];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-O3-NEXT: setp.num.f32 %p1, %f4, %f2;
+; CHECK-O3-NEXT: setp.num.f32 %p2, %f3, %f1;
+; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-O3-NEXT: ret;
%r = fcmp ord <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i32> @test_fptosi_i32(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fptosi_i32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b32 %r<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fptosi_i32_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O0-NEXT: cvt.rzi.s32.f32 %r1, %f2;
+; CHECK-O0-NEXT: cvt.rzi.s32.f32 %r2, %f1;
+; CHECK-O0-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fptosi_i32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b32 %r<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fptosi_i32_param_0];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O3-NEXT: cvt.rzi.s32.f32 %r1, %f2;
+; CHECK-O3-NEXT: cvt.rzi.s32.f32 %r2, %f1;
+; CHECK-O3-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-O3-NEXT: ret;
%r = fptosi <2 x float> %a to <2 x i32>
ret <2 x i32> %r
}
define <2 x i64> @test_fptosi_i64(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fptosi_i64(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fptosi_i64_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O0-NEXT: cvt.rzi.s64.f32 %rd2, %f2;
+; CHECK-O0-NEXT: cvt.rzi.s64.f32 %rd3, %f1;
+; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fptosi_i64(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fptosi_i64_param_0];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O3-NEXT: cvt.rzi.s64.f32 %rd2, %f2;
+; CHECK-O3-NEXT: cvt.rzi.s64.f32 %rd3, %f1;
+; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-O3-NEXT: ret;
%r = fptosi <2 x float> %a to <2 x i64>
ret <2 x i64> %r
}
define <2 x i32> @test_fptoui_2xi32(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fptoui_2xi32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b32 %r<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fptoui_2xi32_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O0-NEXT: cvt.rzi.u32.f32 %r1, %f2;
+; CHECK-O0-NEXT: cvt.rzi.u32.f32 %r2, %f1;
+; CHECK-O0-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fptoui_2xi32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b32 %r<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fptoui_2xi32_param_0];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O3-NEXT: cvt.rzi.u32.f32 %r1, %f2;
+; CHECK-O3-NEXT: cvt.rzi.u32.f32 %r2, %f1;
+; CHECK-O3-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-O3-NEXT: ret;
%r = fptoui <2 x float> %a to <2 x i32>
ret <2 x i32> %r
}
define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fptoui_2xi64(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fptoui_2xi64_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O0-NEXT: cvt.rzi.u64.f32 %rd2, %f2;
+; CHECK-O0-NEXT: cvt.rzi.u64.f32 %rd3, %f1;
+; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fptoui_2xi64(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fptoui_2xi64_param_0];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O3-NEXT: cvt.rzi.u64.f32 %rd2, %f2;
+; CHECK-O3-NEXT: cvt.rzi.u64.f32 %rd3, %f1;
+; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-O3-NEXT: ret;
%r = fptoui <2 x float> %a to <2 x i64>
ret <2 x i64> %r
}
define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
+; CHECK-O0-LABEL: test_uitofp_2xi32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b32 %r<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
+; CHECK-O0-NEXT: cvt.rn.f32.u32 %f1, %r2;
+; CHECK-O0-NEXT: cvt.rn.f32.u32 %f2, %r1;
+; CHECK-O0-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_uitofp_2xi32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b32 %r<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
+; CHECK-O3-NEXT: cvt.rn.f32.u32 %f1, %r2;
+; CHECK-O3-NEXT: cvt.rn.f32.u32 %f2, %r1;
+; CHECK-O3-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O3-NEXT: ret;
%r = uitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
+; CHECK-O0-LABEL: test_uitofp_2xi64(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
+; CHECK-O0-NEXT: cvt.rn.f32.u64 %f1, %rd2;
+; CHECK-O0-NEXT: cvt.rn.f32.u64 %f2, %rd1;
+; CHECK-O0-NEXT: mov.b64 %rd3, {%f2, %f1};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_uitofp_2xi64(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
+; CHECK-O3-NEXT: cvt.rn.f32.u64 %f1, %rd2;
+; CHECK-O3-NEXT: cvt.rn.f32.u64 %f2, %rd1;
+; CHECK-O3-NEXT: mov.b64 %rd3, {%f2, %f1};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = uitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
+; CHECK-O0-LABEL: test_sitofp_2xi32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b32 %r<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
+; CHECK-O0-NEXT: cvt.rn.f32.s32 %f1, %r2;
+; CHECK-O0-NEXT: cvt.rn.f32.s32 %f2, %r1;
+; CHECK-O0-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_sitofp_2xi32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b32 %r<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
+; CHECK-O3-NEXT: cvt.rn.f32.s32 %f1, %r2;
+; CHECK-O3-NEXT: cvt.rn.f32.s32 %f2, %r1;
+; CHECK-O3-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O3-NEXT: ret;
%r = sitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 {
+; CHECK-O0-LABEL: test_sitofp_2xi64(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
+; CHECK-O0-NEXT: cvt.rn.f32.s64 %f1, %rd2;
+; CHECK-O0-NEXT: cvt.rn.f32.s64 %f2, %rd1;
+; CHECK-O0-NEXT: mov.b64 %rd3, {%f2, %f1};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_sitofp_2xi64(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
+; CHECK-O3-NEXT: cvt.rn.f32.s64 %f1, %rd2;
+; CHECK-O3-NEXT: cvt.rn.f32.s64 %f2, %rd1;
+; CHECK-O3-NEXT: mov.b64 %rd3, {%f2, %f1};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%r = sitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 {
+; CHECK-O0-LABEL: test_uitofp_2xi32_fadd(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b32 %r<3>;
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<4>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_uitofp_2xi32_fadd_param_1];
+; CHECK-O0-NEXT: cvt.rn.f32.u32 %f1, %r2;
+; CHECK-O0-NEXT: cvt.rn.f32.u32 %f2, %r1;
+; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O0-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_uitofp_2xi32_fadd(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b32 %r<3>;
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<4>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_uitofp_2xi32_fadd_param_1];
+; CHECK-O3-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
+; CHECK-O3-NEXT: cvt.rn.f32.u32 %f1, %r2;
+; CHECK-O3-NEXT: cvt.rn.f32.u32 %f2, %r1;
+; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-O3-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-O3-NEXT: ret;
%c = uitofp <2 x i32> %a to <2 x float>
%r = fadd <2 x float> %b, %c
ret <2 x float> %r
}
define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
+; CHECK-O0-LABEL: test_fptrunc_2xdouble(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-NEXT: .reg .f64 %fd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0];
+; CHECK-O0-NEXT: cvt.rn.f32.f64 %f1, %fd2;
+; CHECK-O0-NEXT: cvt.rn.f32.f64 %f2, %fd1;
+; CHECK-O0-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fptrunc_2xdouble(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-NEXT: .reg .f64 %fd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0];
+; CHECK-O3-NEXT: cvt.rn.f32.f64 %f1, %fd2;
+; CHECK-O3-NEXT: cvt.rn.f32.f64 %f2, %fd1;
+; CHECK-O3-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O3-NEXT: ret;
%r = fptrunc <2 x double> %a to <2 x float>
ret <2 x float> %r
}
define <2 x double> @test_fpext_2xdouble(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_fpext_2xdouble(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-NEXT: .reg .f64 %fd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fpext_2xdouble_param_0];
+; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O0-NEXT: cvt.f64.f32 %fd1, %f2;
+; CHECK-O0-NEXT: cvt.f64.f32 %fd2, %f1;
+; CHECK-O0-NEXT: st.param.v2.f64 [func_retval0], {%fd2, %fd1};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fpext_2xdouble(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-NEXT: .reg .f64 %fd<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fpext_2xdouble_param_0];
+; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-O3-NEXT: cvt.f64.f32 %fd1, %f2;
+; CHECK-O3-NEXT: cvt.f64.f32 %fd2, %f1;
+; CHECK-O3-NEXT: st.param.v2.f64 [func_retval0], {%fd2, %fd1};
+; CHECK-O3-NEXT: ret;
%r = fpext <2 x float> %a to <2 x double>
ret <2 x double> %r
}
define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_bitcast_2xfloat_to_2xi32(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b32 %r<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_2xi32_param_0];
+; CHECK-O0-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r1}, %rd2; }
+; CHECK-O0-NEXT: cvt.u32.u64 %r2, %rd2;
+; CHECK-O0-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_bitcast_2xfloat_to_2xi32(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b32 %r<3>;
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_bitcast_2xfloat_to_2xi32_param_0];
+; CHECK-O3-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r1}, %rd1; }
+; CHECK-O3-NEXT: cvt.u32.u64 %r2, %rd1;
+; CHECK-O3-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-O3-NEXT: ret;
%r = bitcast <2 x float> %a to <2 x i32>
ret <2 x i32> %r
}
define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 {
+; CHECK-O0-LABEL: test_bitcast_2xi32_to_2xfloat(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b32 %r<3>;
+; CHECK-O0-NEXT: .reg .b64 %rd<6>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0];
+; CHECK-O0-NEXT: cvt.u64.u32 %rd1, %r1;
+; CHECK-O0-NEXT: cvt.u64.u32 %rd2, %r2;
+; CHECK-O0-NEXT: shl.b64 %rd3, %rd2, 32;
+; CHECK-O0-NEXT: or.b64 %rd4, %rd1, %rd3;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_bitcast_2xi32_to_2xfloat(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_bitcast_2xi32_to_2xfloat_param_0];
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O3-NEXT: ret;
%r = bitcast <2 x i32> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_bitcast_double_to_2xfloat(double %a) #0 {
+; CHECK-O0-LABEL: test_bitcast_double_to_2xfloat(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<2>;
+; CHECK-O0-NEXT: .reg .f64 %fd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.f64 %fd1, [test_bitcast_double_to_2xfloat_param_0];
+; CHECK-O0-NEXT: mov.b64 %rd1, %fd1;
+; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_bitcast_double_to_2xfloat(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .b64 %rd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_bitcast_double_to_2xfloat_param_0];
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-O3-NEXT: ret;
%r = bitcast double %a to <2 x float>
ret <2 x float> %r
}
define double @test_bitcast_2xfloat_to_double(<2 x float> %a) #0 {
+; CHECK-O0-LABEL: test_bitcast_2xfloat_to_double(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b64 %rd<3>;
+; CHECK-O0-NEXT: .reg .f64 %fd<2>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_double_param_0];
+; CHECK-O0-NEXT: mov.b64 %fd1, %rd2;
+; CHECK-O0-NEXT: st.param.f64 [func_retval0], %fd1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_bitcast_2xfloat_to_double(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f64 %fd<2>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.f64 %fd1, [test_bitcast_2xfloat_to_double_param_0];
+; CHECK-O3-NEXT: st.param.f64 [func_retval0], %fd1;
+; CHECK-O3-NEXT: ret;
%r = bitcast <2 x float> %a to double
ret double %r
}
>From b35a63576751fe3b9d64cda0f1b8db99719f0366 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 11 Feb 2025 17:13:13 -0800
Subject: [PATCH 10/32] [NVPTX] add combiner rule for v2[b]f16 = fp_round v2f32
Now that v2f32 is legal, this node will go straight to instruction
selection. Instead, we want to break it up into two nodes, which can be
handled better in instruction selection, since the final instruction
(cvt.[b]f16x2.f32) takes two f32 arguments.
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 44 ++++++++++++++++++++-
1 file changed, 43 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index e409f03bc617b..9dde7b3ccbe33 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -862,7 +862,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// We have some custom DAG combine patterns for these nodes
setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT, ISD::FADD,
ISD::MUL, ISD::SHL, ISD::SREM, ISD::UREM, ISD::VSELECT,
- ISD::BUILD_VECTOR, ISD::ADDRSPACECAST});
+ ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::FP_ROUND});
// setcc for f16x2 and bf16x2 needs special handling to prevent
// legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -5813,6 +5813,46 @@ static SDValue combineADDRSPACECAST(SDNode *N,
return SDValue();
}
+// Combiner rule for v2[b]f16 = fp_round v2f32:
+//
+// Now that v2f32 is a legal type for a register, this node will go straight to
+// instruction selection. Instead, we want to break it up into two nodes, which
+// can be combined in instruction selection to cvt.[b]f16x2.f32, which requires
+// two f32 registers.
+static SDValue PerformFP_ROUNDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ SDLoc DL(N);
+ SDValue Op = N->getOperand(0);
+ SDValue Trunc = N->getOperand(1);
+ EVT NarrowVT = N->getValueType(0);
+ EVT WideVT = Op.getValueType();
+
+ // v2[b]f16 = fp_round (v2f32 A)
+ // -> v2[b]f16 = (build_vector ([b]f16 = fp_round (extractelt A, 0)),
+ // ([b]f16 = fp_round (extractelt A, 1)))
+ if ((NarrowVT == MVT::v2bf16 || NarrowVT == MVT::v2f16) &&
+ WideVT == MVT::v2f32) {
+ SDValue F32Op0, F32Op1;
+ if (Op.getOpcode() == ISD::BUILD_VECTOR) {
+ F32Op0 = Op.getOperand(0);
+ F32Op1 = Op.getOperand(1);
+ } else {
+ F32Op0 = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op,
+ DCI.DAG.getIntPtrConstant(0, DL));
+ F32Op1 = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op,
+ DCI.DAG.getIntPtrConstant(1, DL));
+ }
+ return DCI.DAG.getBuildVector(
+ NarrowVT, DL,
+ {DCI.DAG.getNode(ISD::FP_ROUND, DL, NarrowVT.getScalarType(), F32Op0,
+ Trunc),
+ DCI.DAG.getNode(ISD::FP_ROUND, DL, NarrowVT.getScalarType(), F32Op1,
+ Trunc)});
+ }
+
+ return SDValue();
+}
+
SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
@@ -5849,6 +5889,8 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
return PerformBUILD_VECTORCombine(N, DCI);
case ISD::ADDRSPACECAST:
return combineADDRSPACECAST(N, DCI);
+ case ISD::FP_ROUND:
+ return PerformFP_ROUNDCombine(N, DCI);
}
return SDValue();
}
>From d56141204f3b336f44952d63e98a36038435fc3f Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 11 Feb 2025 17:28:44 -0800
Subject: [PATCH 11/32] [NVPTX] expand fp_extend v2f32
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 9dde7b3ccbe33..657527085de95 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -930,6 +930,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::FP_ROUND, VT, Custom);
}
}
+ setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
// sm_80 only has conversions between f32 and bf16. Custom lower all other
// bf16 conversions.
>From e1629efae45e209b1d09b3cc0b82406ef91bb056 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 11 Feb 2025 17:45:48 -0800
Subject: [PATCH 12/32] [NVPTX] expand fexp2 and flog2 for v2f32
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 657527085de95..1773f075f422d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1024,6 +1024,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);
+ setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
// FLOG2 supports f32 only
// f16/bf16 types aren't supported, but they are promoted/expanded to f32.
@@ -1031,7 +1032,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::FLOG2, MVT::f32, Legal);
setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32);
setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32);
- setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16}, Expand);
+ setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},
+ Expand);
}
setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
>From e5c402d0d13ac728fcb3bc84a94de8dd390e86f7 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 11 Feb 2025 17:54:57 -0800
Subject: [PATCH 13/32] [NVPTX] handle v2f32 for LDU/LDG
---
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index b4b2eca000097..1144c00ba9857 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1261,7 +1261,8 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
EltVT = EltVT.getVectorElementType();
// vectors of 8/16bits type are loaded/stored as multiples of v4i8/v2x16
// elements.
- if ((EltVT == MVT::f16 && OrigType == MVT::v2f16) ||
+ if ((EltVT == MVT::f32 && OrigType == MVT::v2f32) ||
+ (EltVT == MVT::f16 && OrigType == MVT::v2f16) ||
(EltVT == MVT::bf16 && OrigType == MVT::v2bf16) ||
(EltVT == MVT::i16 && OrigType == MVT::v2i16) ||
(EltVT == MVT::i8 && OrigType == MVT::v4i8)) {
>From baaac0ec24c2fb5bfec265329a135c41d091ffb9 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 11 Feb 2025 18:08:15 -0800
Subject: [PATCH 14/32] [NVPTX] only legalize fadd, fsub, fmul, fma for v2f32 on
sm_100+
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 1773f075f422d..da73a39ea7906 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -883,8 +883,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
if (getOperationAction(Op, MVT::bf16) == Promote)
AddPromotedToType(Op, MVT::bf16, MVT::f32);
- if (STI.hasF32x2Instructions())
- setOperationAction(Op, MVT::v2f32, Legal);
+ setOperationAction(Op, MVT::v2f32,
+ STI.hasF32x2Instructions() ? Legal : Expand);
}
// On SM80, we select add/mul/sub as fma to avoid promotion to float
>From 92c3d6adcc008d80b812b598a30f76a681533008 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 11 Feb 2025 19:13:29 -0800
Subject: [PATCH 15/32] [NVPTX] expand vector_shuffle, insertelt for v2f32 and
lower i64 bitcast
Fixes test/CodeGen/Generic/vector.ll
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 2 ++
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 3 +++
2 files changed, 5 insertions(+)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index da73a39ea7906..769921dc215fc 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -653,6 +653,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Expand);
// Custom conversions to/from v2i8.
setOperationAction(ISD::BITCAST, MVT::v2i8, Custom);
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 372e029fe0fa8..90c3c1f412820 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2605,6 +2605,9 @@ foreach ta = [v2f16, v2bf16, v2i16, v4i8, i32] in {
}
}
+def: Pat<(i64 (bitconvert v2f32:$a)),
+ (i64 Int64Regs:$a)>;
+
// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
// we cannot specify floating-point literals in isel patterns. Therefore, we
// use an integer selp to select either 1 (or -1 in case of signed) or 0
>From c80645c6d6263ceb1764cc2ba601c4f9d6a98831 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 11 Feb 2025 19:52:49 -0800
Subject: [PATCH 16/32] [NVPTX] add combiner rule to peek through bitcast of
BUILD_VECTOR
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 48 ++++++++++++++++++++-
1 file changed, 47 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 769921dc215fc..3af1db8b45d0f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -864,7 +864,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// We have some custom DAG combine patterns for these nodes
setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT, ISD::FADD,
ISD::MUL, ISD::SHL, ISD::SREM, ISD::UREM, ISD::VSELECT,
- ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::FP_ROUND});
+ ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::FP_ROUND,
+ ISD::TRUNCATE});
// setcc for f16x2 and bf16x2 needs special handling to prevent
// legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -5858,6 +5859,49 @@ static SDValue PerformFP_ROUNDCombine(SDNode *N,
return SDValue();
}
+static SDValue PerformTRUNCATECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ SDLoc DL(N);
+ SDValue Op = N->getOperand(0);
+ EVT FromVT = Op.getValueType();
+ EVT ResultVT = N->getValueType(0);
+
+ if (FromVT == MVT::i64 && ResultVT == MVT::i32) {
+ // i32 = truncate (i64 = bitcast (v2f32 = BUILD_VECTOR (f32 A, f32 B)))
+ // -> i32 = bitcast (f32 A)
+ if (Op.getOpcode() == ISD::BITCAST) {
+ SDValue BV = Op.getOperand(0);
+ if (BV.getOpcode() == ISD::BUILD_VECTOR &&
+ BV.getValueType() == MVT::v2f32) {
+ // get lower
+ return DCI.DAG.getNode(ISD::BITCAST, DL, ResultVT, BV.getOperand(0));
+ }
+ }
+
+ // i32 = truncate (i64 = srl
+ // (i64 = bitcast
+ // (v2f32 = BUILD_VECTOR (f32 A, f32 B))), 32)
+ // -> i32 = bitcast (f32 B)
+ if (Op.getOpcode() == ISD::SRL) {
+ if (auto *ShAmt = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+ ShAmt && ShAmt->getAsAPIntVal() == 32) {
+ SDValue Cast = Op.getOperand(0);
+ if (Cast.getOpcode() == ISD::BITCAST) {
+ SDValue BV = Cast.getOperand(0);
+ if (BV.getOpcode() == ISD::BUILD_VECTOR &&
+ BV.getValueType() == MVT::v2f32) {
+ // get upper
+ return DCI.DAG.getNode(ISD::BITCAST, DL, ResultVT,
+ BV.getOperand(1));
+ }
+ }
+ }
+ }
+ }
+
+ return SDValue();
+}
+
SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
@@ -5896,6 +5940,8 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
return combineADDRSPACECAST(N, DCI);
case ISD::FP_ROUND:
return PerformFP_ROUNDCombine(N, DCI);
+ case ISD::TRUNCATE:
+ return PerformTRUNCATECombine(N, DCI);
}
return SDValue();
}
>From e86530871fd6b7dfcbbd17a68bd096092a1016a6 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Thu, 13 Feb 2025 13:24:49 -0800
Subject: [PATCH 17/32] [NVPTX] loads, stores of v2f32 are untyped
Ensures ld.b64 and st.b64 for v2f32. Also remove -O3 in
f32x2-instructions.ll test.
---
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 3088 ++++++-----------
1 file changed, 1009 insertions(+), 2079 deletions(-)
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index 97cde07ed2003..8f4fd3c6e6ee3 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -2,95 +2,57 @@
; ## Full FP32x2 support enabled by default.
; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
-; RUN: | FileCheck --check-prefixes=CHECK-O0 %s
+; RUN: | FileCheck --check-prefixes=CHECK %s
; RUN: %if ptxas %{ \
; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
; RUN: | %ptxas-verify -arch=sm_100 \
; RUN: %}
-; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
-; RUN: -O3 -verify-machineinstrs \
-; RUN: | FileCheck --check-prefixes=CHECK-O3 %s
-; RUN: %if ptxas %{ \
-; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
-; RUN: -O3 -verify-machineinstrs \
-; RUN: | %ptxas-verify -arch=sm_100 \
-; RUN: %}
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "nvptx64-nvidia-cuda"
define <2 x float> @test_ret_const() #0 {
-; CHECK-O0-LABEL: test_ret_const(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_ret_const(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_ret_const(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
ret <2 x float> <float 1.0, float 2.0>
}
define float @test_extract_0(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_extract_0(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<2>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_extract_0_param_0];
-; CHECK-O0-NEXT: { .reg .b32 tmp; mov.b64 {%f1, tmp}, %rd1; }
-; CHECK-O0-NEXT: st.param.f32 [func_retval0], %f1;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_extract_0(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f32 %f1, [test_extract_0_param_0];
-; CHECK-O3-NEXT: st.param.f32 [func_retval0], %f1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_extract_0(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_extract_0_param_0];
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {%f1, tmp}, %rd1; }
+; CHECK-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-NEXT: ret;
%e = extractelement <2 x float> %a, i32 0
ret float %e
}
define float @test_extract_1(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_extract_1(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<2>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_extract_1_param_0];
-; CHECK-O0-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %f1}, %rd1; }
-; CHECK-O0-NEXT: st.param.f32 [func_retval0], %f1;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_extract_1(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f32 %f1, [test_extract_1_param_0+4];
-; CHECK-O3-NEXT: st.param.f32 [func_retval0], %f1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_extract_1(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_extract_1_param_0];
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %f1}, %rd1; }
+; CHECK-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-NEXT: ret;
%e = extractelement <2 x float> %a, i32 1
ret float %e
}
@@ -104,930 +66,523 @@ define float @test_extract_1(<2 x float> %a) #0 {
; }
define <2 x float> @test_fadd(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fadd(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fadd_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_param_0];
-; CHECK-O0-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fadd_param_0];
-; CHECK-O3-NEXT: add.rn.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fadd_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_param_0];
+; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fadd <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fadd_imm_0(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fadd_imm_0(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_imm_0_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_imm_0(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_imm_0_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_imm_0(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_0_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fadd <2 x float> <float 1.0, float 2.0>, %a
ret <2 x float> %r
}
define <2 x float> @test_fadd_imm_1(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fadd_imm_1(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_imm_1_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_imm_1(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_imm_1_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_imm_1(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_1_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fadd <2 x float> %a, <float 1.0, float 2.0>
ret <2 x float> %r
}
define <4 x float> @test_fadd_v4(<4 x float> %a, <4 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fadd_v4(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<11>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd5, %rd6}, [test_fadd_v4_param_1];
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd7, %rd8}, [test_fadd_v4_param_0];
-; CHECK-O0-NEXT: add.rn.f32x2 %rd9, %rd8, %rd6;
-; CHECK-O0-NEXT: add.rn.f32x2 %rd10, %rd7, %rd5;
-; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_v4(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<11>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_v4_param_1];
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd4, %rd5}, [test_fadd_v4_param_0];
-; CHECK-O3-NEXT: add.rn.f32x2 %rd9, %rd5, %rd2;
-; CHECK-O3-NEXT: add.rn.f32x2 %rd10, %rd4, %rd1;
-; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_v4(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<11>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u64 {%rd5, %rd6}, [test_fadd_v4_param_1];
+; CHECK-NEXT: ld.param.v2.u64 {%rd7, %rd8}, [test_fadd_v4_param_0];
+; CHECK-NEXT: add.rn.f32x2 %rd9, %rd8, %rd6;
+; CHECK-NEXT: add.rn.f32x2 %rd10, %rd7, %rd5;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
+; CHECK-NEXT: ret;
%r = fadd <4 x float> %a, %b
ret <4 x float> %r
}
define <4 x float> @test_fadd_imm_0_v4(<4 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fadd_imm_0_v4(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<9>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_0_v4_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-O0-NEXT: mov.b64 %rd5, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
-; CHECK-O0-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd7, {%f4, %f3};
-; CHECK-O0-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
-; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_imm_0_v4(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<9>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_imm_0_v4_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-O3-NEXT: mov.b64 %rd5, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.f32x2 %rd6, %rd2, %rd5;
-; CHECK-O3-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd7, {%f4, %f3};
-; CHECK-O3-NEXT: add.rn.f32x2 %rd8, %rd1, %rd7;
-; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_imm_0_v4(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_0_v4_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
+; CHECK-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-NEXT: ret;
%r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
ret <4 x float> %r
}
define <4 x float> @test_fadd_imm_1_v4(<4 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fadd_imm_1_v4(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<9>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_1_v4_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-O0-NEXT: mov.b64 %rd5, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
-; CHECK-O0-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd7, {%f4, %f3};
-; CHECK-O0-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
-; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_imm_1_v4(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<9>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_imm_1_v4_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-O3-NEXT: mov.b64 %rd5, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.f32x2 %rd6, %rd2, %rd5;
-; CHECK-O3-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd7, {%f4, %f3};
-; CHECK-O3-NEXT: add.rn.f32x2 %rd8, %rd1, %rd7;
-; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_imm_1_v4(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_1_v4_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
+; CHECK-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-NEXT: ret;
%r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
ret <4 x float> %r
}
define <2 x float> @test_fsub(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fsub(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fsub_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fsub_param_0];
-; CHECK-O0-NEXT: sub.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fsub(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fsub_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fsub_param_0];
-; CHECK-O3-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fsub(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fsub_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fsub_param_0];
+; CHECK-NEXT: sub.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fsub <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fneg(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fneg(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<2>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fneg_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f00000000;
-; CHECK-O0-NEXT: mov.b64 %rd2, {%f1, %f1};
-; CHECK-O0-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fneg(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<2>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fneg_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f00000000;
-; CHECK-O3-NEXT: mov.b64 %rd2, {%f1, %f1};
-; CHECK-O3-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fneg(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fneg_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f00000000;
+; CHECK-NEXT: mov.b64 %rd2, {%f1, %f1};
+; CHECK-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fsub <2 x float> <float 0.0, float 0.0>, %a
ret <2 x float> %r
}
define <2 x float> @test_fmul(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fmul(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fmul_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fmul_param_0];
-; CHECK-O0-NEXT: mul.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fmul(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fmul_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fmul_param_0];
-; CHECK-O3-NEXT: mul.rn.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fmul(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fmul_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fmul_param_0];
+; CHECK-NEXT: mul.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fmul <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fma(<2 x float> %a, <2 x float> %b, <2 x float> %c) #0 {
-; CHECK-O0-LABEL: test_fma(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<5>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd3, [test_fma_param_2];
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fma_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fma_param_0];
-; CHECK-O0-NEXT: fma.rn.f32x2 %rd4, %rd1, %rd2, %rd3;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd4;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fma(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<5>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fma_param_2];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fma_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd3, [test_fma_param_0];
-; CHECK-O3-NEXT: fma.rn.f32x2 %rd4, %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd4;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fma(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd3, [test_fma_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fma_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fma_param_0];
+; CHECK-NEXT: fma.rn.f32x2 %rd4, %rd1, %rd2, %rd3;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-NEXT: ret;
%r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c)
ret <2 x float> %r
}
define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fdiv(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<7>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fdiv_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fdiv_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: div.rn.f32 %f5, %f4, %f2;
-; CHECK-O0-NEXT: div.rn.f32 %f6, %f3, %f1;
-; CHECK-O0-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fdiv(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<7>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fdiv_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fdiv_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: div.rn.f32 %f5, %f4, %f2;
-; CHECK-O3-NEXT: div.rn.f32 %f6, %f3, %f1;
-; CHECK-O3-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fdiv(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fdiv_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: div.rn.f32 %f5, %f4, %f2;
+; CHECK-NEXT: div.rn.f32 %f6, %f3, %f1;
+; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_frem(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<15>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_frem_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_frem_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: div.rn.f32 %f5, %f4, %f2;
-; CHECK-O0-NEXT: cvt.rzi.f32.f32 %f6, %f5;
-; CHECK-O0-NEXT: mul.f32 %f7, %f6, %f2;
-; CHECK-O0-NEXT: sub.f32 %f8, %f4, %f7;
-; CHECK-O0-NEXT: testp.infinite.f32 %p1, %f2;
-; CHECK-O0-NEXT: selp.f32 %f9, %f4, %f8, %p1;
-; CHECK-O0-NEXT: div.rn.f32 %f10, %f3, %f1;
-; CHECK-O0-NEXT: cvt.rzi.f32.f32 %f11, %f10;
-; CHECK-O0-NEXT: mul.f32 %f12, %f11, %f1;
-; CHECK-O0-NEXT: sub.f32 %f13, %f3, %f12;
-; CHECK-O0-NEXT: testp.infinite.f32 %p2, %f1;
-; CHECK-O0-NEXT: selp.f32 %f14, %f3, %f13, %p2;
-; CHECK-O0-NEXT: mov.b64 %rd3, {%f14, %f9};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_frem(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<15>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_frem_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_frem_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: div.rn.f32 %f5, %f4, %f2;
-; CHECK-O3-NEXT: cvt.rzi.f32.f32 %f6, %f5;
-; CHECK-O3-NEXT: mul.f32 %f7, %f6, %f2;
-; CHECK-O3-NEXT: sub.f32 %f8, %f4, %f7;
-; CHECK-O3-NEXT: testp.infinite.f32 %p1, %f2;
-; CHECK-O3-NEXT: selp.f32 %f9, %f4, %f8, %p1;
-; CHECK-O3-NEXT: div.rn.f32 %f10, %f3, %f1;
-; CHECK-O3-NEXT: cvt.rzi.f32.f32 %f11, %f10;
-; CHECK-O3-NEXT: mul.f32 %f12, %f11, %f1;
-; CHECK-O3-NEXT: sub.f32 %f13, %f3, %f12;
-; CHECK-O3-NEXT: testp.infinite.f32 %p2, %f1;
-; CHECK-O3-NEXT: selp.f32 %f14, %f3, %f13, %p2;
-; CHECK-O3-NEXT: mov.b64 %rd3, {%f14, %f9};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_frem(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .f32 %f<15>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_frem_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: div.rn.f32 %f5, %f4, %f2;
+; CHECK-NEXT: cvt.rzi.f32.f32 %f6, %f5;
+; CHECK-NEXT: mul.f32 %f7, %f6, %f2;
+; CHECK-NEXT: sub.f32 %f8, %f4, %f7;
+; CHECK-NEXT: testp.infinite.f32 %p1, %f2;
+; CHECK-NEXT: selp.f32 %f9, %f4, %f8, %p1;
+; CHECK-NEXT: div.rn.f32 %f10, %f3, %f1;
+; CHECK-NEXT: cvt.rzi.f32.f32 %f11, %f10;
+; CHECK-NEXT: mul.f32 %f12, %f11, %f1;
+; CHECK-NEXT: sub.f32 %f13, %f3, %f12;
+; CHECK-NEXT: testp.infinite.f32 %p2, %f1;
+; CHECK-NEXT: selp.f32 %f14, %f3, %f13, %p2;
+; CHECK-NEXT: mov.b64 %rd3, {%f14, %f9};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fadd_ftz(<2 x float> %a, <2 x float> %b) #2 {
-; CHECK-O0-LABEL: test_fadd_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fadd_ftz_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_ftz_param_0];
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_ftz_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fadd_ftz_param_0];
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fadd_ftz_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_ftz_param_0];
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fadd <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fadd_imm_0_ftz(<2 x float> %a) #2 {
-; CHECK-O0-LABEL: test_fadd_imm_0_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_imm_0_ftz_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_imm_0_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_imm_0_ftz_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_imm_0_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_0_ftz_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fadd <2 x float> <float 1.0, float 2.0>, %a
ret <2 x float> %r
}
define <2 x float> @test_fadd_imm_1_ftz(<2 x float> %a) #2 {
-; CHECK-O0-LABEL: test_fadd_imm_1_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fadd_imm_1_ftz_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_imm_1_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fadd_imm_1_ftz_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_imm_1_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_1_ftz_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fadd <2 x float> %a, <float 1.0, float 2.0>
ret <2 x float> %r
}
define <4 x float> @test_fadd_v4_ftz(<4 x float> %a, <4 x float> %b) #2 {
-; CHECK-O0-LABEL: test_fadd_v4_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<11>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd5, %rd6}, [test_fadd_v4_ftz_param_1];
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd7, %rd8}, [test_fadd_v4_ftz_param_0];
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd9, %rd8, %rd6;
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd10, %rd7, %rd5;
-; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_v4_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<11>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_v4_ftz_param_1];
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd4, %rd5}, [test_fadd_v4_ftz_param_0];
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd9, %rd5, %rd2;
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd10, %rd4, %rd1;
-; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_v4_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<11>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u64 {%rd5, %rd6}, [test_fadd_v4_ftz_param_1];
+; CHECK-NEXT: ld.param.v2.u64 {%rd7, %rd8}, [test_fadd_v4_ftz_param_0];
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd9, %rd8, %rd6;
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd10, %rd7, %rd5;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
+; CHECK-NEXT: ret;
%r = fadd <4 x float> %a, %b
ret <4 x float> %r
}
define <4 x float> @test_fadd_imm_0_v4_ftz(<4 x float> %a) #2 {
-; CHECK-O0-LABEL: test_fadd_imm_0_v4_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<9>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_0_v4_ftz_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-O0-NEXT: mov.b64 %rd5, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
-; CHECK-O0-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd7, {%f4, %f3};
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
-; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_imm_0_v4_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<9>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_imm_0_v4_ftz_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-O3-NEXT: mov.b64 %rd5, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd6, %rd2, %rd5;
-; CHECK-O3-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd7, {%f4, %f3};
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd8, %rd1, %rd7;
-; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_imm_0_v4_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_0_v4_ftz_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
+; CHECK-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-NEXT: ret;
%r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
ret <4 x float> %r
}
define <4 x float> @test_fadd_imm_1_v4_ftz(<4 x float> %a) #2 {
-; CHECK-O0-LABEL: test_fadd_imm_1_v4_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<9>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_1_v4_ftz_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-O0-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-O0-NEXT: mov.b64 %rd5, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
-; CHECK-O0-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-O0-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-O0-NEXT: mov.b64 %rd7, {%f4, %f3};
-; CHECK-O0-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
-; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fadd_imm_1_v4_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<9>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_fadd_imm_1_v4_ftz_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-O3-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-O3-NEXT: mov.b64 %rd5, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd6, %rd2, %rd5;
-; CHECK-O3-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-O3-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-O3-NEXT: mov.b64 %rd7, {%f4, %f3};
-; CHECK-O3-NEXT: add.rn.ftz.f32x2 %rd8, %rd1, %rd7;
-; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fadd_imm_1_v4_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_1_v4_ftz_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f40800000;
+; CHECK-NEXT: mov.f32 %f2, 0f40400000;
+; CHECK-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
+; CHECK-NEXT: mov.f32 %f3, 0f40000000;
+; CHECK-NEXT: mov.f32 %f4, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-NEXT: ret;
%r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
ret <4 x float> %r
}
define <2 x float> @test_fsub_ftz(<2 x float> %a, <2 x float> %b) #2 {
-; CHECK-O0-LABEL: test_fsub_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fsub_ftz_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fsub_ftz_param_0];
-; CHECK-O0-NEXT: sub.rn.ftz.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fsub_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fsub_ftz_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fsub_ftz_param_0];
-; CHECK-O3-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fsub_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fsub_ftz_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fsub_ftz_param_0];
+; CHECK-NEXT: sub.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fsub <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fneg_ftz(<2 x float> %a) #2 {
-; CHECK-O0-LABEL: test_fneg_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<2>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fneg_ftz_param_0];
-; CHECK-O0-NEXT: mov.f32 %f1, 0f00000000;
-; CHECK-O0-NEXT: mov.b64 %rd2, {%f1, %f1};
-; CHECK-O0-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fneg_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<2>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fneg_ftz_param_0];
-; CHECK-O3-NEXT: mov.f32 %f1, 0f00000000;
-; CHECK-O3-NEXT: mov.b64 %rd2, {%f1, %f1};
-; CHECK-O3-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fneg_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fneg_ftz_param_0];
+; CHECK-NEXT: mov.f32 %f1, 0f00000000;
+; CHECK-NEXT: mov.b64 %rd2, {%f1, %f1};
+; CHECK-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fsub <2 x float> <float 0.0, float 0.0>, %a
ret <2 x float> %r
}
define <2 x float> @test_fmul_ftz(<2 x float> %a, <2 x float> %b) #2 {
-; CHECK-O0-LABEL: test_fmul_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fmul_ftz_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fmul_ftz_param_0];
-; CHECK-O0-NEXT: mul.rn.ftz.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fmul_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fmul_ftz_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fmul_ftz_param_0];
-; CHECK-O3-NEXT: mul.rn.ftz.f32x2 %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fmul_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fmul_ftz_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fmul_ftz_param_0];
+; CHECK-NEXT: mul.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fmul <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_fma_ftz(<2 x float> %a, <2 x float> %b, <2 x float> %c) #2 {
-; CHECK-O0-LABEL: test_fma_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<5>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd3, [test_fma_ftz_param_2];
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fma_ftz_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fma_ftz_param_0];
-; CHECK-O0-NEXT: fma.rn.ftz.f32x2 %rd4, %rd1, %rd2, %rd3;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd4;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fma_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<5>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fma_ftz_param_2];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fma_ftz_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd3, [test_fma_ftz_param_0];
-; CHECK-O3-NEXT: fma.rn.ftz.f32x2 %rd4, %rd3, %rd2, %rd1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd4;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fma_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd3, [test_fma_ftz_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fma_ftz_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fma_ftz_param_0];
+; CHECK-NEXT: fma.rn.ftz.f32x2 %rd4, %rd1, %rd2, %rd3;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-NEXT: ret;
%r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c)
ret <2 x float> %r
}
define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
-; CHECK-O0-LABEL: test_fdiv_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<7>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fdiv_ftz_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fdiv_ftz_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
-; CHECK-O0-NEXT: div.rn.ftz.f32 %f6, %f3, %f1;
-; CHECK-O0-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fdiv_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<7>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fdiv_ftz_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fdiv_ftz_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
-; CHECK-O3-NEXT: div.rn.ftz.f32 %f6, %f3, %f1;
-; CHECK-O3-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fdiv_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_ftz_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fdiv_ftz_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
+; CHECK-NEXT: div.rn.ftz.f32 %f6, %f3, %f1;
+; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
}
define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
-; CHECK-O0-LABEL: test_frem_ftz(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<15>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_frem_ftz_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_frem_ftz_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
-; CHECK-O0-NEXT: cvt.rzi.ftz.f32.f32 %f6, %f5;
-; CHECK-O0-NEXT: mul.ftz.f32 %f7, %f6, %f2;
-; CHECK-O0-NEXT: sub.ftz.f32 %f8, %f4, %f7;
-; CHECK-O0-NEXT: testp.infinite.f32 %p1, %f2;
-; CHECK-O0-NEXT: selp.f32 %f9, %f4, %f8, %p1;
-; CHECK-O0-NEXT: div.rn.ftz.f32 %f10, %f3, %f1;
-; CHECK-O0-NEXT: cvt.rzi.ftz.f32.f32 %f11, %f10;
-; CHECK-O0-NEXT: mul.ftz.f32 %f12, %f11, %f1;
-; CHECK-O0-NEXT: sub.ftz.f32 %f13, %f3, %f12;
-; CHECK-O0-NEXT: testp.infinite.f32 %p2, %f1;
-; CHECK-O0-NEXT: selp.f32 %f14, %f3, %f13, %p2;
-; CHECK-O0-NEXT: mov.b64 %rd3, {%f14, %f9};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_frem_ftz(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<15>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_frem_ftz_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_frem_ftz_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
-; CHECK-O3-NEXT: cvt.rzi.ftz.f32.f32 %f6, %f5;
-; CHECK-O3-NEXT: mul.ftz.f32 %f7, %f6, %f2;
-; CHECK-O3-NEXT: sub.ftz.f32 %f8, %f4, %f7;
-; CHECK-O3-NEXT: testp.infinite.f32 %p1, %f2;
-; CHECK-O3-NEXT: selp.f32 %f9, %f4, %f8, %p1;
-; CHECK-O3-NEXT: div.rn.ftz.f32 %f10, %f3, %f1;
-; CHECK-O3-NEXT: cvt.rzi.ftz.f32.f32 %f11, %f10;
-; CHECK-O3-NEXT: mul.ftz.f32 %f12, %f11, %f1;
-; CHECK-O3-NEXT: sub.ftz.f32 %f13, %f3, %f12;
-; CHECK-O3-NEXT: testp.infinite.f32 %p2, %f1;
-; CHECK-O3-NEXT: selp.f32 %f14, %f3, %f13, %p2;
-; CHECK-O3-NEXT: mov.b64 %rd3, {%f14, %f9};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_frem_ftz(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .f32 %f<15>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_ftz_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_frem_ftz_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
+; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %f6, %f5;
+; CHECK-NEXT: mul.ftz.f32 %f7, %f6, %f2;
+; CHECK-NEXT: sub.ftz.f32 %f8, %f4, %f7;
+; CHECK-NEXT: testp.infinite.f32 %p1, %f2;
+; CHECK-NEXT: selp.f32 %f9, %f4, %f8, %p1;
+; CHECK-NEXT: div.rn.ftz.f32 %f10, %f3, %f1;
+; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %f11, %f10;
+; CHECK-NEXT: mul.ftz.f32 %f12, %f11, %f1;
+; CHECK-NEXT: sub.ftz.f32 %f13, %f3, %f12;
+; CHECK-NEXT: testp.infinite.f32 %p2, %f1;
+; CHECK-NEXT: selp.f32 %f14, %f3, %f13, %p2;
+; CHECK-NEXT: mov.b64 %rd3, {%f14, %f9};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
}
define void @test_ldst_v2f32(ptr %a, ptr %b) #0 {
-; CHECK-O0-LABEL: test_ldst_v2f32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_ldst_v2f32_param_1];
-; CHECK-O0-NEXT: ld.param.u64 %rd1, [test_ldst_v2f32_param_0];
-; CHECK-O0-NEXT: ld.f64 %rd3, [%rd1];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd3;
-; CHECK-O0-NEXT: st.v2.f32 [%rd2], {%f1, %f2};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_ldst_v2f32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_ldst_v2f32_param_0];
-; CHECK-O3-NEXT: ld.f64 %rd2, [%rd1];
-; CHECK-O3-NEXT: ld.param.u64 %rd3, [test_ldst_v2f32_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: st.v2.f32 [%rd3], {%f1, %f2};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_ldst_v2f32(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v2f32_param_1];
+; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v2f32_param_0];
+; CHECK-NEXT: ld.b64 %rd3, [%rd1];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd3;
+; CHECK-NEXT: st.v2.f32 [%rd2], {%f1, %f2};
+; CHECK-NEXT: ret;
%t1 = load <2 x float>, ptr %a
store <2 x float> %t1, ptr %b, align 32
ret void
}
define void @test_ldst_v3f32(ptr %a, ptr %b) #0 {
-; CHECK-O0-LABEL: test_ldst_v3f32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<2>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_ldst_v3f32_param_1];
-; CHECK-O0-NEXT: ld.param.u64 %rd1, [test_ldst_v3f32_param_0];
-; CHECK-O0-NEXT: ld.u64 %rd3, [%rd1];
-; CHECK-O0-NEXT: ld.f32 %f1, [%rd1+8];
-; CHECK-O0-NEXT: st.f32 [%rd2+8], %f1;
-; CHECK-O0-NEXT: st.u64 [%rd2], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_ldst_v3f32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<2>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_ldst_v3f32_param_0];
-; CHECK-O3-NEXT: ld.u64 %rd2, [%rd1];
-; CHECK-O3-NEXT: ld.f32 %f1, [%rd1+8];
-; CHECK-O3-NEXT: ld.param.u64 %rd3, [test_ldst_v3f32_param_1];
-; CHECK-O3-NEXT: st.f32 [%rd3+8], %f1;
-; CHECK-O3-NEXT: st.u64 [%rd3], %rd2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_ldst_v3f32(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v3f32_param_1];
+; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v3f32_param_0];
+; CHECK-NEXT: ld.u64 %rd3, [%rd1];
+; CHECK-NEXT: ld.f32 %f1, [%rd1+8];
+; CHECK-NEXT: st.f32 [%rd2+8], %f1;
+; CHECK-NEXT: st.u64 [%rd2], %rd3;
+; CHECK-NEXT: ret;
%t1 = load <3 x float>, ptr %a
store <3 x float> %t1, ptr %b, align 32
ret void
}
define void @test_ldst_v4f32(ptr %a, ptr %b) #0 {
-; CHECK-O0-LABEL: test_ldst_v4f32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_ldst_v4f32_param_1];
-; CHECK-O0-NEXT: ld.param.u64 %rd1, [test_ldst_v4f32_param_0];
-; CHECK-O0-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
-; CHECK-O0-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_ldst_v4f32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_ldst_v4f32_param_0];
-; CHECK-O3-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
-; CHECK-O3-NEXT: ld.param.u64 %rd2, [test_ldst_v4f32_param_1];
-; CHECK-O3-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_ldst_v4f32(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v4f32_param_1];
+; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v4f32_param_0];
+; CHECK-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
+; CHECK-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
+; CHECK-NEXT: ret;
%t1 = load <4 x float>, ptr %a
store <4 x float> %t1, ptr %b, align 32
ret void
}
define void @test_ldst_v8f32(ptr %a, ptr %b) #0 {
-; CHECK-O0-LABEL: test_ldst_v8f32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<9>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_ldst_v8f32_param_1];
-; CHECK-O0-NEXT: ld.param.u64 %rd1, [test_ldst_v8f32_param_0];
-; CHECK-O0-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
-; CHECK-O0-NEXT: ld.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1+16];
-; CHECK-O0-NEXT: st.v4.f32 [%rd2+16], {%f5, %f6, %f7, %f8};
-; CHECK-O0-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_ldst_v8f32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<9>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_ldst_v8f32_param_0];
-; CHECK-O3-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
-; CHECK-O3-NEXT: ld.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1+16];
-; CHECK-O3-NEXT: ld.param.u64 %rd2, [test_ldst_v8f32_param_1];
-; CHECK-O3-NEXT: st.v4.f32 [%rd2+16], {%f5, %f6, %f7, %f8};
-; CHECK-O3-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_ldst_v8f32(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v8f32_param_1];
+; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v8f32_param_0];
+; CHECK-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
+; CHECK-NEXT: ld.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1+16];
+; CHECK-NEXT: st.v4.f32 [%rd2+16], {%f5, %f6, %f7, %f8};
+; CHECK-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
+; CHECK-NEXT: ret;
%t1 = load <8 x float>, ptr %a
store <8 x float> %t1, ptr %b, align 32
ret void
@@ -1036,1408 +591,783 @@ define void @test_ldst_v8f32(ptr %a, ptr %b) #0 {
declare <2 x float> @test_callee(<2 x float> %a, <2 x float> %b) #0
define <2 x float> @test_call(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_call(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<5>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_call_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_call_param_0];
-; CHECK-O0-NEXT: { // callseq 0, 0
-; CHECK-O0-NEXT: .param .align 8 .b8 param0[8];
-; CHECK-O0-NEXT: st.param.b64 [param0], %rd1;
-; CHECK-O0-NEXT: .param .align 8 .b8 param1[8];
-; CHECK-O0-NEXT: st.param.b64 [param1], %rd2;
-; CHECK-O0-NEXT: .param .align 8 .b8 retval0[8];
-; CHECK-O0-NEXT: call.uni (retval0),
-; CHECK-O0-NEXT: test_callee,
-; CHECK-O0-NEXT: (
-; CHECK-O0-NEXT: param0,
-; CHECK-O0-NEXT: param1
-; CHECK-O0-NEXT: );
-; CHECK-O0-NEXT: ld.param.b64 %rd3, [retval0];
-; CHECK-O0-NEXT: } // callseq 0
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_call(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<5>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_call_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_call_param_1];
-; CHECK-O3-NEXT: { // callseq 0, 0
-; CHECK-O3-NEXT: .param .align 8 .b8 param0[8];
-; CHECK-O3-NEXT: st.param.b64 [param0], %rd1;
-; CHECK-O3-NEXT: .param .align 8 .b8 param1[8];
-; CHECK-O3-NEXT: st.param.b64 [param1], %rd2;
-; CHECK-O3-NEXT: .param .align 8 .b8 retval0[8];
-; CHECK-O3-NEXT: call.uni (retval0),
-; CHECK-O3-NEXT: test_callee,
-; CHECK-O3-NEXT: (
-; CHECK-O3-NEXT: param0,
-; CHECK-O3-NEXT: param1
-; CHECK-O3-NEXT: );
-; CHECK-O3-NEXT: ld.param.b64 %rd3, [retval0];
-; CHECK-O3-NEXT: } // callseq 0
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_call(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_call_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_call_param_0];
+; CHECK-NEXT: { // callseq 0, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.b64 [param0], %rd1;
+; CHECK-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-NEXT: st.param.b64 [param1], %rd2;
+; CHECK-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0,
+; CHECK-NEXT: param1
+; CHECK-NEXT: );
+; CHECK-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-NEXT: } // callseq 0
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = call <2 x float> @test_callee(<2 x float> %a, <2 x float> %b)
ret <2 x float> %r
}
define <2 x float> @test_call_flipped(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_call_flipped(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<5>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_call_flipped_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_call_flipped_param_0];
-; CHECK-O0-NEXT: { // callseq 1, 0
-; CHECK-O0-NEXT: .param .align 8 .b8 param0[8];
-; CHECK-O0-NEXT: st.param.b64 [param0], %rd2;
-; CHECK-O0-NEXT: .param .align 8 .b8 param1[8];
-; CHECK-O0-NEXT: st.param.b64 [param1], %rd1;
-; CHECK-O0-NEXT: .param .align 8 .b8 retval0[8];
-; CHECK-O0-NEXT: call.uni (retval0),
-; CHECK-O0-NEXT: test_callee,
-; CHECK-O0-NEXT: (
-; CHECK-O0-NEXT: param0,
-; CHECK-O0-NEXT: param1
-; CHECK-O0-NEXT: );
-; CHECK-O0-NEXT: ld.param.b64 %rd3, [retval0];
-; CHECK-O0-NEXT: } // callseq 1
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_call_flipped(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<5>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_call_flipped_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_call_flipped_param_0];
-; CHECK-O3-NEXT: { // callseq 1, 0
-; CHECK-O3-NEXT: .param .align 8 .b8 param0[8];
-; CHECK-O3-NEXT: st.param.b64 [param0], %rd1;
-; CHECK-O3-NEXT: .param .align 8 .b8 param1[8];
-; CHECK-O3-NEXT: st.param.b64 [param1], %rd2;
-; CHECK-O3-NEXT: .param .align 8 .b8 retval0[8];
-; CHECK-O3-NEXT: call.uni (retval0),
-; CHECK-O3-NEXT: test_callee,
-; CHECK-O3-NEXT: (
-; CHECK-O3-NEXT: param0,
-; CHECK-O3-NEXT: param1
-; CHECK-O3-NEXT: );
-; CHECK-O3-NEXT: ld.param.b64 %rd3, [retval0];
-; CHECK-O3-NEXT: } // callseq 1
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_call_flipped(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_call_flipped_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_call_flipped_param_0];
+; CHECK-NEXT: { // callseq 1, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-NEXT: st.param.b64 [param1], %rd1;
+; CHECK-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0,
+; CHECK-NEXT: param1
+; CHECK-NEXT: );
+; CHECK-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-NEXT: } // callseq 1
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a)
ret <2 x float> %r
}
define <2 x float> @test_tailcall_flipped(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_tailcall_flipped(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<5>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_tailcall_flipped_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_tailcall_flipped_param_0];
-; CHECK-O0-NEXT: { // callseq 2, 0
-; CHECK-O0-NEXT: .param .align 8 .b8 param0[8];
-; CHECK-O0-NEXT: st.param.b64 [param0], %rd2;
-; CHECK-O0-NEXT: .param .align 8 .b8 param1[8];
-; CHECK-O0-NEXT: st.param.b64 [param1], %rd1;
-; CHECK-O0-NEXT: .param .align 8 .b8 retval0[8];
-; CHECK-O0-NEXT: call.uni (retval0),
-; CHECK-O0-NEXT: test_callee,
-; CHECK-O0-NEXT: (
-; CHECK-O0-NEXT: param0,
-; CHECK-O0-NEXT: param1
-; CHECK-O0-NEXT: );
-; CHECK-O0-NEXT: ld.param.b64 %rd3, [retval0];
-; CHECK-O0-NEXT: } // callseq 2
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_tailcall_flipped(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<5>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_tailcall_flipped_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_tailcall_flipped_param_0];
-; CHECK-O3-NEXT: { // callseq 2, 0
-; CHECK-O3-NEXT: .param .align 8 .b8 param0[8];
-; CHECK-O3-NEXT: st.param.b64 [param0], %rd1;
-; CHECK-O3-NEXT: .param .align 8 .b8 param1[8];
-; CHECK-O3-NEXT: st.param.b64 [param1], %rd2;
-; CHECK-O3-NEXT: .param .align 8 .b8 retval0[8];
-; CHECK-O3-NEXT: call.uni (retval0),
-; CHECK-O3-NEXT: test_callee,
-; CHECK-O3-NEXT: (
-; CHECK-O3-NEXT: param0,
-; CHECK-O3-NEXT: param1
-; CHECK-O3-NEXT: );
-; CHECK-O3-NEXT: ld.param.b64 %rd3, [retval0];
-; CHECK-O3-NEXT: } // callseq 2
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_tailcall_flipped(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_tailcall_flipped_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tailcall_flipped_param_0];
+; CHECK-NEXT: { // callseq 2, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-NEXT: st.param.b64 [param1], %rd1;
+; CHECK-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0,
+; CHECK-NEXT: param1
+; CHECK-NEXT: );
+; CHECK-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-NEXT: } // callseq 2
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = tail call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a)
ret <2 x float> %r
}
define <2 x float> @test_select(<2 x float> %a, <2 x float> %b, i1 zeroext %c) #0 {
-; CHECK-O0-LABEL: test_select(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<2>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.u8 %rs1, [test_select_param_2];
-; CHECK-O0-NEXT: and.b16 %rs2, %rs1, 1;
-; CHECK-O0-NEXT: setp.eq.b16 %p1, %rs2, 1;
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_select_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_select_param_0];
-; CHECK-O0-NEXT: selp.b64 %rd3, %rd1, %rd2, %p1;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_select(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<2>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.u8 %rs1, [test_select_param_2];
-; CHECK-O3-NEXT: and.b16 %rs2, %rs1, 1;
-; CHECK-O3-NEXT: setp.eq.b16 %p1, %rs2, 1;
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_select_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_select_param_0];
-; CHECK-O3-NEXT: selp.b64 %rd3, %rd2, %rd1, %p1;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_select(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [test_select_param_2];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.eq.b16 %p1, %rs2, 1;
+; CHECK-NEXT: ld.param.b64 %rd2, [test_select_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_select_param_0];
+; CHECK-NEXT: selp.b64 %rd3, %rd1, %rd2, %p1;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = select i1 %c, <2 x float> %a, <2 x float> %b
ret <2 x float> %r
}
define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d) #0 {
-; CHECK-O0-LABEL: test_select_cc(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<11>;
-; CHECK-O0-NEXT: .reg .b64 %rd<6>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd4, [test_select_cc_param_3];
-; CHECK-O0-NEXT: ld.param.f64 %rd3, [test_select_cc_param_2];
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_select_cc_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_select_cc_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd4;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd3;
-; CHECK-O0-NEXT: setp.neu.f32 %p1, %f3, %f1;
-; CHECK-O0-NEXT: setp.neu.f32 %p2, %f4, %f2;
-; CHECK-O0-NEXT: mov.b64 {%f5, %f6}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f7, %f8}, %rd1;
-; CHECK-O0-NEXT: selp.f32 %f9, %f8, %f6, %p2;
-; CHECK-O0-NEXT: selp.f32 %f10, %f7, %f5, %p1;
-; CHECK-O0-NEXT: mov.b64 %rd5, {%f10, %f9};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd5;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_select_cc(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<11>;
-; CHECK-O3-NEXT: .reg .b64 %rd<6>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_select_cc_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_select_cc_param_1];
-; CHECK-O3-NEXT: ld.param.f64 %rd3, [test_select_cc_param_2];
-; CHECK-O3-NEXT: ld.param.f64 %rd4, [test_select_cc_param_3];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd4;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd3;
-; CHECK-O3-NEXT: setp.neu.f32 %p1, %f3, %f1;
-; CHECK-O3-NEXT: setp.neu.f32 %p2, %f4, %f2;
-; CHECK-O3-NEXT: mov.b64 {%f5, %f6}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f7, %f8}, %rd1;
-; CHECK-O3-NEXT: selp.f32 %f9, %f8, %f6, %p2;
-; CHECK-O3-NEXT: selp.f32 %f10, %f7, %f5, %p1;
-; CHECK-O3-NEXT: mov.b64 %rd5, {%f10, %f9};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd5;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_select_cc(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .f32 %f<11>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd4, [test_select_cc_param_3];
+; CHECK-NEXT: ld.param.b64 %rd3, [test_select_cc_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd3;
+; CHECK-NEXT: setp.neu.f32 %p1, %f3, %f1;
+; CHECK-NEXT: setp.neu.f32 %p2, %f4, %f2;
+; CHECK-NEXT: mov.b64 {%f5, %f6}, %rd2;
+; CHECK-NEXT: mov.b64 {%f7, %f8}, %rd1;
+; CHECK-NEXT: selp.f32 %f9, %f8, %f6, %p2;
+; CHECK-NEXT: selp.f32 %f10, %f7, %f5, %p1;
+; CHECK-NEXT: mov.b64 %rd5, {%f10, %f9};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd5;
+; CHECK-NEXT: ret;
%cc = fcmp une <2 x float> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
ret <2 x float> %r
}
define <2 x double> @test_select_cc_f64_f32(<2 x double> %a, <2 x double> %b, <2 x float> %c, <2 x float> %d) #0 {
-; CHECK-O0-LABEL: test_select_cc_f64_f32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-NEXT: .reg .f64 %fd<7>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f64_f32_param_1];
-; CHECK-O0-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f64_f32_param_0];
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_select_cc_f64_f32_param_3];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_select_cc_f64_f32_param_2];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.neu.f32 %p1, %f3, %f1;
-; CHECK-O0-NEXT: setp.neu.f32 %p2, %f4, %f2;
-; CHECK-O0-NEXT: selp.f64 %fd5, %fd2, %fd4, %p2;
-; CHECK-O0-NEXT: selp.f64 %fd6, %fd1, %fd3, %p1;
-; CHECK-O0-NEXT: st.param.v2.f64 [func_retval0], {%fd6, %fd5};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_select_cc_f64_f32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-NEXT: .reg .f64 %fd<7>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f64_f32_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_select_cc_f64_f32_param_2];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_select_cc_f64_f32_param_3];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.neu.f32 %p1, %f3, %f1;
-; CHECK-O3-NEXT: setp.neu.f32 %p2, %f4, %f2;
-; CHECK-O3-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f64_f32_param_1];
-; CHECK-O3-NEXT: selp.f64 %fd5, %fd2, %fd4, %p2;
-; CHECK-O3-NEXT: selp.f64 %fd6, %fd1, %fd3, %p1;
-; CHECK-O3-NEXT: st.param.v2.f64 [func_retval0], {%fd6, %fd5};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_select_cc_f64_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .f64 %fd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f64_f32_param_1];
+; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f64_f32_param_0];
+; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_f64_f32_param_3];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_f64_f32_param_2];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.neu.f32 %p1, %f3, %f1;
+; CHECK-NEXT: setp.neu.f32 %p2, %f4, %f2;
+; CHECK-NEXT: selp.f64 %fd5, %fd2, %fd4, %p2;
+; CHECK-NEXT: selp.f64 %fd6, %fd1, %fd3, %p1;
+; CHECK-NEXT: st.param.v2.f64 [func_retval0], {%fd6, %fd5};
+; CHECK-NEXT: ret;
%cc = fcmp une <2 x float> %c, %d
%r = select <2 x i1> %cc, <2 x double> %a, <2 x double> %b
ret <2 x double> %r
}
define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x double> %c, <2 x double> %d) #0 {
-; CHECK-O0-LABEL: test_select_cc_f32_f64(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<7>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-NEXT: .reg .f64 %fd<5>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f32_f64_param_3];
-; CHECK-O0-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f32_f64_param_2];
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_select_cc_f32_f64_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_select_cc_f32_f64_param_0];
-; CHECK-O0-NEXT: setp.neu.f64 %p1, %fd1, %fd3;
-; CHECK-O0-NEXT: setp.neu.f64 %p2, %fd2, %fd4;
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: selp.f32 %f5, %f4, %f2, %p2;
-; CHECK-O0-NEXT: selp.f32 %f6, %f3, %f1, %p1;
-; CHECK-O0-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_select_cc_f32_f64(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<7>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-NEXT: .reg .f64 %fd<5>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_select_cc_f32_f64_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_select_cc_f32_f64_param_1];
-; CHECK-O3-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f32_f64_param_2];
-; CHECK-O3-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f32_f64_param_3];
-; CHECK-O3-NEXT: setp.neu.f64 %p1, %fd1, %fd3;
-; CHECK-O3-NEXT: setp.neu.f64 %p2, %fd2, %fd4;
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: selp.f32 %f5, %f4, %f2, %p2;
-; CHECK-O3-NEXT: selp.f32 %f6, %f3, %f1, %p1;
-; CHECK-O3-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_select_cc_f32_f64(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f32_f64_param_3];
+; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f32_f64_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_f32_f64_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_f32_f64_param_0];
+; CHECK-NEXT: setp.neu.f64 %p1, %fd1, %fd3;
+; CHECK-NEXT: setp.neu.f64 %p2, %fd2, %fd4;
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: selp.f32 %f5, %f4, %f2, %p2;
+; CHECK-NEXT: selp.f32 %f6, %f3, %f1, %p1;
+; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%cc = fcmp une <2 x double> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
ret <2 x float> %r
}
define <2 x i1> @test_fcmp_une(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_une(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_une_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_une_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.neu.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.neu.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_une(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_une_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_une_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.neu.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.neu.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_une(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_une_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_une_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.neu.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.neu.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp une <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ueq(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_ueq(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ueq_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ueq_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.equ.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.equ.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_ueq(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ueq_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ueq_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.equ.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.equ.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_ueq(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ueq_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ueq_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.equ.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.equ.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp ueq <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ugt(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_ugt(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ugt_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ugt_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.gtu.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.gtu.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_ugt(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ugt_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ugt_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.gtu.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.gtu.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_ugt(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ugt_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ugt_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.gtu.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.gtu.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp ugt <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_uge(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_uge(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_uge_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_uge_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.geu.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.geu.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_uge(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_uge_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_uge_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.geu.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.geu.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_uge(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_uge_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_uge_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.geu.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.geu.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp uge <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ult(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_ult(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ult_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ult_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.ltu.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.ltu.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_ult(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ult_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ult_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.ltu.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.ltu.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_ult(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ult_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ult_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.ltu.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.ltu.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp ult <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ule(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_ule(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ule_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ule_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.leu.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.leu.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_ule(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ule_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ule_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.leu.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.leu.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_ule(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ule_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ule_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.leu.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.leu.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp ule <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_uno(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_uno(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_uno_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_uno_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.nan.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.nan.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_uno(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_uno_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_uno_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.nan.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.nan.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_uno(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_uno_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_uno_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.nan.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.nan.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp uno <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_one(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_one(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_one_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_one_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.ne.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.ne.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_one(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_one_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_one_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.ne.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.ne.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_one(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_one_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_one_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.ne.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.ne.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp one <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_oeq(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_oeq(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_oeq_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_oeq_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.eq.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.eq.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_oeq(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_oeq_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_oeq_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.eq.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.eq.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_oeq(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_oeq_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_oeq_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.eq.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.eq.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp oeq <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ogt(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_ogt(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ogt_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ogt_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.gt.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.gt.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_ogt(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ogt_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ogt_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.gt.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.gt.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_ogt(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ogt_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ogt_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.gt.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.gt.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp ogt <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_oge(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_oge(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_oge_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_oge_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.ge.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.ge.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_oge(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_oge_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_oge_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.ge.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.ge.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_oge(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_oge_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_oge_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.ge.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.ge.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp oge <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_olt(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_olt(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_olt_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_olt_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.lt.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.lt.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_olt(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_olt_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_olt_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.lt.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.lt.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_olt(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_olt_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_olt_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.lt.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.lt.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp olt <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ole(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_ole(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ole_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ole_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.le.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.le.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_ole(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ole_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ole_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.le.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.le.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_ole(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ole_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ole_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.le.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.le.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp ole <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i1> @test_fcmp_ord(<2 x float> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_fcmp_ord(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .pred %p<3>;
-; CHECK-O0-NEXT: .reg .b16 %rs<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<5>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd2, [test_fcmp_ord_param_1];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fcmp_ord_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O0-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O0-NEXT: setp.num.f32 %p1, %f4, %f2;
-; CHECK-O0-NEXT: setp.num.f32 %p2, %f3, %f1;
-; CHECK-O0-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O0-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O0-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fcmp_ord(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .pred %p<3>;
-; CHECK-O3-NEXT: .reg .b16 %rs<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<5>;
-; CHECK-O3-NEXT: .reg .b64 %rd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fcmp_ord_param_0];
-; CHECK-O3-NEXT: ld.param.f64 %rd2, [test_fcmp_ord_param_1];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-O3-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-O3-NEXT: setp.num.f32 %p1, %f4, %f2;
-; CHECK-O3-NEXT: setp.num.f32 %p2, %f3, %f1;
-; CHECK-O3-NEXT: selp.u16 %rs1, -1, 0, %p2;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-O3-NEXT: selp.u16 %rs2, -1, 0, %p1;
-; CHECK-O3-NEXT: st.param.b8 [func_retval0+1], %rs2;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fcmp_ord(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ord_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ord_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
+; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
+; CHECK-NEXT: setp.num.f32 %p1, %f4, %f2;
+; CHECK-NEXT: setp.num.f32 %p2, %f3, %f1;
+; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT: ret;
%r = fcmp ord <2 x float> %a, %b
ret <2 x i1> %r
}
define <2 x i32> @test_fptosi_i32(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fptosi_i32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b32 %r<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fptosi_i32_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O0-NEXT: cvt.rzi.s32.f32 %r1, %f2;
-; CHECK-O0-NEXT: cvt.rzi.s32.f32 %r2, %f1;
-; CHECK-O0-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fptosi_i32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b32 %r<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fptosi_i32_param_0];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O3-NEXT: cvt.rzi.s32.f32 %r1, %f2;
-; CHECK-O3-NEXT: cvt.rzi.s32.f32 %r2, %f1;
-; CHECK-O3-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fptosi_i32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fptosi_i32_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-NEXT: cvt.rzi.s32.f32 %r1, %f2;
+; CHECK-NEXT: cvt.rzi.s32.f32 %r2, %f1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: ret;
%r = fptosi <2 x float> %a to <2 x i32>
ret <2 x i32> %r
}
define <2 x i64> @test_fptosi_i64(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fptosi_i64(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fptosi_i64_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O0-NEXT: cvt.rzi.s64.f32 %rd2, %f2;
-; CHECK-O0-NEXT: cvt.rzi.s64.f32 %rd3, %f1;
-; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fptosi_i64(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fptosi_i64_param_0];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O3-NEXT: cvt.rzi.s64.f32 %rd2, %f2;
-; CHECK-O3-NEXT: cvt.rzi.s64.f32 %rd3, %f1;
-; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fptosi_i64(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fptosi_i64_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-NEXT: cvt.rzi.s64.f32 %rd2, %f2;
+; CHECK-NEXT: cvt.rzi.s64.f32 %rd3, %f1;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-NEXT: ret;
%r = fptosi <2 x float> %a to <2 x i64>
ret <2 x i64> %r
}
define <2 x i32> @test_fptoui_2xi32(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fptoui_2xi32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b32 %r<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fptoui_2xi32_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O0-NEXT: cvt.rzi.u32.f32 %r1, %f2;
-; CHECK-O0-NEXT: cvt.rzi.u32.f32 %r2, %f1;
-; CHECK-O0-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fptoui_2xi32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b32 %r<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fptoui_2xi32_param_0];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O3-NEXT: cvt.rzi.u32.f32 %r1, %f2;
-; CHECK-O3-NEXT: cvt.rzi.u32.f32 %r2, %f1;
-; CHECK-O3-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fptoui_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fptoui_2xi32_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-NEXT: cvt.rzi.u32.f32 %r1, %f2;
+; CHECK-NEXT: cvt.rzi.u32.f32 %r2, %f1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: ret;
%r = fptoui <2 x float> %a to <2 x i32>
ret <2 x i32> %r
}
define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fptoui_2xi64(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fptoui_2xi64_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O0-NEXT: cvt.rzi.u64.f32 %rd2, %f2;
-; CHECK-O0-NEXT: cvt.rzi.u64.f32 %rd3, %f1;
-; CHECK-O0-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fptoui_2xi64(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fptoui_2xi64_param_0];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O3-NEXT: cvt.rzi.u64.f32 %rd2, %f2;
-; CHECK-O3-NEXT: cvt.rzi.u64.f32 %rd3, %f1;
-; CHECK-O3-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fptoui_2xi64(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fptoui_2xi64_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-NEXT: cvt.rzi.u64.f32 %rd2, %f2;
+; CHECK-NEXT: cvt.rzi.u64.f32 %rd3, %f1;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-NEXT: ret;
%r = fptoui <2 x float> %a to <2 x i64>
ret <2 x i64> %r
}
define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
-; CHECK-O0-LABEL: test_uitofp_2xi32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b32 %r<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
-; CHECK-O0-NEXT: cvt.rn.f32.u32 %f1, %r2;
-; CHECK-O0-NEXT: cvt.rn.f32.u32 %f2, %r1;
-; CHECK-O0-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_uitofp_2xi32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b32 %r<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
-; CHECK-O3-NEXT: cvt.rn.f32.u32 %f1, %r2;
-; CHECK-O3-NEXT: cvt.rn.f32.u32 %f2, %r1;
-; CHECK-O3-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_uitofp_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
+; CHECK-NEXT: cvt.rn.f32.u32 %f1, %r2;
+; CHECK-NEXT: cvt.rn.f32.u32 %f2, %r1;
+; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
%r = uitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
-; CHECK-O0-LABEL: test_uitofp_2xi64(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
-; CHECK-O0-NEXT: cvt.rn.f32.u64 %f1, %rd2;
-; CHECK-O0-NEXT: cvt.rn.f32.u64 %f2, %rd1;
-; CHECK-O0-NEXT: mov.b64 %rd3, {%f2, %f1};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_uitofp_2xi64(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
-; CHECK-O3-NEXT: cvt.rn.f32.u64 %f1, %rd2;
-; CHECK-O3-NEXT: cvt.rn.f32.u64 %f2, %rd1;
-; CHECK-O3-NEXT: mov.b64 %rd3, {%f2, %f1};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_uitofp_2xi64(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
+; CHECK-NEXT: cvt.rn.f32.u64 %f1, %rd2;
+; CHECK-NEXT: cvt.rn.f32.u64 %f2, %rd1;
+; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = uitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
-; CHECK-O0-LABEL: test_sitofp_2xi32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b32 %r<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
-; CHECK-O0-NEXT: cvt.rn.f32.s32 %f1, %r2;
-; CHECK-O0-NEXT: cvt.rn.f32.s32 %f2, %r1;
-; CHECK-O0-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_sitofp_2xi32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b32 %r<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
-; CHECK-O3-NEXT: cvt.rn.f32.s32 %f1, %r2;
-; CHECK-O3-NEXT: cvt.rn.f32.s32 %f2, %r1;
-; CHECK-O3-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_sitofp_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
+; CHECK-NEXT: cvt.rn.f32.s32 %f1, %r2;
+; CHECK-NEXT: cvt.rn.f32.s32 %f2, %r1;
+; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
%r = sitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 {
-; CHECK-O0-LABEL: test_sitofp_2xi64(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
-; CHECK-O0-NEXT: cvt.rn.f32.s64 %f1, %rd2;
-; CHECK-O0-NEXT: cvt.rn.f32.s64 %f2, %rd1;
-; CHECK-O0-NEXT: mov.b64 %rd3, {%f2, %f1};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_sitofp_2xi64(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
-; CHECK-O3-NEXT: cvt.rn.f32.s64 %f1, %rd2;
-; CHECK-O3-NEXT: cvt.rn.f32.s64 %f2, %rd1;
-; CHECK-O3-NEXT: mov.b64 %rd3, {%f2, %f1};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_sitofp_2xi64(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
+; CHECK-NEXT: cvt.rn.f32.s64 %f1, %rd2;
+; CHECK-NEXT: cvt.rn.f32.s64 %f2, %rd1;
+; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%r = sitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 {
-; CHECK-O0-LABEL: test_uitofp_2xi32_fadd(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b32 %r<3>;
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<4>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_uitofp_2xi32_fadd_param_1];
-; CHECK-O0-NEXT: cvt.rn.f32.u32 %f1, %r2;
-; CHECK-O0-NEXT: cvt.rn.f32.u32 %f2, %r1;
-; CHECK-O0-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O0-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_uitofp_2xi32_fadd(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b32 %r<3>;
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<4>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_uitofp_2xi32_fadd_param_1];
-; CHECK-O3-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
-; CHECK-O3-NEXT: cvt.rn.f32.u32 %f1, %r2;
-; CHECK-O3-NEXT: cvt.rn.f32.u32 %f2, %r1;
-; CHECK-O3-NEXT: mov.b64 %rd2, {%f2, %f1};
-; CHECK-O3-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd3;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_uitofp_2xi32_fadd(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_1];
+; CHECK-NEXT: cvt.rn.f32.u32 %f1, %r2;
+; CHECK-NEXT: cvt.rn.f32.u32 %f2, %r1;
+; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: ret;
%c = uitofp <2 x i32> %a to <2 x float>
%r = fadd <2 x float> %b, %c
ret <2 x float> %r
}
define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
-; CHECK-O0-LABEL: test_fptrunc_2xdouble(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-NEXT: .reg .f64 %fd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0];
-; CHECK-O0-NEXT: cvt.rn.f32.f64 %f1, %fd2;
-; CHECK-O0-NEXT: cvt.rn.f32.f64 %f2, %fd1;
-; CHECK-O0-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fptrunc_2xdouble(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-NEXT: .reg .f64 %fd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0];
-; CHECK-O3-NEXT: cvt.rn.f32.f64 %f1, %fd2;
-; CHECK-O3-NEXT: cvt.rn.f32.f64 %f2, %fd1;
-; CHECK-O3-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fptrunc_2xdouble(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0];
+; CHECK-NEXT: cvt.rn.f32.f64 %f1, %fd2;
+; CHECK-NEXT: cvt.rn.f32.f64 %f2, %fd1;
+; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
%r = fptrunc <2 x double> %a to <2 x float>
ret <2 x float> %r
}
define <2 x double> @test_fpext_2xdouble(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_fpext_2xdouble(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .f32 %f<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-NEXT: .reg .f64 %fd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %rd1, [test_fpext_2xdouble_param_0];
-; CHECK-O0-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O0-NEXT: cvt.f64.f32 %fd1, %f2;
-; CHECK-O0-NEXT: cvt.f64.f32 %fd2, %f1;
-; CHECK-O0-NEXT: st.param.v2.f64 [func_retval0], {%fd2, %fd1};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_fpext_2xdouble(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f32 %f<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-NEXT: .reg .f64 %fd<3>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_fpext_2xdouble_param_0];
-; CHECK-O3-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-O3-NEXT: cvt.f64.f32 %fd1, %f2;
-; CHECK-O3-NEXT: cvt.f64.f32 %fd2, %f1;
-; CHECK-O3-NEXT: st.param.v2.f64 [func_retval0], {%fd2, %fd1};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_fpext_2xdouble(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fpext_2xdouble_param_0];
+; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
+; CHECK-NEXT: cvt.f64.f32 %fd1, %f2;
+; CHECK-NEXT: cvt.f64.f32 %fd2, %f1;
+; CHECK-NEXT: st.param.v2.f64 [func_retval0], {%fd2, %fd1};
+; CHECK-NEXT: ret;
%r = fpext <2 x float> %a to <2 x double>
ret <2 x double> %r
}
define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_bitcast_2xfloat_to_2xi32(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b32 %r<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_2xi32_param_0];
-; CHECK-O0-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r1}, %rd2; }
-; CHECK-O0-NEXT: cvt.u32.u64 %r2, %rd2;
-; CHECK-O0-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_bitcast_2xfloat_to_2xi32(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b32 %r<3>;
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.u64 %rd1, [test_bitcast_2xfloat_to_2xi32_param_0];
-; CHECK-O3-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r1}, %rd1; }
-; CHECK-O3-NEXT: cvt.u32.u64 %r2, %rd1;
-; CHECK-O3-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_bitcast_2xfloat_to_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_2xi32_param_0];
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r1}, %rd2; }
+; CHECK-NEXT: cvt.u32.u64 %r2, %rd2;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: ret;
%r = bitcast <2 x float> %a to <2 x i32>
ret <2 x i32> %r
}
define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 {
-; CHECK-O0-LABEL: test_bitcast_2xi32_to_2xfloat(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b32 %r<3>;
-; CHECK-O0-NEXT: .reg .b64 %rd<6>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0];
-; CHECK-O0-NEXT: cvt.u64.u32 %rd1, %r1;
-; CHECK-O0-NEXT: cvt.u64.u32 %rd2, %r2;
-; CHECK-O0-NEXT: shl.b64 %rd3, %rd2, 32;
-; CHECK-O0-NEXT: or.b64 %rd4, %rd1, %rd3;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd4;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_bitcast_2xi32_to_2xfloat(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_bitcast_2xi32_to_2xfloat_param_0];
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_bitcast_2xi32_to_2xfloat(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0];
+; CHECK-NEXT: cvt.u64.u32 %rd1, %r1;
+; CHECK-NEXT: cvt.u64.u32 %rd2, %r2;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 32;
+; CHECK-NEXT: or.b64 %rd4, %rd1, %rd3;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-NEXT: ret;
%r = bitcast <2 x i32> %a to <2 x float>
ret <2 x float> %r
}
define <2 x float> @test_bitcast_double_to_2xfloat(double %a) #0 {
-; CHECK-O0-LABEL: test_bitcast_double_to_2xfloat(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<2>;
-; CHECK-O0-NEXT: .reg .f64 %fd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.f64 %fd1, [test_bitcast_double_to_2xfloat_param_0];
-; CHECK-O0-NEXT: mov.b64 %rd1, %fd1;
-; CHECK-O0-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_bitcast_double_to_2xfloat(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .b64 %rd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %rd1, [test_bitcast_double_to_2xfloat_param_0];
-; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_bitcast_double_to_2xfloat(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f64 %fd1, [test_bitcast_double_to_2xfloat_param_0];
+; CHECK-NEXT: mov.b64 %rd1, %fd1;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
%r = bitcast double %a to <2 x float>
ret <2 x float> %r
}
define double @test_bitcast_2xfloat_to_double(<2 x float> %a) #0 {
-; CHECK-O0-LABEL: test_bitcast_2xfloat_to_double(
-; CHECK-O0: {
-; CHECK-O0-NEXT: .reg .b64 %rd<3>;
-; CHECK-O0-NEXT: .reg .f64 %fd<2>;
-; CHECK-O0-EMPTY:
-; CHECK-O0-NEXT: // %bb.0:
-; CHECK-O0-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_double_param_0];
-; CHECK-O0-NEXT: mov.b64 %fd1, %rd2;
-; CHECK-O0-NEXT: st.param.f64 [func_retval0], %fd1;
-; CHECK-O0-NEXT: ret;
-;
-; CHECK-O3-LABEL: test_bitcast_2xfloat_to_double(
-; CHECK-O3: {
-; CHECK-O3-NEXT: .reg .f64 %fd<2>;
-; CHECK-O3-EMPTY:
-; CHECK-O3-NEXT: // %bb.0:
-; CHECK-O3-NEXT: ld.param.f64 %fd1, [test_bitcast_2xfloat_to_double_param_0];
-; CHECK-O3-NEXT: st.param.f64 [func_retval0], %fd1;
-; CHECK-O3-NEXT: ret;
+; CHECK-LABEL: test_bitcast_2xfloat_to_double(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_double_param_0];
+; CHECK-NEXT: mov.b64 %fd1, %rd2;
+; CHECK-NEXT: st.param.f64 [func_retval0], %fd1;
+; CHECK-NEXT: ret;
%r = bitcast <2 x float> %a to double
ret double %r
}
>From a0fd5be0ea42ce1300f0d86e6bf3c6b1a773d43b Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 25 Feb 2025 11:01:48 -0800
Subject: [PATCH 18/32] [NVPTX] add combiner rule for expanding StoreRetval
vector parameters
Do this to reduce the number of packing movs.
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 68 ++++++++++++++++---
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 56 ++++++---------
2 files changed, 80 insertions(+), 44 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 3af1db8b45d0f..2bfc3952a5b55 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5189,26 +5189,78 @@ PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
return SDValue();
}
-static SDValue PerformStoreCombineHelper(SDNode *N, std::size_t Front,
- std::size_t Back) {
+static SDValue PerformStoreCombineHelper(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ std::size_t Front, std::size_t Back) {
if (all_of(N->ops().drop_front(Front).drop_back(Back),
[](const SDUse &U) { return U.get()->isUndef(); }))
// Operand 0 is the previous value in the chain. Cannot return EntryToken
// as the previous value will become unused and eliminated later.
return N->getOperand(0);
+ auto *MemN = cast<MemSDNode>(N);
+ if (MemN->getMemoryVT() == MVT::v2f32) {
+ // try to fold, and expand:
+ // c: v2f32 = BUILD_VECTOR (a: f32, b: f32)
+ // StoreRetval c
+ // -->
+ // StoreRetvalV2 {a, b}
+ // likewise for V2 -> V4 case
+
+ std::optional<NVPTXISD::NodeType> NewOpcode;
+ switch (N->getOpcode()) {
+ case NVPTXISD::StoreParam:
+ NewOpcode = NVPTXISD::StoreParamV2;
+ break;
+ case NVPTXISD::StoreParamV2:
+ NewOpcode = NVPTXISD::StoreParamV4;
+ break;
+ case NVPTXISD::StoreRetval:
+ NewOpcode = NVPTXISD::StoreRetvalV2;
+ break;
+ case NVPTXISD::StoreRetvalV2:
+ NewOpcode = NVPTXISD::StoreRetvalV4;
+ break;
+ }
+
+ if (NewOpcode) {
+ // copy chain, offset from existing store
+ SmallVector<SDValue> NewOps = {N->getOperand(0), N->getOperand(1)};
+ // gather all operands to expand
+ for (unsigned I = 2, E = N->getNumOperands(); I < E; ++I) {
+ SDValue CurrentOp = N->getOperand(I);
+ if (CurrentOp->getOpcode() == ISD::BUILD_VECTOR) {
+ assert(CurrentOp.getValueType() == MVT::v2f32);
+ NewOps.push_back(CurrentOp.getNode()->getOperand(0));
+ NewOps.push_back(CurrentOp.getNode()->getOperand(1));
+ } else {
+ NewOps.clear();
+ break;
+ }
+ }
+
+ if (!NewOps.empty()) {
+ return DCI.DAG.getMemIntrinsicNode(*NewOpcode, SDLoc(N), N->getVTList(),
+ NewOps, MVT::f32,
+ MemN->getMemOperand());
+ }
+ }
+ }
+
return SDValue();
}
-static SDValue PerformStoreParamCombine(SDNode *N) {
+static SDValue PerformStoreParamCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
// Operands from the 3rd to the 2nd last one are the values to be stored.
// {Chain, ArgID, Offset, Val, Glue}
- return PerformStoreCombineHelper(N, 3, 1);
+ return PerformStoreCombineHelper(N, DCI, 3, 1);
}
-static SDValue PerformStoreRetvalCombine(SDNode *N) {
+static SDValue PerformStoreRetvalCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
// Operands from the 2nd to the last one are the values to be stored
- return PerformStoreCombineHelper(N, 2, 0);
+ return PerformStoreCombineHelper(N, DCI, 2, 0);
}
/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
@@ -5925,11 +5977,11 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
case NVPTXISD::StoreRetval:
case NVPTXISD::StoreRetvalV2:
case NVPTXISD::StoreRetvalV4:
- return PerformStoreRetvalCombine(N);
+ return PerformStoreRetvalCombine(N, DCI);
case NVPTXISD::StoreParam:
case NVPTXISD::StoreParamV2:
case NVPTXISD::StoreParamV4:
- return PerformStoreParamCombine(N);
+ return PerformStoreParamCombine(N, DCI);
case ISD::EXTRACT_VECTOR_ELT:
return PerformEXTRACTCombine(N, DCI);
case ISD::VSELECT:
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index 8f4fd3c6e6ee3..1f21740ba589e 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -16,13 +16,11 @@ define <2 x float> @test_ret_const() #0 {
; CHECK-LABEL: test_ret_const(
; CHECK: {
; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.f32 %f1, 0f40000000;
; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
; CHECK-NEXT: ret;
ret <2 x float> <float 1.0, float 2.0>
}
@@ -243,7 +241,7 @@ define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-LABEL: test_fdiv(
; CHECK: {
; CHECK-NEXT: .reg .f32 %f<7>;
-; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_param_1];
@@ -252,8 +250,7 @@ define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
; CHECK-NEXT: div.rn.f32 %f5, %f4, %f2;
; CHECK-NEXT: div.rn.f32 %f6, %f3, %f1;
-; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f6, %f5};
; CHECK-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
@@ -264,7 +261,7 @@ define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .f32 %f<15>;
-; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_param_1];
@@ -283,8 +280,7 @@ define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-NEXT: sub.f32 %f13, %f3, %f12;
; CHECK-NEXT: testp.infinite.f32 %p2, %f1;
; CHECK-NEXT: selp.f32 %f14, %f3, %f13, %p2;
-; CHECK-NEXT: mov.b64 %rd3, {%f14, %f9};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f14, %f9};
; CHECK-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
@@ -468,7 +464,7 @@ define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK-LABEL: test_fdiv_ftz(
; CHECK: {
; CHECK-NEXT: .reg .f32 %f<7>;
-; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_ftz_param_1];
@@ -477,8 +473,7 @@ define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
; CHECK-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
; CHECK-NEXT: div.rn.ftz.f32 %f6, %f3, %f1;
-; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f6, %f5};
; CHECK-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
@@ -489,7 +484,7 @@ define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .f32 %f<15>;
-; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_ftz_param_1];
@@ -508,8 +503,7 @@ define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK-NEXT: sub.ftz.f32 %f13, %f3, %f12;
; CHECK-NEXT: testp.infinite.f32 %p2, %f1;
; CHECK-NEXT: selp.f32 %f14, %f3, %f13, %p2;
-; CHECK-NEXT: mov.b64 %rd3, {%f14, %f9};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f14, %f9};
; CHECK-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
@@ -699,7 +693,7 @@ define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .f32 %f<11>;
-; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd4, [test_select_cc_param_3];
@@ -714,8 +708,7 @@ define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %
; CHECK-NEXT: mov.b64 {%f7, %f8}, %rd1;
; CHECK-NEXT: selp.f32 %f9, %f8, %f6, %p2;
; CHECK-NEXT: selp.f32 %f10, %f7, %f5, %p1;
-; CHECK-NEXT: mov.b64 %rd5, {%f10, %f9};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd5;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f10, %f9};
; CHECK-NEXT: ret;
%cc = fcmp une <2 x float> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
@@ -753,7 +746,7 @@ define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .f32 %f<7>;
-; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-NEXT: .reg .f64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -767,8 +760,7 @@ define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x
; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
; CHECK-NEXT: selp.f32 %f5, %f4, %f2, %p2;
; CHECK-NEXT: selp.f32 %f6, %f3, %f1, %p1;
-; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f6, %f5};
; CHECK-NEXT: ret;
%cc = fcmp une <2 x double> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
@@ -1186,14 +1178,12 @@ define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
; CHECK-NEXT: cvt.rn.f32.u32 %f1, %r2;
; CHECK-NEXT: cvt.rn.f32.u32 %f2, %r1;
-; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
; CHECK-NEXT: ret;
%r = uitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
@@ -1203,14 +1193,13 @@ define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
; CHECK-LABEL: test_uitofp_2xi64(
; CHECK: {
; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
; CHECK-NEXT: cvt.rn.f32.u64 %f1, %rd2;
; CHECK-NEXT: cvt.rn.f32.u64 %f2, %rd1;
-; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
; CHECK-NEXT: ret;
%r = uitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
@@ -1221,14 +1210,12 @@ define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
; CHECK-NEXT: cvt.rn.f32.s32 %f1, %r2;
; CHECK-NEXT: cvt.rn.f32.s32 %f2, %r1;
-; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
; CHECK-NEXT: ret;
%r = sitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
@@ -1238,14 +1225,13 @@ define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 {
; CHECK-LABEL: test_sitofp_2xi64(
; CHECK: {
; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
; CHECK-NEXT: cvt.rn.f32.s64 %f1, %rd2;
; CHECK-NEXT: cvt.rn.f32.s64 %f2, %rd1;
-; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
; CHECK-NEXT: ret;
%r = sitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
@@ -1276,15 +1262,13 @@ define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
; CHECK-LABEL: test_fptrunc_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-NEXT: .reg .f64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0];
; CHECK-NEXT: cvt.rn.f32.f64 %f1, %fd2;
; CHECK-NEXT: cvt.rn.f32.f64 %f2, %fd1;
-; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1};
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
; CHECK-NEXT: ret;
%r = fptrunc <2 x double> %a to <2 x float>
ret <2 x float> %r
>From 2d61d1bb5d400b7d3eb850477f7cba18fe4d89c1 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Fri, 28 Feb 2025 15:07:39 -0800
Subject: [PATCH 19/32] [NVPTX] add combiner rule for expanding LOAD, LoadV2,
LoadParam, LoadParamV2
This reduces the number of unpacking movs when the element type is i64 but
all uses are of the unpacked f32 components.
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 390 +++++++++++++++-----
1 file changed, 299 insertions(+), 91 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 2bfc3952a5b55..a29604d3b7b3b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -865,7 +865,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT, ISD::FADD,
ISD::MUL, ISD::SHL, ISD::SREM, ISD::UREM, ISD::VSELECT,
ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::FP_ROUND,
- ISD::TRUNCATE});
+ ISD::TRUNCATE, ISD::LOAD});
// setcc for f16x2 and bf16x2 needs special handling to prevent
// legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -5189,6 +5189,295 @@ PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
return SDValue();
}
+static std::optional<std::pair<SDValue, SDValue>>
+convertVectorLoad(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI,
+ bool BuildVector) {
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ const EVT ResVT = LD->getValueType(0);
+ const EVT MemVT = LD->getMemoryVT();
+
+ // If we're doing sign/zero extension as part of the load, avoid lowering to
+ // a LoadV node. TODO: consider relaxing this restriction.
+ if (ResVT != MemVT)
+ return {};
+
+ const auto NumEltsAndEltVT = getVectorLoweringShape(
+ ResVT, STI.has256BitVectorLoadStore(LD->getAddressSpace()));
+ if (!NumEltsAndEltVT)
+ return {};
+ const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
+
+ Align Alignment = LD->getAlign();
+ const auto &TD = DAG.getDataLayout();
+ Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext()));
+ if (Alignment < PrefAlign) {
+ // This load is not sufficiently aligned, so bail out and let this vector
+ // load be scalarized. Note that we may still be able to emit smaller
+ // vector loads. For example, if we are loading a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return {};
+ }
+
+ // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // loaded type to i16 and propagate the "real" type as the memory type.
+ const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;
+
+ unsigned Opcode;
+ switch (NumElts) {
+ default:
+ return {};
+ case 2:
+ Opcode = NVPTXISD::LoadV2;
+ break;
+ case 4:
+ Opcode = NVPTXISD::LoadV4;
+ break;
+ case 8:
+ Opcode = NVPTXISD::LoadV8;
+ break;
+ }
+ auto ListVTs = SmallVector<EVT, 9>(NumElts, LoadEltVT);
+ ListVTs.push_back(MVT::Other);
+ SDVTList LdResVTs = DAG.getVTList(ListVTs);
+
+ SDLoc DL(LD);
+
+ // Copy regular operands
+ SmallVector<SDValue, 8> OtherOps(LD->ops());
+
+ // The select routine does not have access to the LoadSDNode instance, so
+ // pass along the extension information
+ OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(
+ Opcode, DL, LdResVTs, OtherOps, LD->getMemoryVT(), LD->getMemOperand());
+
+ SmallVector<SDValue> ScalarRes;
+ if (EltVT.isVector()) {
+ assert(EVT(EltVT.getVectorElementType()) == ResVT.getVectorElementType());
+ assert(NumElts * EltVT.getVectorNumElements() ==
+ ResVT.getVectorNumElements());
+ // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back
+ // into individual elements.
+ for (const unsigned I : llvm::seq(NumElts)) {
+ SDValue SubVector = NewLD.getValue(I);
+ DAG.ExtractVectorElements(SubVector, ScalarRes);
+ }
+ } else {
+ for (const unsigned I : llvm::seq(NumElts)) {
+ SDValue Res = NewLD.getValue(I);
+ if (LoadEltVT != EltVT)
+ Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
+ ScalarRes.push_back(Res);
+ }
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ if (BuildVector) {
+ const MVT BuildVecVT =
+ MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size());
+ SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes);
+ SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec);
+
+ return {{LoadValue, LoadChain}};
+ }
+
+ return {{NewLD, LoadChain}};
+}
+
+static SDValue PerformLoadCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &STI) {
+ auto *MemN = cast<MemSDNode>(N);
+ EVT MemVT = MemN->getMemoryVT();
+
+ // ignore volatile loads
+ if (MemN->isVolatile())
+ return SDValue();
+
+ // only operate on vectors of f32s / i64s
+ if (!MemVT.isVector())
+ return SDValue();
+
+ EVT ElementVT = MemVT.getVectorElementType();
+ if (!(ElementVT == MVT::f32 ||
+ (ElementVT == MVT::i64 && N->getOpcode() != ISD::LOAD)))
+ return SDValue();
+
+ SmallDenseMap<SDNode *, unsigned> ExtractElts;
+ SDNode *ProxyReg = nullptr;
+ SmallVector<std::pair<SDNode *, unsigned /*offset*/>> WorkList{{N, 0}};
+ while (!WorkList.empty()) {
+ auto [V, Offset] = WorkList.pop_back_val();
+
+ // follow users of this to an extractelt, along the way collecting proxy
+ // regs and bitcasts
+ for (SDUse &U : V->uses()) {
+ if (U.getValueType() == MVT::Other || U.getValueType() == MVT::Glue)
+ continue; // we'll process chain/glue later
+
+ SDNode *User = U.getUser();
+ if (User->getOpcode() == NVPTXISD::ProxyReg) {
+ if (ProxyReg)
+ return SDValue(); // bail out if we've seen a proxy reg?
+ ProxyReg = User;
+ } else if (User->getOpcode() == ISD::BITCAST &&
+ User->getValueType(0) == MVT::v2f32 &&
+ U.getValueType() == MVT::i64) {
+ // match v2f32 = bitcast i64
+ Offset = U.getResNo() * 2;
+ } else if (User->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ User->getValueType(0) == MVT::f32) {
+ // match f32 = extractelt v2f32
+ if (auto *CI = dyn_cast<ConstantSDNode>(User->getOperand(1))) {
+ unsigned Index = CI->getZExtValue();
+ ExtractElts[User] = Offset + Index;
+ continue; // don't search
+ }
+ return SDValue(); // could not match
+ } else
+ return SDValue(); // couldn't match
+
+ // enqueue this to visit its uses
+ WorkList.push_back({User, Offset});
+ }
+ }
+
+ // (2) If the load's value is only used as f32 elements, replace all
+ // extractelts with individual elements of the newly-created load. If there's
+ // a ProxyReg, handle that too. After this check, we'll proceed in the
+ // following way:
+ // 1. Determine which type of load to create, which will split the results
+ // of the original load into f32 components.
+ // 2. If there's a ProxyReg, split that too.
+ // 3. Replace all extractelts with references to the new load / proxy reg.
+ // 4. Replace all glue/chain references with references to the new load /
+ // proxy reg.
+ if (ExtractElts.empty())
+ return SDValue();
+
+ // Do we have to tweak the opcode for an NVPTXISD::Load* or do we have to
+ // rewrite an ISD::LOAD?
+ std::optional<NVPTXISD::NodeType> NewOpcode;
+ switch (N->getOpcode()) {
+ case NVPTXISD::LoadV2:
+ NewOpcode = NVPTXISD::LoadV4;
+ break;
+ case NVPTXISD::LoadParam:
+ NewOpcode = NVPTXISD::LoadParamV2;
+ break;
+ case NVPTXISD::LoadParamV2:
+ NewOpcode = NVPTXISD::LoadParamV4;
+ break;
+ }
+
+ SDValue OldChain, OldGlue;
+ for (unsigned I = 0, E = N->getNumValues(); I != E; ++I) {
+ if (N->getValueType(I) == MVT::Other)
+ OldChain = SDValue(N, I);
+ else if (N->getValueType(I) == MVT::Glue)
+ OldGlue = SDValue(N, I);
+ }
+
+ SDValue NewLoad, NewChain, NewGlue /* (optional) */;
+ unsigned NumElts = 0;
+ if (NewOpcode) { // tweak NVPTXISD::Load* opcode
+ SmallVector<EVT> VTs;
+
+ // should always be non-null after this
+ std::optional<unsigned> NewChainIdx;
+ std::optional<unsigned> NewGlueIdx;
+ for (const EVT &V : N->values()) {
+ if (V == MVT::i64 || V == MVT::v2f32) {
+ VTs.append({MVT::f32, MVT::f32});
+ NumElts += 2;
+ } else {
+ assert((V == MVT::Other || V == MVT::Glue) &&
+ "expected i64,...,ch,glue = load or v2f32,ch = load");
+ if (V == MVT::Other)
+ NewChainIdx = VTs.size();
+ else
+ NewGlueIdx = VTs.size();
+ VTs.push_back(V);
+ }
+ }
+
+ NewLoad = DCI.DAG.getMemIntrinsicNode(
+ *NewOpcode, SDLoc(N), DCI.DAG.getVTList(VTs),
+ SmallVector<SDValue>(N->ops()), MVT::f32, MemN->getMemOperand());
+ NewChain = NewLoad.getValue(*NewChainIdx);
+ if (NewGlueIdx)
+ NewGlue = NewLoad.getValue(*NewGlueIdx);
+ } else if (N->getOpcode() == ISD::LOAD) { // rewrite a load
+ if (auto Result =
+ convertVectorLoad(N, DCI.DAG, STI, /*BuildVector=*/false)) {
+ std::tie(NewLoad, NewChain) = *Result;
+ NumElts = MemVT.getVectorNumElements();
+ if (NewLoad->getValueType(NewLoad->getNumValues() - 1) == MVT::Glue)
+ NewGlue = NewLoad.getValue(NewLoad->getNumValues() - 1);
+ }
+ }
+
+ if (!NewLoad)
+ return SDValue(); // could not match pattern
+
+ // (3) begin rewriting uses
+ SmallVector<SDValue> NewOutputsF32;
+
+ if (ProxyReg) {
+ // scalarize proxyreg, but first rewrite all uses of chain and glue from the
+ // old load to the new load
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
+
+ // Update the new chain and glue to be old inputs to the proxyreg, if they
+ // came from an intervening instruction between this proxyreg and the
+ // original load (ex: callseq_end). Other than bitcasts and extractelts, we
+ // followed all other nodes by chain and glue accesses.
+ if (SDValue OldInChain = ProxyReg->getOperand(0); OldInChain.getNode() != N)
+ NewChain = OldInChain;
+ if (SDValue OldInGlue = ProxyReg->getOperand(2); OldInGlue.getNode() != N)
+ NewGlue = OldInGlue;
+
+ // update OldChain, OldGlue to the outputs of ProxyReg, which we will
+ // replace later
+ OldChain = SDValue(ProxyReg, 1);
+ OldGlue = SDValue(ProxyReg, 2);
+
+ // generate the scalar proxy regs
+ for (unsigned I = 0, E = NumElts; I != E; ++I) {
+ SDValue ProxyRegElem =
+ DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(ProxyReg),
+ DCI.DAG.getVTList(MVT::f32, MVT::Other, MVT::Glue),
+ {NewChain, NewLoad.getValue(I), NewGlue});
+ NewChain = ProxyRegElem.getValue(1);
+ NewGlue = ProxyRegElem.getValue(2);
+ NewOutputsF32.push_back(ProxyRegElem);
+ }
+ } else {
+ for (unsigned I = 0, E = NumElts; I != E; ++I)
+ if (NewLoad->getValueType(I) == MVT::f32)
+ NewOutputsF32.push_back(NewLoad.getValue(I));
+ }
+
+ // now, for all extractelts, replace them with one of the new outputs
+ for (auto &[Extract, Index] : ExtractElts)
+ DCI.CombineTo(Extract, NewOutputsF32[Index], false);
+
+ // now replace all glue and chain nodes
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
+ if (OldGlue)
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
+
+ // cleanup
+ if (ProxyReg)
+ DCI.recursivelyDeleteUnusedNodes(ProxyReg);
+ return SDValue();
+}
+
static SDValue PerformStoreCombineHelper(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
std::size_t Front, std::size_t Back) {
@@ -5978,6 +6267,11 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
case NVPTXISD::StoreRetvalV2:
case NVPTXISD::StoreRetvalV4:
return PerformStoreRetvalCombine(N, DCI);
+ case ISD::LOAD:
+ case NVPTXISD::LoadV2:
+ case NVPTXISD::LoadParam:
+ case NVPTXISD::LoadParamV2:
+ return PerformLoadCombine(N, DCI, STI);
case NVPTXISD::StoreParam:
case NVPTXISD::StoreParamV2:
case NVPTXISD::StoreParamV4:
@@ -6024,97 +6318,11 @@ static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG,
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &Results,
const NVPTXSubtarget &STI) {
- LoadSDNode *LD = cast<LoadSDNode>(N);
- const EVT ResVT = LD->getValueType(0);
- const EVT MemVT = LD->getMemoryVT();
-
- // If we're doing sign/zero extension as part of the load, avoid lowering to
- // a LoadV node. TODO: consider relaxing this restriction.
- if (ResVT != MemVT)
- return;
-
- const auto NumEltsAndEltVT = getVectorLoweringShape(
- ResVT, STI.has256BitVectorLoadStore(LD->getAddressSpace()));
- if (!NumEltsAndEltVT)
- return;
- const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
-
- Align Alignment = LD->getAlign();
- const auto &TD = DAG.getDataLayout();
- Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext()));
- if (Alignment < PrefAlign) {
- // This load is not sufficiently aligned, so bail out and let this vector
- // load be scalarized. Note that we may still be able to emit smaller
- // vector loads. For example, if we are loading a <4 x float> with an
- // alignment of 8, this check will fail but the legalizer will try again
- // with 2 x <2 x float>, which will succeed with an alignment of 8.
- return;
- }
-
- // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propagate the "real" type as the memory type.
- const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;
-
- unsigned Opcode;
- switch (NumElts) {
- default:
- return;
- case 2:
- Opcode = NVPTXISD::LoadV2;
- break;
- case 4:
- Opcode = NVPTXISD::LoadV4;
- break;
- case 8:
- Opcode = NVPTXISD::LoadV8;
- break;
+ if (auto Outputs = convertVectorLoad(N, DAG, STI, /*BuildVector=*/true)) {
+ auto [BuildVec, LoadChain] = *Outputs;
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
}
- auto ListVTs = SmallVector<EVT, 9>(NumElts, LoadEltVT);
- ListVTs.push_back(MVT::Other);
- SDVTList LdResVTs = DAG.getVTList(ListVTs);
-
- SDLoc DL(LD);
-
- // Copy regular operands
- SmallVector<SDValue, 8> OtherOps(LD->ops());
-
- // The select routine does not have access to the LoadSDNode instance, so
- // pass along the extension information
- OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
-
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
- LD->getMemoryVT(),
- LD->getMemOperand());
-
- SmallVector<SDValue> ScalarRes;
- if (EltVT.isVector()) {
- assert(EVT(EltVT.getVectorElementType()) == ResVT.getVectorElementType());
- assert(NumElts * EltVT.getVectorNumElements() ==
- ResVT.getVectorNumElements());
- // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back
- // into individual elements.
- for (const unsigned I : llvm::seq(NumElts)) {
- SDValue SubVector = NewLD.getValue(I);
- DAG.ExtractVectorElements(SubVector, ScalarRes);
- }
- } else {
- for (const unsigned I : llvm::seq(NumElts)) {
- SDValue Res = NewLD.getValue(I);
- if (LoadEltVT != EltVT)
- Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
- ScalarRes.push_back(Res);
- }
- }
-
- SDValue LoadChain = NewLD.getValue(NumElts);
-
- const MVT BuildVecVT =
- MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size());
- SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes);
- SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec);
-
- Results.append({LoadValue, LoadChain});
}
// Lower vector return type of tcgen05.ld intrinsics
>From 592bd160971ec56dfbc7adc7e24bdd845bfb1429 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Wed, 5 Mar 2025 22:33:15 -0800
Subject: [PATCH 20/32] [NVPTX] update combiner rule for more types of loads
Handle more loads, including ones with multiple proxy registers:
- i64 = LOAD
- i64 = LoadParam
- v2f32,v2f32 = LoadParamV2
Also update the test cases. Because this is an optimization, it is not
triggered for some of these tests that compile with no optimizations.
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 150 ++--
llvm/test/CodeGen/NVPTX/aggregate-return.ll | 4 +-
llvm/test/CodeGen/NVPTX/bf16-instructions.ll | 96 +--
llvm/test/CodeGen/NVPTX/f16x2-instructions.ll | 71 +-
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 685 +++++++++---------
.../NVPTX/read-global-variable-constant.ll | 2 +-
llvm/test/CodeGen/NVPTX/vector-loads.ll | 50 +-
llvm/test/CodeGen/NVPTX/vector-stores.ll | 2 +-
8 files changed, 553 insertions(+), 507 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index a29604d3b7b3b..4b37805d61ac5 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5189,12 +5189,14 @@ PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
return SDValue();
}
+/// OverrideVT - allows overriding result and memory type
static std::optional<std::pair<SDValue, SDValue>>
convertVectorLoad(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI,
- bool BuildVector) {
+ bool BuildVector,
+ std::optional<EVT> OverrideVT = std::nullopt) {
LoadSDNode *LD = cast<LoadSDNode>(N);
- const EVT ResVT = LD->getValueType(0);
- const EVT MemVT = LD->getMemoryVT();
+ const EVT ResVT = OverrideVT.value_or(LD->getValueType(0));
+ const EVT MemVT = OverrideVT.value_or(LD->getMemoryVT());
// If we're doing sign/zero extension as part of the load, avoid lowering to
// a LoadV node. TODO: consider relaxing this restriction.
@@ -5251,8 +5253,8 @@ convertVectorLoad(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI,
// pass along the extension information
OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
- SDValue NewLD = DAG.getMemIntrinsicNode(
- Opcode, DL, LdResVTs, OtherOps, LD->getMemoryVT(), LD->getMemOperand());
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, MemVT,
+ LD->getMemOperand());
SmallVector<SDValue> ScalarRes;
if (EltVT.isVector()) {
@@ -5277,6 +5279,26 @@ convertVectorLoad(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI,
SDValue LoadChain = NewLD.getValue(NumElts);
if (BuildVector) {
+ SmallVector<SDValue> ScalarRes;
+ if (EltVT.isVector()) {
+ assert(EVT(EltVT.getVectorElementType()) == ResVT.getVectorElementType());
+ assert(NumElts * EltVT.getVectorNumElements() ==
+ ResVT.getVectorNumElements());
+ // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back
+ // into individual elements.
+ for (const unsigned I : llvm::seq(NumElts)) {
+ SDValue SubVector = NewLD.getValue(I);
+ DAG.ExtractVectorElements(SubVector, ScalarRes);
+ }
+ } else {
+ for (const unsigned I : llvm::seq(NumElts)) {
+ SDValue Res = NewLD.getValue(I);
+ if (LoadEltVT != EltVT)
+ Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
+ ScalarRes.push_back(Res);
+ }
+ }
+
const MVT BuildVecVT =
MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size());
SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes);
@@ -5292,23 +5314,20 @@ static SDValue PerformLoadCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const NVPTXSubtarget &STI) {
auto *MemN = cast<MemSDNode>(N);
- EVT MemVT = MemN->getMemoryVT();
-
- // ignore volatile loads
- if (MemN->isVolatile())
- return SDValue();
-
// only operate on vectors of f32s / i64s
- if (!MemVT.isVector())
+ if (EVT MemVT = MemN->getMemoryVT();
+ !(MemVT == MVT::i64 ||
+ (MemVT.isVector() && (MemVT.getVectorElementType() == MVT::f32 ||
+ MemVT.getVectorElementType() == MVT::i64))))
return SDValue();
- EVT ElementVT = MemVT.getVectorElementType();
- if (!(ElementVT == MVT::f32 ||
- (ElementVT == MVT::i64 && N->getOpcode() != ISD::LOAD)))
- return SDValue();
+ const unsigned OrigNumResults =
+ llvm::count_if(N->values(), [](const auto &VT) {
+ return VT == MVT::i64 || VT == MVT::f32 || VT.isVector();
+ });
SmallDenseMap<SDNode *, unsigned> ExtractElts;
- SDNode *ProxyReg = nullptr;
+ SmallVector<SDNode *> ProxyRegs(OrigNumResults, nullptr);
SmallVector<std::pair<SDNode *, unsigned /*offset*/>> WorkList{{N, 0}};
while (!WorkList.empty()) {
auto [V, Offset] = WorkList.pop_back_val();
@@ -5321,8 +5340,14 @@ static SDValue PerformLoadCombine(SDNode *N,
SDNode *User = U.getUser();
if (User->getOpcode() == NVPTXISD::ProxyReg) {
+ Offset = U.getResNo() * 2;
+ SDNode *&ProxyReg = ProxyRegs[Offset / 2];
+
+ // We shouldn't have multiple proxy regs for the same value from the
+ // load, but bail out anyway since we don't handle this.
if (ProxyReg)
- return SDValue(); // bail out if we've seen a proxy reg?
+ return SDValue();
+
ProxyReg = User;
} else if (User->getOpcode() == ISD::BITCAST &&
User->getValueType(0) == MVT::v2f32 &&
@@ -5412,10 +5437,18 @@ static SDValue PerformLoadCombine(SDNode *N,
if (NewGlueIdx)
NewGlue = NewLoad.getValue(*NewGlueIdx);
} else if (N->getOpcode() == ISD::LOAD) { // rewrite a load
- if (auto Result =
- convertVectorLoad(N, DCI.DAG, STI, /*BuildVector=*/false)) {
+ std::optional<EVT> CastToType;
+ EVT ResVT = N->getValueType(0);
+ if (ResVT == MVT::i64) {
+ // ld.b64 is treated as a vector by subsequent code
+ CastToType = MVT::v2f32;
+ }
+ if (auto Result = convertVectorLoad(N, DCI.DAG, STI, /*BuildVector=*/false,
+ CastToType)) {
std::tie(NewLoad, NewChain) = *Result;
- NumElts = MemVT.getVectorNumElements();
+ NumElts =
+ CastToType.value_or(cast<MemSDNode>(NewLoad.getNode())->getMemoryVT())
+ .getVectorNumElements();
if (NewLoad->getValueType(NewLoad->getNumValues() - 1) == MVT::Glue)
NewGlue = NewLoad.getValue(NewLoad->getNumValues() - 1);
}
@@ -5427,54 +5460,65 @@ static SDValue PerformLoadCombine(SDNode *N,
// (3) begin rewriting uses
SmallVector<SDValue> NewOutputsF32;
- if (ProxyReg) {
- // scalarize proxyreg, but first rewrite all uses of chain and glue from the
- // old load to the new load
+ if (llvm::any_of(ProxyRegs, [](const SDNode *PR) { return PR != nullptr; })) {
+ // scalarize proxy regs, but first rewrite all uses of chain and glue from
+ // the old load to the new load
DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
- // Update the new chain and glue to be old inputs to the proxyreg, if they
- // came from an intervening instruction between this proxyreg and the
- // original load (ex: callseq_end). Other than bitcasts and extractelts, we
- // followed all other nodes by chain and glue accesses.
- if (SDValue OldInChain = ProxyReg->getOperand(0); OldInChain.getNode() != N)
+ for (unsigned ProxyI = 0, ProxyE = ProxyRegs.size(); ProxyI != ProxyE;
+ ++ProxyI) {
+ SDNode *ProxyReg = ProxyRegs[ProxyI];
+
+ // no proxy reg might mean this result is unused
+ if (!ProxyReg)
+ continue;
+
+ // Update the new chain and glue to be old inputs to the proxyreg, if they
+ // came from an intervening instruction between this proxyreg and the
+ // original load (ex: callseq_end). Other than bitcasts and extractelts,
+ // we followed all other nodes by chain and glue accesses.
+ if (SDValue OldInChain = ProxyReg->getOperand(0);
+ OldInChain.getNode() != N)
NewChain = OldInChain;
- if (SDValue OldInGlue = ProxyReg->getOperand(2); OldInGlue.getNode() != N)
+ if (SDValue OldInGlue = ProxyReg->getOperand(2); OldInGlue.getNode() != N)
NewGlue = OldInGlue;
- // update OldChain, OldGlue to the outputs of ProxyReg, which we will
- // replace later
- OldChain = SDValue(ProxyReg, 1);
- OldGlue = SDValue(ProxyReg, 2);
-
- // generate the scalar proxy regs
- for (unsigned I = 0, E = NumElts; I != E; ++I) {
- SDValue ProxyRegElem =
- DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(ProxyReg),
- DCI.DAG.getVTList(MVT::f32, MVT::Other, MVT::Glue),
- {NewChain, NewLoad.getValue(I), NewGlue});
- NewChain = ProxyRegElem.getValue(1);
- NewGlue = ProxyRegElem.getValue(2);
- NewOutputsF32.push_back(ProxyRegElem);
+ // update OldChain, OldGlue to the outputs of ProxyReg, which we will
+ // replace later
+ OldChain = SDValue(ProxyReg, 1);
+ OldGlue = SDValue(ProxyReg, 2);
+
+ // generate the scalar proxy regs
+ for (unsigned I = 0, E = 2; I != E; ++I) {
+ SDValue ProxyRegElem = DCI.DAG.getNode(
+ NVPTXISD::ProxyReg, SDLoc(ProxyReg),
+ DCI.DAG.getVTList(MVT::f32, MVT::Other, MVT::Glue),
+ {NewChain, NewLoad.getValue(ProxyI * 2 + I), NewGlue});
+ NewChain = ProxyRegElem.getValue(1);
+ NewGlue = ProxyRegElem.getValue(2);
+ NewOutputsF32.push_back(ProxyRegElem);
+ }
+
+ // replace all uses of the glue and chain from the old proxy reg
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
}
} else {
for (unsigned I = 0, E = NumElts; I != E; ++I)
if (NewLoad->getValueType(I) == MVT::f32)
NewOutputsF32.push_back(NewLoad.getValue(I));
+
+ // replace all glue and chain nodes
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
+ if (OldGlue)
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
}
- // now, for all extractelts, replace them with one of the new outputs
+ // replace all extractelts with the new outputs
for (auto &[Extract, Index] : ExtractElts)
DCI.CombineTo(Extract, NewOutputsF32[Index], false);
- // now replace all glue and chain nodes
- DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
- if (OldGlue)
- DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
-
- // cleanup
- if (ProxyReg)
- DCI.recursivelyDeleteUnusedNodes(ProxyReg);
return SDValue();
}
diff --git a/llvm/test/CodeGen/NVPTX/aggregate-return.ll b/llvm/test/CodeGen/NVPTX/aggregate-return.ll
index 1c8f019922e37..784355d96551e 100644
--- a/llvm/test/CodeGen/NVPTX/aggregate-return.ll
+++ b/llvm/test/CodeGen/NVPTX/aggregate-return.ll
@@ -27,9 +27,7 @@ define void @test_v3f32(<3 x float> %input, ptr %output) {
; CHECK-NOT: ld.param.b32 [[E3:%r[0-9]+]], [retval0+12];
store <3 x float> %call, ptr %output, align 8
; CHECK-DAG: st.b32 [{{%rd[0-9]}}+8],
-; -- This is suboptimal. We should do st.v2.f32 instead
-; of combining 2xf32 info i64.
-; CHECK-DAG: st.b64 [{{%rd[0-9]}}],
+; CHECK-DAG: st.v2.b32 [{{%rd[0-9]}}], {[[E0]], [[E1]]}
; CHECK: ret;
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
index 2854ea4b79302..8c02174edefb5 100644
--- a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
@@ -712,25 +712,25 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.b64 %rd1, [test_extload_bf16x8_param_0];
; SM70-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; SM70-NEXT: mov.b32 {%rs1, %rs2}, %r1;
-; SM70-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; SM70-NEXT: mov.b32 {%rs5, %rs6}, %r3;
-; SM70-NEXT: mov.b32 {%rs7, %rs8}, %r4;
-; SM70-NEXT: cvt.u32.u16 %r5, %rs8;
+; SM70-NEXT: mov.b32 {%rs1, %rs2}, %r4;
+; SM70-NEXT: cvt.u32.u16 %r5, %rs2;
; SM70-NEXT: shl.b32 %r29, %r5, 16;
-; SM70-NEXT: cvt.u32.u16 %r8, %rs7;
+; SM70-NEXT: cvt.u32.u16 %r8, %rs1;
; SM70-NEXT: shl.b32 %r30, %r8, 16;
-; SM70-NEXT: cvt.u32.u16 %r11, %rs6;
+; SM70-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; SM70-NEXT: cvt.u32.u16 %r11, %rs4;
; SM70-NEXT: shl.b32 %r31, %r11, 16;
-; SM70-NEXT: cvt.u32.u16 %r14, %rs5;
+; SM70-NEXT: cvt.u32.u16 %r14, %rs3;
; SM70-NEXT: shl.b32 %r32, %r14, 16;
-; SM70-NEXT: cvt.u32.u16 %r17, %rs4;
+; SM70-NEXT: mov.b32 {%rs5, %rs6}, %r2;
+; SM70-NEXT: cvt.u32.u16 %r17, %rs6;
; SM70-NEXT: shl.b32 %r33, %r17, 16;
-; SM70-NEXT: cvt.u32.u16 %r20, %rs3;
+; SM70-NEXT: cvt.u32.u16 %r20, %rs5;
; SM70-NEXT: shl.b32 %r34, %r20, 16;
-; SM70-NEXT: cvt.u32.u16 %r23, %rs2;
+; SM70-NEXT: mov.b32 {%rs7, %rs8}, %r1;
+; SM70-NEXT: cvt.u32.u16 %r23, %rs8;
; SM70-NEXT: shl.b32 %r35, %r23, 16;
-; SM70-NEXT: cvt.u32.u16 %r26, %rs1;
+; SM70-NEXT: cvt.u32.u16 %r26, %rs7;
; SM70-NEXT: shl.b32 %r36, %r26, 16;
; SM70-NEXT: st.param.v4.b32 [func_retval0], {%r36, %r35, %r34, %r33};
; SM70-NEXT: st.param.v4.b32 [func_retval0+16], {%r32, %r31, %r30, %r29};
@@ -745,18 +745,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b64 %rd1, [test_extload_bf16x8_param_0];
; SM80-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r1;
-; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; SM80-NEXT: mov.b32 {%rs5, %rs6}, %r3;
-; SM80-NEXT: mov.b32 {%rs7, %rs8}, %r4;
-; SM80-NEXT: cvt.f32.bf16 %r5, %rs8;
-; SM80-NEXT: cvt.f32.bf16 %r6, %rs7;
-; SM80-NEXT: cvt.f32.bf16 %r7, %rs6;
-; SM80-NEXT: cvt.f32.bf16 %r8, %rs5;
-; SM80-NEXT: cvt.f32.bf16 %r9, %rs4;
-; SM80-NEXT: cvt.f32.bf16 %r10, %rs3;
-; SM80-NEXT: cvt.f32.bf16 %r11, %rs2;
-; SM80-NEXT: cvt.f32.bf16 %r12, %rs1;
+; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
+; SM80-NEXT: cvt.f32.bf16 %r5, %rs2;
+; SM80-NEXT: cvt.f32.bf16 %r6, %rs1;
+; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; SM80-NEXT: cvt.f32.bf16 %r7, %rs4;
+; SM80-NEXT: cvt.f32.bf16 %r8, %rs3;
+; SM80-NEXT: mov.b32 {%rs5, %rs6}, %r2;
+; SM80-NEXT: cvt.f32.bf16 %r9, %rs6;
+; SM80-NEXT: cvt.f32.bf16 %r10, %rs5;
+; SM80-NEXT: mov.b32 {%rs7, %rs8}, %r1;
+; SM80-NEXT: cvt.f32.bf16 %r11, %rs8;
+; SM80-NEXT: cvt.f32.bf16 %r12, %rs7;
; SM80-NEXT: st.param.v4.b32 [func_retval0], {%r12, %r11, %r10, %r9};
; SM80-NEXT: st.param.v4.b32 [func_retval0+16], {%r8, %r7, %r6, %r5};
; SM80-NEXT: ret;
@@ -770,18 +770,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b64 %rd1, [test_extload_bf16x8_param_0];
; SM80-FTZ-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; SM80-FTZ-NEXT: mov.b32 {%rs1, %rs2}, %r1;
-; SM80-FTZ-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; SM80-FTZ-NEXT: mov.b32 {%rs5, %rs6}, %r3;
-; SM80-FTZ-NEXT: mov.b32 {%rs7, %rs8}, %r4;
-; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r5, %rs8;
-; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r6, %rs7;
-; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r7, %rs6;
-; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r8, %rs5;
-; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r9, %rs4;
-; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r10, %rs3;
-; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r11, %rs2;
-; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r12, %rs1;
+; SM80-FTZ-NEXT: mov.b32 {%rs1, %rs2}, %r4;
+; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r5, %rs2;
+; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r6, %rs1;
+; SM80-FTZ-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r7, %rs4;
+; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r8, %rs3;
+; SM80-FTZ-NEXT: mov.b32 {%rs5, %rs6}, %r2;
+; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r9, %rs6;
+; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r10, %rs5;
+; SM80-FTZ-NEXT: mov.b32 {%rs7, %rs8}, %r1;
+; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r11, %rs8;
+; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %r12, %rs7;
; SM80-FTZ-NEXT: st.param.v4.b32 [func_retval0], {%r12, %r11, %r10, %r9};
; SM80-FTZ-NEXT: st.param.v4.b32 [func_retval0+16], {%r8, %r7, %r6, %r5};
; SM80-FTZ-NEXT: ret;
@@ -795,18 +795,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
; SM90-NEXT: // %bb.0:
; SM90-NEXT: ld.param.b64 %rd1, [test_extload_bf16x8_param_0];
; SM90-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; SM90-NEXT: mov.b32 {%rs1, %rs2}, %r1;
-; SM90-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; SM90-NEXT: mov.b32 {%rs5, %rs6}, %r3;
-; SM90-NEXT: mov.b32 {%rs7, %rs8}, %r4;
-; SM90-NEXT: cvt.f32.bf16 %r5, %rs8;
-; SM90-NEXT: cvt.f32.bf16 %r6, %rs7;
-; SM90-NEXT: cvt.f32.bf16 %r7, %rs6;
-; SM90-NEXT: cvt.f32.bf16 %r8, %rs5;
-; SM90-NEXT: cvt.f32.bf16 %r9, %rs4;
-; SM90-NEXT: cvt.f32.bf16 %r10, %rs3;
-; SM90-NEXT: cvt.f32.bf16 %r11, %rs2;
-; SM90-NEXT: cvt.f32.bf16 %r12, %rs1;
+; SM90-NEXT: mov.b32 {%rs1, %rs2}, %r4;
+; SM90-NEXT: cvt.f32.bf16 %r5, %rs2;
+; SM90-NEXT: cvt.f32.bf16 %r6, %rs1;
+; SM90-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; SM90-NEXT: cvt.f32.bf16 %r7, %rs4;
+; SM90-NEXT: cvt.f32.bf16 %r8, %rs3;
+; SM90-NEXT: mov.b32 {%rs5, %rs6}, %r2;
+; SM90-NEXT: cvt.f32.bf16 %r9, %rs6;
+; SM90-NEXT: cvt.f32.bf16 %r10, %rs5;
+; SM90-NEXT: mov.b32 {%rs7, %rs8}, %r1;
+; SM90-NEXT: cvt.f32.bf16 %r11, %rs8;
+; SM90-NEXT: cvt.f32.bf16 %r12, %rs7;
; SM90-NEXT: st.param.v4.b32 [func_retval0], {%r12, %r11, %r10, %r9};
; SM90-NEXT: st.param.v4.b32 [func_retval0+16], {%r8, %r7, %r6, %r5};
; SM90-NEXT: ret;
diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
index 636ca801e97b7..ece55a8fb44f8 100644
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -614,15 +614,18 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-F16: {
; CHECK-F16-NEXT: .reg .pred %p<3>;
; CHECK-F16-NEXT: .reg .b32 %r<9>;
+; CHECK-F16-NEXT: .reg .b64 %rd<3>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
-; CHECK-F16-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f32_f16_param_1];
-; CHECK-F16-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_select_cc_f32_f16_param_0];
-; CHECK-F16-NEXT: ld.param.b32 %r6, [test_select_cc_f32_f16_param_3];
-; CHECK-F16-NEXT: ld.param.b32 %r5, [test_select_cc_f32_f16_param_2];
-; CHECK-F16-NEXT: setp.neu.f16x2 %p1|%p2, %r5, %r6;
-; CHECK-F16-NEXT: selp.f32 %r7, %r2, %r4, %p2;
-; CHECK-F16-NEXT: selp.f32 %r8, %r1, %r3, %p1;
+; CHECK-F16-NEXT: ld.param.b32 %r2, [test_select_cc_f32_f16_param_3];
+; CHECK-F16-NEXT: ld.param.b32 %r1, [test_select_cc_f32_f16_param_2];
+; CHECK-F16-NEXT: ld.param.b64 %rd2, [test_select_cc_f32_f16_param_1];
+; CHECK-F16-NEXT: ld.param.b64 %rd1, [test_select_cc_f32_f16_param_0];
+; CHECK-F16-NEXT: setp.neu.f16x2 %p1|%p2, %r1, %r2;
+; CHECK-F16-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-F16-NEXT: mov.b64 {%r5, %r6}, %rd1;
+; CHECK-F16-NEXT: selp.f32 %r7, %r6, %r4, %p2;
+; CHECK-F16-NEXT: selp.f32 %r8, %r5, %r3, %p1;
; CHECK-F16-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7};
; CHECK-F16-NEXT: ret;
;
@@ -631,22 +634,25 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<13>;
+; CHECK-NOF16-NEXT: .reg .b64 %rd<3>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
-; CHECK-NOF16-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f32_f16_param_1];
-; CHECK-NOF16-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_select_cc_f32_f16_param_0];
-; CHECK-NOF16-NEXT: ld.param.b32 %r6, [test_select_cc_f32_f16_param_3];
-; CHECK-NOF16-NEXT: ld.param.b32 %r5, [test_select_cc_f32_f16_param_2];
-; CHECK-NOF16-NEXT: mov.b32 {%rs1, %rs2}, %r6;
-; CHECK-NOF16-NEXT: cvt.f32.f16 %r7, %rs1;
-; CHECK-NOF16-NEXT: mov.b32 {%rs3, %rs4}, %r5;
-; CHECK-NOF16-NEXT: cvt.f32.f16 %r8, %rs3;
-; CHECK-NOF16-NEXT: setp.neu.f32 %p1, %r8, %r7;
-; CHECK-NOF16-NEXT: cvt.f32.f16 %r9, %rs2;
-; CHECK-NOF16-NEXT: cvt.f32.f16 %r10, %rs4;
-; CHECK-NOF16-NEXT: setp.neu.f32 %p2, %r10, %r9;
-; CHECK-NOF16-NEXT: selp.f32 %r11, %r2, %r4, %p2;
-; CHECK-NOF16-NEXT: selp.f32 %r12, %r1, %r3, %p1;
+; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_select_cc_f32_f16_param_3];
+; CHECK-NOF16-NEXT: ld.param.b32 %r1, [test_select_cc_f32_f16_param_2];
+; CHECK-NOF16-NEXT: ld.param.b64 %rd2, [test_select_cc_f32_f16_param_1];
+; CHECK-NOF16-NEXT: ld.param.b64 %rd1, [test_select_cc_f32_f16_param_0];
+; CHECK-NOF16-NEXT: mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-NOF16-NEXT: cvt.f32.f16 %r3, %rs1;
+; CHECK-NOF16-NEXT: mov.b32 {%rs3, %rs4}, %r1;
+; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs3;
+; CHECK-NOF16-NEXT: setp.neu.f32 %p1, %r4, %r3;
+; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs2;
+; CHECK-NOF16-NEXT: cvt.f32.f16 %r6, %rs4;
+; CHECK-NOF16-NEXT: setp.neu.f32 %p2, %r6, %r5;
+; CHECK-NOF16-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-NOF16-NEXT: mov.b64 {%r9, %r10}, %rd1;
+; CHECK-NOF16-NEXT: selp.f32 %r11, %r10, %r8, %p2;
+; CHECK-NOF16-NEXT: selp.f32 %r12, %r9, %r7, %p1;
; CHECK-NOF16-NEXT: st.param.v2.b32 [func_retval0], {%r12, %r11};
; CHECK-NOF16-NEXT: ret;
<2 x half> %c, <2 x half> %d) #0 {
@@ -661,14 +667,17 @@ define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<7>;
; CHECK-NEXT: .reg .b32 %r<8>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [test_select_cc_f16_f32_param_3];
-; CHECK-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f16_f32_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_f16_f32_param_3];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_f16_f32_param_2];
; CHECK-NEXT: ld.param.b32 %r2, [test_select_cc_f16_f32_param_1];
; CHECK-NEXT: ld.param.b32 %r1, [test_select_cc_f16_f32_param_0];
-; CHECK-NEXT: setp.neu.f32 %p1, %r3, %r5;
-; CHECK-NEXT: setp.neu.f32 %p2, %r4, %r6;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: mov.b64 {%r5, %r6}, %rd1;
+; CHECK-NEXT: setp.neu.f32 %p1, %r5, %r3;
+; CHECK-NEXT: setp.neu.f32 %p2, %r6, %r4;
; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2;
; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r1;
; CHECK-NEXT: selp.b16 %rs5, %rs4, %rs2, %p2;
@@ -1517,9 +1526,11 @@ define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_fptrunc_2xfloat_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_fptrunc_2xfloat_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd1;
; CHECK-NEXT: cvt.rn.f16.f32 %rs1, %r2;
; CHECK-NEXT: cvt.rn.f16.f32 %rs2, %r1;
; CHECK-NEXT: mov.b32 %r3, {%rs2, %rs1};
@@ -1945,10 +1956,12 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
; CHECK-F16: {
; CHECK-F16-NEXT: .reg .b16 %rs<3>;
; CHECK-F16-NEXT: .reg .b32 %r<8>;
+; CHECK-F16-NEXT: .reg .b64 %rd<2>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
-; CHECK-F16-NEXT: ld.param.v2.b32 {%r2, %r3}, [test_copysign_f32_param_1];
+; CHECK-F16-NEXT: ld.param.b64 %rd1, [test_copysign_f32_param_1];
; CHECK-F16-NEXT: ld.param.b32 %r1, [test_copysign_f32_param_0];
+; CHECK-F16-NEXT: mov.b64 {%r2, %r3}, %rd1;
; CHECK-F16-NEXT: cvt.rn.f16.f32 %rs1, %r3;
; CHECK-F16-NEXT: cvt.rn.f16.f32 %rs2, %r2;
; CHECK-F16-NEXT: mov.b32 %r4, {%rs2, %rs1};
@@ -1962,10 +1975,12 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<9>;
; CHECK-NOF16-NEXT: .reg .b32 %r<9>;
+; CHECK-NOF16-NEXT: .reg .b64 %rd<2>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
-; CHECK-NOF16-NEXT: ld.param.v2.b32 {%r2, %r3}, [test_copysign_f32_param_1];
+; CHECK-NOF16-NEXT: ld.param.b64 %rd1, [test_copysign_f32_param_1];
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [test_copysign_f32_param_0];
+; CHECK-NOF16-NEXT: mov.b64 {%r2, %r3}, %rd1;
; CHECK-NOF16-NEXT: mov.b32 {%rs1, %rs2}, %r1;
; CHECK-NOF16-NEXT: and.b16 %rs3, %rs2, 32767;
; CHECK-NOF16-NEXT: mov.b32 %r4, %r3;
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index 1f21740ba589e..30ba29c11716b 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -15,12 +15,12 @@ target triple = "nvptx64-nvidia-cuda"
define <2 x float> @test_ret_const() #0 {
; CHECK-LABEL: test_ret_const(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
+; CHECK-NEXT: mov.b32 %r1, 0f40000000;
+; CHECK-NEXT: mov.b32 %r2, 0f3F800000;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
; CHECK-NEXT: ret;
ret <2 x float> <float 1.0, float 2.0>
}
@@ -28,13 +28,13 @@ define <2 x float> @test_ret_const() #0 {
define float @test_extract_0(<2 x float> %a) #0 {
; CHECK-LABEL: test_extract_0(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_extract_0_param_0];
-; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {%f1, tmp}, %rd1; }
-; CHECK-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {%r1, tmp}, %rd1; }
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
; CHECK-NEXT: ret;
%e = extractelement <2 x float> %a, i32 0
ret float %e
@@ -43,13 +43,13 @@ define float @test_extract_0(<2 x float> %a) #0 {
define float @test_extract_1(<2 x float> %a) #0 {
; CHECK-LABEL: test_extract_1(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_extract_1_param_0];
-; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %f1}, %rd1; }
-; CHECK-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r1}, %rd1; }
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
; CHECK-NEXT: ret;
%e = extractelement <2 x float> %a, i32 1
ret float %e
@@ -81,14 +81,14 @@ define <2 x float> @test_fadd(<2 x float> %a, <2 x float> %b) #0 {
define <2 x float> @test_fadd_imm_0(<2 x float> %a) #0 {
; CHECK-LABEL: test_fadd_imm_0(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_0_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: mov.b32 %r1, 0f40000000;
+; CHECK-NEXT: mov.b32 %r2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd2, {%r2, %r1};
; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
@@ -99,14 +99,14 @@ define <2 x float> @test_fadd_imm_0(<2 x float> %a) #0 {
define <2 x float> @test_fadd_imm_1(<2 x float> %a) #0 {
; CHECK-LABEL: test_fadd_imm_1(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_1_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: mov.b32 %r1, 0f40000000;
+; CHECK-NEXT: mov.b32 %r2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd2, {%r2, %r1};
; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
@@ -120,8 +120,8 @@ define <4 x float> @test_fadd_v4(<4 x float> %a, <4 x float> %b) #0 {
; CHECK-NEXT: .reg .b64 %rd<11>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u64 {%rd5, %rd6}, [test_fadd_v4_param_1];
-; CHECK-NEXT: ld.param.v2.u64 {%rd7, %rd8}, [test_fadd_v4_param_0];
+; CHECK-NEXT: ld.param.v2.b64 {%rd5, %rd6}, [test_fadd_v4_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd7, %rd8}, [test_fadd_v4_param_0];
; CHECK-NEXT: add.rn.f32x2 %rd9, %rd8, %rd6;
; CHECK-NEXT: add.rn.f32x2 %rd10, %rd7, %rd5;
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
@@ -133,18 +133,18 @@ define <4 x float> @test_fadd_v4(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_fadd_imm_0_v4(<4 x float> %a) #0 {
; CHECK-LABEL: test_fadd_imm_0_v4(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_0_v4_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_imm_0_v4_param_0];
+; CHECK-NEXT: mov.b32 %r1, 0f40800000;
+; CHECK-NEXT: mov.b32 %r2, 0f40400000;
+; CHECK-NEXT: mov.b64 %rd5, {%r2, %r1};
; CHECK-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
-; CHECK-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-NEXT: mov.b32 %r3, 0f40000000;
+; CHECK-NEXT: mov.b32 %r4, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd7, {%r4, %r3};
; CHECK-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
; CHECK-NEXT: ret;
@@ -155,18 +155,18 @@ define <4 x float> @test_fadd_imm_0_v4(<4 x float> %a) #0 {
define <4 x float> @test_fadd_imm_1_v4(<4 x float> %a) #0 {
; CHECK-LABEL: test_fadd_imm_1_v4(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_1_v4_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_imm_1_v4_param_0];
+; CHECK-NEXT: mov.b32 %r1, 0f40800000;
+; CHECK-NEXT: mov.b32 %r2, 0f40400000;
+; CHECK-NEXT: mov.b64 %rd5, {%r2, %r1};
; CHECK-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
-; CHECK-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-NEXT: mov.b32 %r3, 0f40000000;
+; CHECK-NEXT: mov.b32 %r4, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd7, {%r4, %r3};
; CHECK-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
; CHECK-NEXT: ret;
@@ -192,13 +192,13 @@ define <2 x float> @test_fsub(<2 x float> %a, <2 x float> %b) #0 {
define <2 x float> @test_fneg(<2 x float> %a) #0 {
; CHECK-LABEL: test_fneg(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fneg_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f00000000;
-; CHECK-NEXT: mov.b64 %rd2, {%f1, %f1};
+; CHECK-NEXT: mov.b32 %r1, 0f00000000;
+; CHECK-NEXT: mov.b64 %rd2, {%r1, %r1};
; CHECK-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1;
; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
@@ -240,17 +240,17 @@ define <2 x float> @test_fma(<2 x float> %a, <2 x float> %b, <2 x float> %c) #0
define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-LABEL: test_fdiv(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b32 %r<7>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fdiv_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: div.rn.f32 %f5, %f4, %f2;
-; CHECK-NEXT: div.rn.f32 %f6, %f3, %f1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f6, %f5};
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: div.rn.f32 %r5, %r4, %r2;
+; CHECK-NEXT: div.rn.f32 %r6, %r3, %r1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
; CHECK-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
@@ -260,27 +260,27 @@ define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-LABEL: test_frem(
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
-; CHECK-NEXT: .reg .f32 %f<15>;
+; CHECK-NEXT: .reg .b32 %r<15>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_frem_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: div.rn.f32 %f5, %f4, %f2;
-; CHECK-NEXT: cvt.rzi.f32.f32 %f6, %f5;
-; CHECK-NEXT: mul.f32 %f7, %f6, %f2;
-; CHECK-NEXT: sub.f32 %f8, %f4, %f7;
-; CHECK-NEXT: testp.infinite.f32 %p1, %f2;
-; CHECK-NEXT: selp.f32 %f9, %f4, %f8, %p1;
-; CHECK-NEXT: div.rn.f32 %f10, %f3, %f1;
-; CHECK-NEXT: cvt.rzi.f32.f32 %f11, %f10;
-; CHECK-NEXT: mul.f32 %f12, %f11, %f1;
-; CHECK-NEXT: sub.f32 %f13, %f3, %f12;
-; CHECK-NEXT: testp.infinite.f32 %p2, %f1;
-; CHECK-NEXT: selp.f32 %f14, %f3, %f13, %p2;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f14, %f9};
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: div.rn.f32 %r5, %r4, %r2;
+; CHECK-NEXT: cvt.rzi.f32.f32 %r6, %r5;
+; CHECK-NEXT: neg.f32 %r7, %r6;
+; CHECK-NEXT: fma.rn.f32 %r8, %r7, %r2, %r4;
+; CHECK-NEXT: testp.infinite.f32 %p1, %r2;
+; CHECK-NEXT: selp.f32 %r9, %r4, %r8, %p1;
+; CHECK-NEXT: div.rn.f32 %r10, %r3, %r1;
+; CHECK-NEXT: cvt.rzi.f32.f32 %r11, %r10;
+; CHECK-NEXT: neg.f32 %r12, %r11;
+; CHECK-NEXT: fma.rn.f32 %r13, %r12, %r1, %r3;
+; CHECK-NEXT: testp.infinite.f32 %p2, %r1;
+; CHECK-NEXT: selp.f32 %r14, %r3, %r13, %p2;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r9};
; CHECK-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
@@ -304,14 +304,14 @@ define <2 x float> @test_fadd_ftz(<2 x float> %a, <2 x float> %b) #2 {
define <2 x float> @test_fadd_imm_0_ftz(<2 x float> %a) #2 {
; CHECK-LABEL: test_fadd_imm_0_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_0_ftz_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: mov.b32 %r1, 0f40000000;
+; CHECK-NEXT: mov.b32 %r2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd2, {%r2, %r1};
; CHECK-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
@@ -322,14 +322,14 @@ define <2 x float> @test_fadd_imm_0_ftz(<2 x float> %a) #2 {
define <2 x float> @test_fadd_imm_1_ftz(<2 x float> %a) #2 {
; CHECK-LABEL: test_fadd_imm_1_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_1_ftz_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f40000000;
-; CHECK-NEXT: mov.f32 %f2, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: mov.b32 %r1, 0f40000000;
+; CHECK-NEXT: mov.b32 %r2, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd2, {%r2, %r1};
; CHECK-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
@@ -343,8 +343,8 @@ define <4 x float> @test_fadd_v4_ftz(<4 x float> %a, <4 x float> %b) #2 {
; CHECK-NEXT: .reg .b64 %rd<11>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u64 {%rd5, %rd6}, [test_fadd_v4_ftz_param_1];
-; CHECK-NEXT: ld.param.v2.u64 {%rd7, %rd8}, [test_fadd_v4_ftz_param_0];
+; CHECK-NEXT: ld.param.v2.b64 {%rd5, %rd6}, [test_fadd_v4_ftz_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd7, %rd8}, [test_fadd_v4_ftz_param_0];
; CHECK-NEXT: add.rn.ftz.f32x2 %rd9, %rd8, %rd6;
; CHECK-NEXT: add.rn.ftz.f32x2 %rd10, %rd7, %rd5;
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
@@ -356,18 +356,18 @@ define <4 x float> @test_fadd_v4_ftz(<4 x float> %a, <4 x float> %b) #2 {
define <4 x float> @test_fadd_imm_0_v4_ftz(<4 x float> %a) #2 {
; CHECK-LABEL: test_fadd_imm_0_v4_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_0_v4_ftz_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_imm_0_v4_ftz_param_0];
+; CHECK-NEXT: mov.b32 %r1, 0f40800000;
+; CHECK-NEXT: mov.b32 %r2, 0f40400000;
+; CHECK-NEXT: mov.b64 %rd5, {%r2, %r1};
; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
-; CHECK-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-NEXT: mov.b32 %r3, 0f40000000;
+; CHECK-NEXT: mov.b32 %r4, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd7, {%r4, %r3};
; CHECK-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
; CHECK-NEXT: ret;
@@ -378,18 +378,18 @@ define <4 x float> @test_fadd_imm_0_v4_ftz(<4 x float> %a) #2 {
define <4 x float> @test_fadd_imm_1_v4_ftz(<4 x float> %a) #2 {
; CHECK-LABEL: test_fadd_imm_1_v4_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [test_fadd_imm_1_v4_ftz_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f40800000;
-; CHECK-NEXT: mov.f32 %f2, 0f40400000;
-; CHECK-NEXT: mov.b64 %rd5, {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_imm_1_v4_ftz_param_0];
+; CHECK-NEXT: mov.b32 %r1, 0f40800000;
+; CHECK-NEXT: mov.b32 %r2, 0f40400000;
+; CHECK-NEXT: mov.b64 %rd5, {%r2, %r1};
; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
-; CHECK-NEXT: mov.f32 %f3, 0f40000000;
-; CHECK-NEXT: mov.f32 %f4, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd7, {%f4, %f3};
+; CHECK-NEXT: mov.b32 %r3, 0f40000000;
+; CHECK-NEXT: mov.b32 %r4, 0f3F800000;
+; CHECK-NEXT: mov.b64 %rd7, {%r4, %r3};
; CHECK-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
; CHECK-NEXT: ret;
@@ -415,13 +415,13 @@ define <2 x float> @test_fsub_ftz(<2 x float> %a, <2 x float> %b) #2 {
define <2 x float> @test_fneg_ftz(<2 x float> %a) #2 {
; CHECK-LABEL: test_fneg_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fneg_ftz_param_0];
-; CHECK-NEXT: mov.f32 %f1, 0f00000000;
-; CHECK-NEXT: mov.b64 %rd2, {%f1, %f1};
+; CHECK-NEXT: mov.b32 %r1, 0f00000000;
+; CHECK-NEXT: mov.b64 %rd2, {%r1, %r1};
; CHECK-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1;
; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
@@ -463,17 +463,17 @@ define <2 x float> @test_fma_ftz(<2 x float> %a, <2 x float> %b, <2 x float> %c)
define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK-LABEL: test_fdiv_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b32 %r<7>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_ftz_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fdiv_ftz_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
-; CHECK-NEXT: div.rn.ftz.f32 %f6, %f3, %f1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f6, %f5};
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: div.rn.ftz.f32 %r5, %r4, %r2;
+; CHECK-NEXT: div.rn.ftz.f32 %r6, %r3, %r1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
; CHECK-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
@@ -483,27 +483,27 @@ define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK-LABEL: test_frem_ftz(
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
-; CHECK-NEXT: .reg .f32 %f<15>;
+; CHECK-NEXT: .reg .b32 %r<15>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_ftz_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_frem_ftz_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: div.rn.ftz.f32 %f5, %f4, %f2;
-; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %f6, %f5;
-; CHECK-NEXT: mul.ftz.f32 %f7, %f6, %f2;
-; CHECK-NEXT: sub.ftz.f32 %f8, %f4, %f7;
-; CHECK-NEXT: testp.infinite.f32 %p1, %f2;
-; CHECK-NEXT: selp.f32 %f9, %f4, %f8, %p1;
-; CHECK-NEXT: div.rn.ftz.f32 %f10, %f3, %f1;
-; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %f11, %f10;
-; CHECK-NEXT: mul.ftz.f32 %f12, %f11, %f1;
-; CHECK-NEXT: sub.ftz.f32 %f13, %f3, %f12;
-; CHECK-NEXT: testp.infinite.f32 %p2, %f1;
-; CHECK-NEXT: selp.f32 %f14, %f3, %f13, %p2;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f14, %f9};
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: div.rn.ftz.f32 %r5, %r4, %r2;
+; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %r6, %r5;
+; CHECK-NEXT: neg.ftz.f32 %r7, %r6;
+; CHECK-NEXT: fma.rn.ftz.f32 %r8, %r7, %r2, %r4;
+; CHECK-NEXT: testp.infinite.f32 %p1, %r2;
+; CHECK-NEXT: selp.f32 %r9, %r4, %r8, %p1;
+; CHECK-NEXT: div.rn.ftz.f32 %r10, %r3, %r1;
+; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %r11, %r10;
+; CHECK-NEXT: neg.ftz.f32 %r12, %r11;
+; CHECK-NEXT: fma.rn.ftz.f32 %r13, %r12, %r1, %r3;
+; CHECK-NEXT: testp.infinite.f32 %p2, %r1;
+; CHECK-NEXT: selp.f32 %r14, %r3, %r13, %p2;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r9};
; CHECK-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
@@ -512,15 +512,14 @@ define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
define void @test_ldst_v2f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: test_ldst_v2f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v2f32_param_1];
-; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v2f32_param_0];
-; CHECK-NEXT: ld.b64 %rd3, [%rd1];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd3;
-; CHECK-NEXT: st.v2.f32 [%rd2], {%f1, %f2};
+; CHECK-NEXT: ld.param.b64 %rd2, [test_ldst_v2f32_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_ldst_v2f32_param_0];
+; CHECK-NEXT: ld.v2.b32 {%r1, %r2}, [%rd1];
+; CHECK-NEXT: st.v2.b32 [%rd2], {%r1, %r2};
; CHECK-NEXT: ret;
%t1 = load <2 x float>, ptr %a
store <2 x float> %t1, ptr %b, align 32
@@ -530,16 +529,16 @@ define void @test_ldst_v2f32(ptr %a, ptr %b) #0 {
define void @test_ldst_v3f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: test_ldst_v3f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v3f32_param_1];
-; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v3f32_param_0];
-; CHECK-NEXT: ld.u64 %rd3, [%rd1];
-; CHECK-NEXT: ld.f32 %f1, [%rd1+8];
-; CHECK-NEXT: st.f32 [%rd2+8], %f1;
-; CHECK-NEXT: st.u64 [%rd2], %rd3;
+; CHECK-NEXT: ld.param.b64 %rd2, [test_ldst_v3f32_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_ldst_v3f32_param_0];
+; CHECK-NEXT: ld.b64 %rd3, [%rd1];
+; CHECK-NEXT: ld.b32 %r1, [%rd1+8];
+; CHECK-NEXT: st.b32 [%rd2+8], %r1;
+; CHECK-NEXT: st.b64 [%rd2], %rd3;
; CHECK-NEXT: ret;
%t1 = load <3 x float>, ptr %a
store <3 x float> %t1, ptr %b, align 32
@@ -549,14 +548,14 @@ define void @test_ldst_v3f32(ptr %a, ptr %b) #0 {
define void @test_ldst_v4f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: test_ldst_v4f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v4f32_param_1];
-; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v4f32_param_0];
-; CHECK-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
-; CHECK-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
+; CHECK-NEXT: ld.param.b64 %rd2, [test_ldst_v4f32_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_ldst_v4f32_param_0];
+; CHECK-NEXT: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; CHECK-NEXT: st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
; CHECK-NEXT: ret;
%t1 = load <4 x float>, ptr %a
store <4 x float> %t1, ptr %b, align 32
@@ -566,16 +565,16 @@ define void @test_ldst_v4f32(ptr %a, ptr %b) #0 {
define void @test_ldst_v8f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: test_ldst_v8f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %r<9>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v8f32_param_1];
-; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v8f32_param_0];
-; CHECK-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
-; CHECK-NEXT: ld.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1+16];
-; CHECK-NEXT: st.v4.f32 [%rd2+16], {%f5, %f6, %f7, %f8};
-; CHECK-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4};
+; CHECK-NEXT: ld.param.b64 %rd2, [test_ldst_v8f32_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_ldst_v8f32_param_0];
+; CHECK-NEXT: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; CHECK-NEXT: ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; CHECK-NEXT: st.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; CHECK-NEXT: st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
; CHECK-NEXT: ret;
%t1 = load <8 x float>, ptr %a
store <8 x float> %t1, ptr %b, align 32
@@ -676,9 +675,9 @@ define <2 x float> @test_select(<2 x float> %a, <2 x float> %b, i1 zeroext %c) #
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u8 %rs1, [test_select_param_2];
+; CHECK-NEXT: ld.param.b8 %rs1, [test_select_param_2];
; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
-; CHECK-NEXT: setp.eq.b16 %p1, %rs2, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
; CHECK-NEXT: ld.param.b64 %rd2, [test_select_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_select_param_0];
; CHECK-NEXT: selp.b64 %rd3, %rd1, %rd2, %p1;
@@ -692,7 +691,7 @@ define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %
; CHECK-LABEL: test_select_cc(
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
-; CHECK-NEXT: .reg .f32 %f<11>;
+; CHECK-NEXT: .reg .b32 %r<11>;
; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -700,15 +699,15 @@ define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %
; CHECK-NEXT: ld.param.b64 %rd3, [test_select_cc_param_2];
; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd3;
-; CHECK-NEXT: setp.neu.f32 %p1, %f3, %f1;
-; CHECK-NEXT: setp.neu.f32 %p2, %f4, %f2;
-; CHECK-NEXT: mov.b64 {%f5, %f6}, %rd2;
-; CHECK-NEXT: mov.b64 {%f7, %f8}, %rd1;
-; CHECK-NEXT: selp.f32 %f9, %f8, %f6, %p2;
-; CHECK-NEXT: selp.f32 %f10, %f7, %f5, %p1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f10, %f9};
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd3;
+; CHECK-NEXT: setp.neu.f32 %p1, %r3, %r1;
+; CHECK-NEXT: setp.neu.f32 %p2, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r5, %r6}, %rd2;
+; CHECK-NEXT: mov.b64 {%r7, %r8}, %rd1;
+; CHECK-NEXT: selp.f32 %r9, %r8, %r6, %p2;
+; CHECK-NEXT: selp.f32 %r10, %r7, %r5, %p1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r10, %r9};
; CHECK-NEXT: ret;
%cc = fcmp une <2 x float> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
@@ -719,22 +718,21 @@ define <2 x double> @test_select_cc_f64_f32(<2 x double> %a, <2 x double> %b, <2
; CHECK-LABEL: test_select_cc_f64_f32(
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
-; CHECK-NEXT: .reg .f64 %fd<7>;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f64_f32_param_1];
-; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f64_f32_param_0];
-; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_f64_f32_param_3];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_f64_f32_param_2];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.neu.f32 %p1, %f3, %f1;
-; CHECK-NEXT: setp.neu.f32 %p2, %f4, %f2;
-; CHECK-NEXT: selp.f64 %fd5, %fd2, %fd4, %p2;
-; CHECK-NEXT: selp.f64 %fd6, %fd1, %fd3, %p1;
-; CHECK-NEXT: st.param.v2.f64 [func_retval0], {%fd6, %fd5};
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_select_cc_f64_f32_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_select_cc_f64_f32_param_0];
+; CHECK-NEXT: ld.param.b64 %rd6, [test_select_cc_f64_f32_param_3];
+; CHECK-NEXT: ld.param.b64 %rd5, [test_select_cc_f64_f32_param_2];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd6;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd5;
+; CHECK-NEXT: setp.neu.f32 %p1, %r3, %r1;
+; CHECK-NEXT: setp.neu.f32 %p2, %r4, %r2;
+; CHECK-NEXT: selp.f64 %rd7, %rd2, %rd4, %p2;
+; CHECK-NEXT: selp.f64 %rd8, %rd1, %rd3, %p1;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd7};
; CHECK-NEXT: ret;
%cc = fcmp une <2 x float> %c, %d
%r = select <2 x i1> %cc, <2 x double> %a, <2 x double> %b
@@ -745,22 +743,21 @@ define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x
; CHECK-LABEL: test_select_cc_f32_f64(
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
-; CHECK-NEXT: .reg .f32 %f<7>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-NEXT: .reg .b64 %rd<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f32_f64_param_3];
-; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f32_f64_param_2];
+; CHECK-NEXT: ld.param.v2.b64 {%rd5, %rd6}, [test_select_cc_f32_f64_param_3];
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_select_cc_f32_f64_param_2];
; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_f32_f64_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_f32_f64_param_0];
-; CHECK-NEXT: setp.neu.f64 %p1, %fd1, %fd3;
-; CHECK-NEXT: setp.neu.f64 %p2, %fd2, %fd4;
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: selp.f32 %f5, %f4, %f2, %p2;
-; CHECK-NEXT: selp.f32 %f6, %f3, %f1, %p1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f6, %f5};
+; CHECK-NEXT: setp.neu.f64 %p1, %rd3, %rd5;
+; CHECK-NEXT: setp.neu.f64 %p2, %rd4, %rd6;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: selp.f32 %r5, %r4, %r2, %p2;
+; CHECK-NEXT: selp.f32 %r6, %r3, %r1, %p1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
; CHECK-NEXT: ret;
%cc = fcmp une <2 x double> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
@@ -772,19 +769,19 @@ define <2 x i1> @test_fcmp_une(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_une_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_une_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.neu.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.neu.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.neu.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.neu.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp une <2 x float> %a, %b
@@ -796,19 +793,19 @@ define <2 x i1> @test_fcmp_ueq(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ueq_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ueq_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.equ.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.equ.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.equ.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.equ.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp ueq <2 x float> %a, %b
@@ -820,19 +817,19 @@ define <2 x i1> @test_fcmp_ugt(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ugt_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ugt_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.gtu.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.gtu.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.gtu.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.gtu.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp ugt <2 x float> %a, %b
@@ -844,19 +841,19 @@ define <2 x i1> @test_fcmp_uge(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_uge_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_uge_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.geu.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.geu.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.geu.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.geu.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp uge <2 x float> %a, %b
@@ -868,19 +865,19 @@ define <2 x i1> @test_fcmp_ult(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ult_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ult_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.ltu.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.ltu.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.ltu.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.ltu.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp ult <2 x float> %a, %b
@@ -892,19 +889,19 @@ define <2 x i1> @test_fcmp_ule(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ule_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ule_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.leu.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.leu.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.leu.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.leu.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp ule <2 x float> %a, %b
@@ -916,19 +913,19 @@ define <2 x i1> @test_fcmp_uno(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_uno_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_uno_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.nan.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.nan.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.nan.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.nan.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp uno <2 x float> %a, %b
@@ -940,19 +937,19 @@ define <2 x i1> @test_fcmp_one(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_one_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_one_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.ne.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.ne.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.ne.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.ne.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp one <2 x float> %a, %b
@@ -964,19 +961,19 @@ define <2 x i1> @test_fcmp_oeq(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_oeq_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_oeq_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.eq.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.eq.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.eq.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.eq.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp oeq <2 x float> %a, %b
@@ -988,19 +985,19 @@ define <2 x i1> @test_fcmp_ogt(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ogt_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ogt_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.gt.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.gt.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.gt.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.gt.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp ogt <2 x float> %a, %b
@@ -1012,19 +1009,19 @@ define <2 x i1> @test_fcmp_oge(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_oge_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_oge_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.ge.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.ge.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.ge.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.ge.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp oge <2 x float> %a, %b
@@ -1036,19 +1033,19 @@ define <2 x i1> @test_fcmp_olt(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_olt_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_olt_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.lt.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.lt.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.lt.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.lt.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp olt <2 x float> %a, %b
@@ -1060,19 +1057,19 @@ define <2 x i1> @test_fcmp_ole(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ole_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ole_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.le.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.le.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.le.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.le.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp ole <2 x float> %a, %b
@@ -1084,19 +1081,19 @@ define <2 x i1> @test_fcmp_ord(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ord_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ord_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2;
-; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1;
-; CHECK-NEXT: setp.num.f32 %p1, %f4, %f2;
-; CHECK-NEXT: setp.num.f32 %p2, %f3, %f1;
-; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p2;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-NEXT: setp.num.f32 %p1, %r4, %r2;
+; CHECK-NEXT: setp.num.f32 %p2, %r3, %r1;
+; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2;
; CHECK-NEXT: st.param.b8 [func_retval0], %rs1;
-; CHECK-NEXT: selp.u16 %rs2, -1, 0, %p1;
+; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1;
; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2;
; CHECK-NEXT: ret;
%r = fcmp ord <2 x float> %a, %b
@@ -1106,16 +1103,15 @@ define <2 x i1> @test_fcmp_ord(<2 x float> %a, <2 x float> %b) #0 {
define <2 x i32> @test_fptosi_i32(<2 x float> %a) #0 {
; CHECK-LABEL: test_fptosi_i32(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fptosi_i32_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-NEXT: cvt.rzi.s32.f32 %r1, %f2;
-; CHECK-NEXT: cvt.rzi.s32.f32 %r2, %f1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-NEXT: cvt.rzi.s32.f32 %r3, %r2;
+; CHECK-NEXT: cvt.rzi.s32.f32 %r4, %r1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
; CHECK-NEXT: ret;
%r = fptosi <2 x float> %a to <2 x i32>
ret <2 x i32> %r
@@ -1124,14 +1120,14 @@ define <2 x i32> @test_fptosi_i32(<2 x float> %a) #0 {
define <2 x i64> @test_fptosi_i64(<2 x float> %a) #0 {
; CHECK-LABEL: test_fptosi_i64(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fptosi_i64_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-NEXT: cvt.rzi.s64.f32 %rd2, %f2;
-; CHECK-NEXT: cvt.rzi.s64.f32 %rd3, %f1;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-NEXT: cvt.rzi.s64.f32 %rd2, %r2;
+; CHECK-NEXT: cvt.rzi.s64.f32 %rd3, %r1;
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
; CHECK-NEXT: ret;
%r = fptosi <2 x float> %a to <2 x i64>
@@ -1141,16 +1137,15 @@ define <2 x i64> @test_fptosi_i64(<2 x float> %a) #0 {
define <2 x i32> @test_fptoui_2xi32(<2 x float> %a) #0 {
; CHECK-LABEL: test_fptoui_2xi32(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fptoui_2xi32_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-NEXT: cvt.rzi.u32.f32 %r1, %f2;
-; CHECK-NEXT: cvt.rzi.u32.f32 %r2, %f1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-NEXT: cvt.rzi.u32.f32 %r3, %r2;
+; CHECK-NEXT: cvt.rzi.u32.f32 %r4, %r1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
; CHECK-NEXT: ret;
%r = fptoui <2 x float> %a to <2 x i32>
ret <2 x i32> %r
@@ -1159,14 +1154,14 @@ define <2 x i32> @test_fptoui_2xi32(<2 x float> %a) #0 {
define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 {
; CHECK-LABEL: test_fptoui_2xi64(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fptoui_2xi64_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-NEXT: cvt.rzi.u64.f32 %rd2, %f2;
-; CHECK-NEXT: cvt.rzi.u64.f32 %rd3, %f1;
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-NEXT: cvt.rzi.u64.f32 %rd2, %r2;
+; CHECK-NEXT: cvt.rzi.u64.f32 %rd3, %r1;
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
; CHECK-NEXT: ret;
%r = fptoui <2 x float> %a to <2 x i64>
@@ -1176,14 +1171,13 @@ define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 {
define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
; CHECK-LABEL: test_uitofp_2xi32(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
-; CHECK-NEXT: cvt.rn.f32.u32 %f1, %r2;
-; CHECK-NEXT: cvt.rn.f32.u32 %f2, %r1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
+; CHECK-NEXT: cvt.rn.f32.u32 %r3, %r2;
+; CHECK-NEXT: cvt.rn.f32.u32 %r4, %r1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
; CHECK-NEXT: ret;
%r = uitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
@@ -1192,14 +1186,14 @@ define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
; CHECK-LABEL: test_uitofp_2xi64(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
-; CHECK-NEXT: cvt.rn.f32.u64 %f1, %rd2;
-; CHECK-NEXT: cvt.rn.f32.u64 %f2, %rd1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
+; CHECK-NEXT: cvt.rn.f32.u64 %r1, %rd2;
+; CHECK-NEXT: cvt.rn.f32.u64 %r2, %rd1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
; CHECK-NEXT: ret;
%r = uitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
@@ -1208,14 +1202,13 @@ define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
; CHECK-LABEL: test_sitofp_2xi32(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
-; CHECK-NEXT: cvt.rn.f32.s32 %f1, %r2;
-; CHECK-NEXT: cvt.rn.f32.s32 %f2, %r1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
+; CHECK-NEXT: cvt.rn.f32.s32 %r3, %r2;
+; CHECK-NEXT: cvt.rn.f32.s32 %r4, %r1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
; CHECK-NEXT: ret;
%r = sitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
@@ -1224,14 +1217,14 @@ define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 {
; CHECK-LABEL: test_sitofp_2xi64(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
-; CHECK-NEXT: cvt.rn.f32.s64 %f1, %rd2;
-; CHECK-NEXT: cvt.rn.f32.s64 %f2, %rd1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
+; CHECK-NEXT: cvt.rn.f32.s64 %r1, %rd2;
+; CHECK-NEXT: cvt.rn.f32.s64 %r2, %rd1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
; CHECK-NEXT: ret;
%r = sitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
@@ -1240,16 +1233,15 @@ define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 {
define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 {
; CHECK-LABEL: test_uitofp_2xi32_fadd(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %r<5>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
+; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
; CHECK-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_1];
-; CHECK-NEXT: cvt.rn.f32.u32 %f1, %r2;
-; CHECK-NEXT: cvt.rn.f32.u32 %f2, %r1;
-; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1};
+; CHECK-NEXT: cvt.rn.f32.u32 %r3, %r2;
+; CHECK-NEXT: cvt.rn.f32.u32 %r4, %r1;
+; CHECK-NEXT: mov.b64 %rd2, {%r4, %r3};
; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2;
; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
@@ -1261,14 +1253,14 @@ define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 {
define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
; CHECK-LABEL: test_fptrunc_2xdouble(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0];
-; CHECK-NEXT: cvt.rn.f32.f64 %f1, %fd2;
-; CHECK-NEXT: cvt.rn.f32.f64 %f2, %fd1;
-; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fptrunc_2xdouble_param_0];
+; CHECK-NEXT: cvt.rn.f32.f64 %r1, %rd2;
+; CHECK-NEXT: cvt.rn.f32.f64 %r2, %rd1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
; CHECK-NEXT: ret;
%r = fptrunc <2 x double> %a to <2 x float>
ret <2 x float> %r
@@ -1277,16 +1269,15 @@ define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
define <2 x double> @test_fpext_2xdouble(<2 x float> %a) #0 {
; CHECK-LABEL: test_fpext_2xdouble(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
-; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_fpext_2xdouble_param_0];
-; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1;
-; CHECK-NEXT: cvt.f64.f32 %fd1, %f2;
-; CHECK-NEXT: cvt.f64.f32 %fd2, %f1;
-; CHECK-NEXT: st.param.v2.f64 [func_retval0], {%fd2, %fd1};
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-NEXT: cvt.f64.f32 %rd2, %r2;
+; CHECK-NEXT: cvt.f64.f32 %rd3, %r1;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
; CHECK-NEXT: ret;
%r = fpext <2 x float> %a to <2 x double>
ret <2 x double> %r
@@ -1299,8 +1290,8 @@ define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 {
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_2xi32_param_0];
-; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r1}, %rd2; }
+; CHECK-NEXT: ld.param.b64 %rd2, [test_bitcast_2xfloat_to_2xi32_param_0];
+; CHECK-NEXT: mov.b64 {_, %r1}, %rd2;
; CHECK-NEXT: cvt.u32.u64 %r2, %rd2;
; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
; CHECK-NEXT: ret;
@@ -1315,7 +1306,7 @@ define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 {
; CHECK-NEXT: .reg .b64 %rd<6>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0];
+; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0];
; CHECK-NEXT: cvt.u64.u32 %rd1, %r1;
; CHECK-NEXT: cvt.u64.u32 %rd2, %r2;
; CHECK-NEXT: shl.b64 %rd3, %rd2, 32;
@@ -1329,13 +1320,12 @@ define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 {
define <2 x float> @test_bitcast_double_to_2xfloat(double %a) #0 {
; CHECK-LABEL: test_bitcast_double_to_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.f64 %fd1, [test_bitcast_double_to_2xfloat_param_0];
-; CHECK-NEXT: mov.b64 %rd1, %fd1;
-; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ld.param.b64 %rd1, [test_bitcast_double_to_2xfloat_param_0];
+; CHECK-NEXT: mov.b64 %rd2, %rd1;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd2;
; CHECK-NEXT: ret;
%r = bitcast double %a to <2 x float>
ret <2 x float> %r
@@ -1344,13 +1334,12 @@ define <2 x float> @test_bitcast_double_to_2xfloat(double %a) #0 {
define double @test_bitcast_2xfloat_to_double(<2 x float> %a) #0 {
; CHECK-LABEL: test_bitcast_2xfloat_to_double(
; CHECK: {
-; CHECK-NEXT: .reg .b64 %rd<3>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_double_param_0];
-; CHECK-NEXT: mov.b64 %fd1, %rd2;
-; CHECK-NEXT: st.param.f64 [func_retval0], %fd1;
+; CHECK-NEXT: ld.param.b64 %rd2, [test_bitcast_2xfloat_to_double_param_0];
+; CHECK-NEXT: mov.b64 %rd3, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
%r = bitcast <2 x float> %a to double
ret double %r
diff --git a/llvm/test/CodeGen/NVPTX/read-global-variable-constant.ll b/llvm/test/CodeGen/NVPTX/read-global-variable-constant.ll
index aa463b510fe84..b244d6a8bff48 100644
--- a/llvm/test/CodeGen/NVPTX/read-global-variable-constant.ll
+++ b/llvm/test/CodeGen/NVPTX/read-global-variable-constant.ll
@@ -17,7 +17,7 @@ define float @test_gv_float() {
; CHECK-LABEL: test_gv_float2()
define <2 x float> @test_gv_float2() {
-; CHECK: ld.global.nc.v2.b32
+; CHECK: ld.global.nc.b64
%v = load <2 x float>, ptr @gv_float2
ret <2 x float> %v
}
diff --git a/llvm/test/CodeGen/NVPTX/vector-loads.ll b/llvm/test/CodeGen/NVPTX/vector-loads.ll
index 88ff59407a143..aecb3102ca8b0 100644
--- a/llvm/test/CodeGen/NVPTX/vector-loads.ll
+++ b/llvm/test/CodeGen/NVPTX/vector-loads.ll
@@ -4,7 +4,7 @@
; Even though general vector types are not supported in PTX, we can still
; optimize loads/stores with pseudo-vector instructions of the form:
;
-; ld.v2.f32 {%r0, %r1}, [%r0]
+; ld.v2.b32 {%r0, %r1}, [%r0]
;
; which will load two floats at once into scalar registers.
@@ -101,18 +101,18 @@ define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(1342177
define void @extv8f16_global_a16(ptr addrspace(1) noalias readonly align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 {
; CHECK: ld.global.v4.b32 {%r
%v = load <8 x half>, ptr addrspace(1) %src, align 16
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}
+; CHECK-DAG: mov.b32 {[[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}
+; CHECK-DAG: mov.b32 {[[E4:%rs[0-9]+]], [[E5:%rs[0-9]+]]}
+; CHECK-DAG: mov.b32 {[[E6:%rs[0-9]+]], [[E7:%rs[0-9]+]]}
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E0]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E1]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E2]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E3]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E4]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E5]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E6]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E7]]
%ext = fpext <8 x half> %v to <8 x float>
; CHECK: st.global.v4.b32
; CHECK: st.global.v4.b32
@@ -151,18 +151,18 @@ define void @extv8f16_global_a4(ptr addrspace(1) noalias readonly align 16 %dst,
define void @extv8f16_generic_a16(ptr noalias readonly align 16 %dst, ptr noalias readonly align 16 %src) #0 {
; CHECK: ld.v4.b32 {%r
%v = load <8 x half>, ptr %src, align 16
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}
+; CHECK-DAG: mov.b32 {[[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}
+; CHECK-DAG: mov.b32 {[[E4:%rs[0-9]+]], [[E5:%rs[0-9]+]]}
+; CHECK-DAG: mov.b32 {[[E6:%rs[0-9]+]], [[E7:%rs[0-9]+]]}
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E0]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E1]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E2]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E3]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E4]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E5]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E6]]
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, [[E7]]
%ext = fpext <8 x half> %v to <8 x float>
; CHECK: st.v4.b32
; CHECK: st.v4.b32
diff --git a/llvm/test/CodeGen/NVPTX/vector-stores.ll b/llvm/test/CodeGen/NVPTX/vector-stores.ll
index f3b1015070085..b82a04898c2e2 100644
--- a/llvm/test/CodeGen/NVPTX/vector-stores.ll
+++ b/llvm/test/CodeGen/NVPTX/vector-stores.ll
@@ -2,7 +2,7 @@
; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
; CHECK-LABEL: .visible .func foo1
-; CHECK: st.v2.b32
+; CHECK: st.b64
define void @foo1(<2 x float> %val, ptr %ptr) {
store <2 x float> %val, ptr %ptr
ret void
>From 600af45a055b3be6dc3711a4237d2bf646ef86d3 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Thu, 6 Mar 2025 15:26:37 -0800
Subject: [PATCH 21/32] [NVPTX] support generic LDG/LDU for packed data types
Support ld.global.nc.b64/ldu.global.b64 for v2f32 and
ld.global.nc.b32/ldu.global.b32 for v2f16/v2bf16/v2i16/v4i8
Update test cases.
---
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 128 ++++++++++++--------
llvm/test/CodeGen/NVPTX/ldu-ldg.ll | 30 +++++
2 files changed, 105 insertions(+), 53 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 1144c00ba9857..cbe1b0847f32c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1256,11 +1256,14 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
EltVT = MVT::i64;
NumElts = 2;
}
+
+ std::optional<unsigned> Opcode;
+
if (EltVT.isVector()) {
NumElts = EltVT.getVectorNumElements();
EltVT = EltVT.getVectorElementType();
- // vectors of 8/16bits type are loaded/stored as multiples of v4i8/v2x16
- // elements.
+ // vectors of 8/16/32bits type are loaded/stored as multiples of
+ // v4i8/v2x16/v2x32 elements.
if ((EltVT == MVT::f32 && OrigType == MVT::v2f32) ||
(EltVT == MVT::f16 && OrigType == MVT::v2f16) ||
(EltVT == MVT::bf16 && OrigType == MVT::v2bf16) ||
@@ -1268,6 +1271,24 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
(EltVT == MVT::i8 && OrigType == MVT::v4i8)) {
assert(NumElts % OrigType.getVectorNumElements() == 0 &&
"NumElts must be divisible by the number of elts in subvectors");
+ if (N->getOpcode() == ISD::LOAD ||
+ N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
+ switch (OrigType.getSimpleVT().SimpleTy) {
+ case MVT::v2f32:
+ Opcode = N->getOpcode() == ISD::LOAD ? NVPTX::INT_PTX_LDG_GLOBAL_i64
+ : NVPTX::INT_PTX_LDU_GLOBAL_i64;
+ break;
+ case MVT::v2f16:
+ case MVT::v2bf16:
+ case MVT::v2i16:
+ case MVT::v4i8:
+ Opcode = N->getOpcode() == ISD::LOAD ? NVPTX::INT_PTX_LDG_GLOBAL_i32
+ : NVPTX::INT_PTX_LDU_GLOBAL_i32;
+ break;
+ default:
+ llvm_unreachable("Unhandled packed vector type");
+ }
+ }
EltVT = OrigType;
NumElts /= OrigType.getVectorNumElements();
}
@@ -1287,57 +1308,58 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
SelectADDR(Op1, Base, Offset);
SDValue Ops[] = {Base, Offset, Chain};
- std::optional<unsigned> Opcode;
- switch (N->getOpcode()) {
- default:
- return false;
- case ISD::LOAD:
- Opcode = pickOpcodeForVT(
- EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_GLOBAL_i8,
- NVPTX::INT_PTX_LDG_GLOBAL_i16, NVPTX::INT_PTX_LDG_GLOBAL_i32,
- NVPTX::INT_PTX_LDG_GLOBAL_i64, NVPTX::INT_PTX_LDG_GLOBAL_f32,
- NVPTX::INT_PTX_LDG_GLOBAL_f64);
- break;
- case ISD::INTRINSIC_W_CHAIN:
- Opcode = pickOpcodeForVT(
- EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_GLOBAL_i8,
- NVPTX::INT_PTX_LDU_GLOBAL_i16, NVPTX::INT_PTX_LDU_GLOBAL_i32,
- NVPTX::INT_PTX_LDU_GLOBAL_i64, NVPTX::INT_PTX_LDU_GLOBAL_f32,
- NVPTX::INT_PTX_LDU_GLOBAL_f64);
- break;
- case NVPTXISD::LoadV2:
- Opcode = pickOpcodeForVT(
- EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v2i8_ELE,
- NVPTX::INT_PTX_LDG_G_v2i16_ELE, NVPTX::INT_PTX_LDG_G_v2i32_ELE,
- NVPTX::INT_PTX_LDG_G_v2i64_ELE, NVPTX::INT_PTX_LDG_G_v2f32_ELE,
- NVPTX::INT_PTX_LDG_G_v2f64_ELE);
- break;
- case NVPTXISD::LDUV2:
- Opcode = pickOpcodeForVT(
- EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v2i8_ELE,
- NVPTX::INT_PTX_LDU_G_v2i16_ELE, NVPTX::INT_PTX_LDU_G_v2i32_ELE,
- NVPTX::INT_PTX_LDU_G_v2i64_ELE, NVPTX::INT_PTX_LDU_G_v2f32_ELE,
- NVPTX::INT_PTX_LDU_G_v2f64_ELE);
- break;
- case NVPTXISD::LoadV4:
- Opcode = pickOpcodeForVT(
- EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v4i8_ELE,
- NVPTX::INT_PTX_LDG_G_v4i16_ELE, NVPTX::INT_PTX_LDG_G_v4i32_ELE,
- NVPTX::INT_PTX_LDG_G_v4i64_ELE, NVPTX::INT_PTX_LDG_G_v4f32_ELE,
- NVPTX::INT_PTX_LDG_G_v4f64_ELE);
- break;
- case NVPTXISD::LDUV4:
- Opcode = pickOpcodeForVT(
- EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v4i8_ELE,
- NVPTX::INT_PTX_LDU_G_v4i16_ELE, NVPTX::INT_PTX_LDU_G_v4i32_ELE,
- {/* no v4i64 */}, NVPTX::INT_PTX_LDU_G_v4f32_ELE, {/* no v4f64 */});
- break;
- case NVPTXISD::LoadV8:
- Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, {/* no v8i8 */},
- {/* no v8i16 */}, NVPTX::INT_PTX_LDG_G_v8i32_ELE,
- {/* no v8i64 */}, NVPTX::INT_PTX_LDG_G_v8f32_ELE,
- {/* no v8f64 */});
- break;
+ if (!Opcode) {
+ switch (N->getOpcode()) {
+ default:
+ return false;
+ case ISD::LOAD:
+ Opcode = pickOpcodeForVT(
+ EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_GLOBAL_i8,
+ NVPTX::INT_PTX_LDG_GLOBAL_i16, NVPTX::INT_PTX_LDG_GLOBAL_i32,
+ NVPTX::INT_PTX_LDG_GLOBAL_i64, NVPTX::INT_PTX_LDG_GLOBAL_f32,
+ NVPTX::INT_PTX_LDG_GLOBAL_f64);
+ break;
+ case ISD::INTRINSIC_W_CHAIN:
+ Opcode = pickOpcodeForVT(
+ EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_GLOBAL_i8,
+ NVPTX::INT_PTX_LDU_GLOBAL_i16, NVPTX::INT_PTX_LDU_GLOBAL_i32,
+ NVPTX::INT_PTX_LDU_GLOBAL_i64, NVPTX::INT_PTX_LDU_GLOBAL_f32,
+ NVPTX::INT_PTX_LDU_GLOBAL_f64);
+ break;
+ case NVPTXISD::LoadV2:
+ Opcode = pickOpcodeForVT(
+ EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v2i8_ELE,
+ NVPTX::INT_PTX_LDG_G_v2i16_ELE, NVPTX::INT_PTX_LDG_G_v2i32_ELE,
+ NVPTX::INT_PTX_LDG_G_v2i64_ELE, NVPTX::INT_PTX_LDG_G_v2f32_ELE,
+ NVPTX::INT_PTX_LDG_G_v2f64_ELE);
+ break;
+ case NVPTXISD::LDUV2:
+ Opcode = pickOpcodeForVT(
+ EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v2i8_ELE,
+ NVPTX::INT_PTX_LDU_G_v2i16_ELE, NVPTX::INT_PTX_LDU_G_v2i32_ELE,
+ NVPTX::INT_PTX_LDU_G_v2i64_ELE, NVPTX::INT_PTX_LDU_G_v2f32_ELE,
+ NVPTX::INT_PTX_LDU_G_v2f64_ELE);
+ break;
+ case NVPTXISD::LoadV4:
+ Opcode = pickOpcodeForVT(
+ EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v4i8_ELE,
+ NVPTX::INT_PTX_LDG_G_v4i16_ELE, NVPTX::INT_PTX_LDG_G_v4i32_ELE,
+ NVPTX::INT_PTX_LDG_G_v4i64_ELE, NVPTX::INT_PTX_LDG_G_v4f32_ELE,
+ NVPTX::INT_PTX_LDG_G_v4f64_ELE);
+ break;
+ case NVPTXISD::LDUV4:
+ Opcode = pickOpcodeForVT(
+ EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v4i8_ELE,
+ NVPTX::INT_PTX_LDU_G_v4i16_ELE, NVPTX::INT_PTX_LDU_G_v4i32_ELE,
+ {/* no v4i64 */}, NVPTX::INT_PTX_LDU_G_v4f32_ELE, {/* no v4f64 */});
+ break;
+ case NVPTXISD::LoadV8:
+ Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, {/* no v8i8 */},
+ {/* no v8i16 */}, NVPTX::INT_PTX_LDG_G_v8i32_ELE,
+ {/* no v8i64 */}, NVPTX::INT_PTX_LDG_G_v8f32_ELE,
+ {/* no v8f64 */});
+ break;
+ }
}
if (!Opcode)
return false;
diff --git a/llvm/test/CodeGen/NVPTX/ldu-ldg.ll b/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
index 7ac697c4ce203..9221fad073bea 100644
--- a/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
+++ b/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
@@ -12,6 +12,7 @@ declare float @llvm.nvvm.ldu.global.f.f32.p1(ptr addrspace(1) %ptr, i32 %align)
declare double @llvm.nvvm.ldu.global.f.f64.p1(ptr addrspace(1) %ptr, i32 %align)
declare half @llvm.nvvm.ldu.global.f.f16.p1(ptr addrspace(1) %ptr, i32 %align)
declare <2 x half> @llvm.nvvm.ldu.global.f.v2f16.p1(ptr addrspace(1) %ptr, i32 %align)
+declare <2 x float> @llvm.nvvm.ldu.global.f.v2f32.p1(ptr addrspace(1) %ptr, i32 %align)
declare i8 @llvm.nvvm.ldg.global.i.i8.p1(ptr addrspace(1) %ptr, i32 %align)
declare i16 @llvm.nvvm.ldg.global.i.i16.p1(ptr addrspace(1) %ptr, i32 %align)
@@ -22,6 +23,7 @@ declare float @llvm.nvvm.ldg.global.f.f32.p1(ptr addrspace(1) %ptr, i32 %align)
declare double @llvm.nvvm.ldg.global.f.f64.p1(ptr addrspace(1) %ptr, i32 %align)
declare half @llvm.nvvm.ldg.global.f.f16.p1(ptr addrspace(1) %ptr, i32 %align)
declare <2 x half> @llvm.nvvm.ldg.global.f.v2f16.p1(ptr addrspace(1) %ptr, i32 %align)
+declare <2 x float> @llvm.nvvm.ldg.global.f.v2f32.p1(ptr addrspace(1) %ptr, i32 %align)
define i8 @test_ldu_i8(ptr addrspace(1) %ptr) {
; CHECK-LABEL: test_ldu_i8(
@@ -160,6 +162,20 @@ define <2 x half> @test_ldu_v2f16(ptr addrspace(1) %ptr) {
ret <2 x half> %val
}
+define <2 x float> @test_ldu_v2f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: test_ldu_v2f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_ldu_v2f32_param_0];
+; CHECK-NEXT: ldu.global.b64 %rd2, [%rd1];
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd2;
+; CHECK-NEXT: ret;
+ %val = tail call <2 x float> @llvm.nvvm.ldu.global.f.v2f32.p1(ptr addrspace(1) %ptr, i32 8)
+ ret <2 x float> %val
+}
+
define i8 @test_ldg_i8(ptr addrspace(1) %ptr) {
; CHECK-LABEL: test_ldg_i8(
; CHECK: {
@@ -296,6 +312,20 @@ define <2 x half> @test_ldg_v2f16(ptr addrspace(1) %ptr) {
ret <2 x half> %val
}
+define <2 x float> @test_ldg_v2f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: test_ldg_v2f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_ldg_v2f32_param_0];
+; CHECK-NEXT: ld.global.nc.b64 %rd2, [%rd1];
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd2;
+; CHECK-NEXT: ret;
+ %val = tail call <2 x float> @llvm.nvvm.ldg.global.f.v2f32.p1(ptr addrspace(1) %ptr, i32 8)
+ ret <2 x float> %val
+}
+
@g = addrspace(1) global i32 0
define i32 @test_ldg_asi() {
>From eba9b258f6a35b4b36e23a01b2ace4e27474c9e6 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Thu, 6 Mar 2025 20:16:03 -0800
Subject: [PATCH 22/32] [NVPTX] fold v2f32 = bitcast (i64,i64,... =
NVPTXISD::Load*)
Fold i64->v2f32 bitcasts on the results of a NVPTXISD::Load* op.
---
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 17 ++--
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 59 +++++++++++++-
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 80 +++++++++----------
llvm/test/CodeGen/NVPTX/vec-param-load.ll | 32 ++++----
4 files changed, 125 insertions(+), 63 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index cbe1b0847f32c..38e0f367b55b6 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1127,7 +1127,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
return true;
}
-static bool isSubVectorPackedInI32(EVT EltVT) {
+static bool isSubVectorPackedInInteger(EVT EltVT) {
// Despite vectors like v8i8, v16i8, v8i16 being within the bit-limit for
// total load/store size, PTX syntax only supports v2/v4. Thus, we can't use
// vectorized loads/stores with the actual element type for i8/i16 as that
@@ -1135,7 +1135,9 @@ static bool isSubVectorPackedInI32(EVT EltVT) {
// In order to load/store such vectors efficiently, in Type Legalization
// we split the vector into word-sized chunks (v2x16/v4i8). Now, we will
// lower to PTX as vectors of b32.
- return Isv2x16VT(EltVT) || EltVT == MVT::v4i8;
+ // We also consider v2f32 as an upsized type, which may be used in packed
+ // (f32x2) instructions.
+ return Isv2x16VT(EltVT) || EltVT == MVT::v4i8 || EltVT == MVT::v2f32;
}
static unsigned getLoadStoreVectorNumElts(SDNode *N) {
@@ -1187,9 +1189,11 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
unsigned FromTypeWidth = TotalWidth / getLoadStoreVectorNumElts(N);
- if (isSubVectorPackedInI32(EltVT)) {
+ if (isSubVectorPackedInInteger(EltVT)) {
assert(ExtensionType == ISD::NON_EXTLOAD);
- EltVT = MVT::i32;
+ FromTypeWidth = EltVT.getSizeInBits();
+ EltVT = MVT::getIntegerVT(FromTypeWidth);
+ FromType = NVPTX::PTXLdStInstCode::Untyped;
}
assert(isPowerOf2_32(FromTypeWidth) && FromTypeWidth >= 8 &&
@@ -1497,8 +1501,9 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
SDValue N2 = N->getOperand(NumElts + 1);
unsigned ToTypeWidth = TotalWidth / NumElts;
- if (isSubVectorPackedInI32(EltVT)) {
- EltVT = MVT::i32;
+ if (isSubVectorPackedInInteger(EltVT)) {
+ ToTypeWidth = EltVT.getSizeInBits();
+ EltVT = MVT::getIntegerVT(ToTypeWidth);
}
assert(isPowerOf2_32(ToTypeWidth) && ToTypeWidth >= 8 && ToTypeWidth <= 128 &&
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 4b37805d61ac5..318afe7e9861a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -865,7 +865,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT, ISD::FADD,
ISD::MUL, ISD::SHL, ISD::SREM, ISD::UREM, ISD::VSELECT,
ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::FP_ROUND,
- ISD::TRUNCATE, ISD::LOAD});
+ ISD::TRUNCATE, ISD::LOAD, ISD::BITCAST});
// setcc for f16x2 and bf16x2 needs special handling to prevent
// legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -6287,6 +6287,61 @@ static SDValue PerformTRUNCATECombine(SDNode *N,
return SDValue();
}
+static SDValue PerformBITCASTCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ if (N->getValueType(0) != MVT::v2f32)
+ return SDValue();
+
+ SDValue Operand = N->getOperand(0);
+ if (Operand.getValueType() != MVT::i64)
+ return SDValue();
+
+ // DAGCombiner handles bitcast(ISD::LOAD) already. For these, we'll do the
+ // same thing, by changing their output values from i64 to v2f32. Then the
+ // rule for combining loads (see PerformLoadCombine) may split these loads
+ // further.
+ if (Operand.getOpcode() == NVPTXISD::LoadV2 ||
+ Operand.getOpcode() == NVPTXISD::LoadParam ||
+ Operand.getOpcode() == NVPTXISD::LoadParamV2) {
+    // Check that every use of the load's data result is a bitcast from i64 to
+    // v2f32; bail out on any other use pattern.
+ SmallVector<std::pair<SDNode *, unsigned /* resno */>> OldUses;
+ for (SDUse &U : Operand->uses()) {
+ SDNode *User = U.getUser();
+ if (!(User->getOpcode() == ISD::BITCAST &&
+ User->getValueType(0) == MVT::v2f32 &&
+ U.getValueType() == MVT::i64))
+ return SDValue(); // unhandled pattern
+ OldUses.push_back({User, U.getResNo()});
+ }
+
+ auto *MemN = cast<MemSDNode>(Operand);
+ SmallVector<EVT> VTs;
+ for (const auto &VT : Operand->values()) {
+ if (VT == MVT::i64)
+ VTs.push_back(MVT::v2f32);
+ else
+ VTs.push_back(VT);
+ }
+
+ SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
+ Operand.getOpcode(), SDLoc(Operand), DCI.DAG.getVTList(VTs),
+ SmallVector<SDValue>(Operand->ops()), MemN->getMemoryVT(),
+ MemN->getMemOperand());
+
+    // Replace all chain/glue uses of the old load with the corresponding
+    // results of the new load.
+ for (unsigned I = 0, E = Operand->getNumValues(); I != E; ++I)
+ if (Operand->getValueType(I) != MVT::i64)
+ DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(MemN, I),
+ NewLoad.getValue(I));
+
+    // Replace each bitcast with the corresponding data result of the new load.
+ for (auto &[BC, ResultNum] : OldUses)
+ DCI.CombineTo(BC, NewLoad.getValue(ResultNum), false);
+ }
+
+ return SDValue();
+}
+
SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
@@ -6332,6 +6387,8 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
return PerformFP_ROUNDCombine(N, DCI);
case ISD::TRUNCATE:
return PerformTRUNCATECombine(N, DCI);
+ case ISD::BITCAST:
+ return PerformBITCASTCombine(N, DCI);
}
return SDValue();
}
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index 30ba29c11716b..8441bd9e7bbd0 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -117,14 +117,14 @@ define <2 x float> @test_fadd_imm_1(<2 x float> %a) #0 {
define <4 x float> @test_fadd_v4(<4 x float> %a, <4 x float> %b) #0 {
; CHECK-LABEL: test_fadd_v4(
; CHECK: {
-; CHECK-NEXT: .reg .b64 %rd<11>;
+; CHECK-NEXT: .reg .b64 %rd<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd5, %rd6}, [test_fadd_v4_param_1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd7, %rd8}, [test_fadd_v4_param_0];
-; CHECK-NEXT: add.rn.f32x2 %rd9, %rd8, %rd6;
-; CHECK-NEXT: add.rn.f32x2 %rd10, %rd7, %rd5;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_v4_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_v4_param_0];
+; CHECK-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4;
+; CHECK-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd5};
; CHECK-NEXT: ret;
%r = fadd <4 x float> %a, %b
ret <4 x float> %r
@@ -134,19 +134,19 @@ define <4 x float> @test_fadd_imm_0_v4(<4 x float> %a) #0 {
; CHECK-LABEL: test_fadd_imm_0_v4(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-NEXT: .reg .b64 %rd<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_imm_0_v4_param_0];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_imm_0_v4_param_0];
; CHECK-NEXT: mov.b32 %r1, 0f40800000;
; CHECK-NEXT: mov.b32 %r2, 0f40400000;
-; CHECK-NEXT: mov.b64 %rd5, {%r2, %r1};
-; CHECK-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
+; CHECK-NEXT: mov.b64 %rd3, {%r2, %r1};
+; CHECK-NEXT: add.rn.f32x2 %rd4, %rd2, %rd3;
; CHECK-NEXT: mov.b32 %r3, 0f40000000;
; CHECK-NEXT: mov.b32 %r4, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd7, {%r4, %r3};
-; CHECK-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-NEXT: mov.b64 %rd5, {%r4, %r3};
+; CHECK-NEXT: add.rn.f32x2 %rd6, %rd1, %rd5;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd4};
; CHECK-NEXT: ret;
%r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
ret <4 x float> %r
@@ -156,19 +156,19 @@ define <4 x float> @test_fadd_imm_1_v4(<4 x float> %a) #0 {
; CHECK-LABEL: test_fadd_imm_1_v4(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-NEXT: .reg .b64 %rd<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_imm_1_v4_param_0];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_imm_1_v4_param_0];
; CHECK-NEXT: mov.b32 %r1, 0f40800000;
; CHECK-NEXT: mov.b32 %r2, 0f40400000;
-; CHECK-NEXT: mov.b64 %rd5, {%r2, %r1};
-; CHECK-NEXT: add.rn.f32x2 %rd6, %rd4, %rd5;
+; CHECK-NEXT: mov.b64 %rd3, {%r2, %r1};
+; CHECK-NEXT: add.rn.f32x2 %rd4, %rd2, %rd3;
; CHECK-NEXT: mov.b32 %r3, 0f40000000;
; CHECK-NEXT: mov.b32 %r4, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd7, {%r4, %r3};
-; CHECK-NEXT: add.rn.f32x2 %rd8, %rd3, %rd7;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-NEXT: mov.b64 %rd5, {%r4, %r3};
+; CHECK-NEXT: add.rn.f32x2 %rd6, %rd1, %rd5;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd4};
; CHECK-NEXT: ret;
%r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
ret <4 x float> %r
@@ -340,14 +340,14 @@ define <2 x float> @test_fadd_imm_1_ftz(<2 x float> %a) #2 {
define <4 x float> @test_fadd_v4_ftz(<4 x float> %a, <4 x float> %b) #2 {
; CHECK-LABEL: test_fadd_v4_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .b64 %rd<11>;
+; CHECK-NEXT: .reg .b64 %rd<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd5, %rd6}, [test_fadd_v4_ftz_param_1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd7, %rd8}, [test_fadd_v4_ftz_param_0];
-; CHECK-NEXT: add.rn.ftz.f32x2 %rd9, %rd8, %rd6;
-; CHECK-NEXT: add.rn.ftz.f32x2 %rd10, %rd7, %rd5;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd10, %rd9};
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_v4_ftz_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_v4_ftz_param_0];
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd5, %rd2, %rd4;
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd1, %rd3;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd5};
; CHECK-NEXT: ret;
%r = fadd <4 x float> %a, %b
ret <4 x float> %r
@@ -357,19 +357,19 @@ define <4 x float> @test_fadd_imm_0_v4_ftz(<4 x float> %a) #2 {
; CHECK-LABEL: test_fadd_imm_0_v4_ftz(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-NEXT: .reg .b64 %rd<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_imm_0_v4_ftz_param_0];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_imm_0_v4_ftz_param_0];
; CHECK-NEXT: mov.b32 %r1, 0f40800000;
; CHECK-NEXT: mov.b32 %r2, 0f40400000;
-; CHECK-NEXT: mov.b64 %rd5, {%r2, %r1};
-; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
+; CHECK-NEXT: mov.b64 %rd3, {%r2, %r1};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd4, %rd2, %rd3;
; CHECK-NEXT: mov.b32 %r3, 0f40000000;
; CHECK-NEXT: mov.b32 %r4, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd7, {%r4, %r3};
-; CHECK-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-NEXT: mov.b64 %rd5, {%r4, %r3};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd1, %rd5;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd4};
; CHECK-NEXT: ret;
%r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
ret <4 x float> %r
@@ -379,19 +379,19 @@ define <4 x float> @test_fadd_imm_1_v4_ftz(<4 x float> %a) #2 {
; CHECK-LABEL: test_fadd_imm_1_v4_ftz(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-NEXT: .reg .b64 %rd<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_imm_1_v4_ftz_param_0];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_imm_1_v4_ftz_param_0];
; CHECK-NEXT: mov.b32 %r1, 0f40800000;
; CHECK-NEXT: mov.b32 %r2, 0f40400000;
-; CHECK-NEXT: mov.b64 %rd5, {%r2, %r1};
-; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd4, %rd5;
+; CHECK-NEXT: mov.b64 %rd3, {%r2, %r1};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd4, %rd2, %rd3;
; CHECK-NEXT: mov.b32 %r3, 0f40000000;
; CHECK-NEXT: mov.b32 %r4, 0f3F800000;
-; CHECK-NEXT: mov.b64 %rd7, {%r4, %r3};
-; CHECK-NEXT: add.rn.ftz.f32x2 %rd8, %rd3, %rd7;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6};
+; CHECK-NEXT: mov.b64 %rd5, {%r4, %r3};
+; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd1, %rd5;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd4};
; CHECK-NEXT: ret;
%r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
ret <4 x float> %r
diff --git a/llvm/test/CodeGen/NVPTX/vec-param-load.ll b/llvm/test/CodeGen/NVPTX/vec-param-load.ll
index 765e50554c8d2..2480d26692b09 100644
--- a/llvm/test/CodeGen/NVPTX/vec-param-load.ll
+++ b/llvm/test/CodeGen/NVPTX/vec-param-load.ll
@@ -5,40 +5,40 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define <16 x float> @test_v16f32(<16 x float> %a) {
; CHECK-LABEL: test_v16f32(
-; CHECK-DAG: ld.param.v4.b32 {[[V_12_15:(%r[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+48];
-; CHECK-DAG: ld.param.v4.b32 {[[V_8_11:(%r[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+32];
-; CHECK-DAG: ld.param.v4.b32 {[[V_4_7:(%r[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+16];
-; CHECK-DAG: ld.param.v4.b32 {[[V_0_3:(%r[0-9]+[, ]*){4}]]}, [test_v16f32_param_0];
-; CHECK-DAG: st.param.v4.b32 [func_retval0], {[[V_0_3]]}
-; CHECK-DAG: st.param.v4.b32 [func_retval0+16], {[[V_4_7]]}
-; CHECK-DAG: st.param.v4.b32 [func_retval0+32], {[[V_8_11]]}
-; CHECK-DAG: st.param.v4.b32 [func_retval0+48], {[[V_12_15]]}
+; CHECK-DAG: ld.param.v2.b64 {[[V_12_15:(%rd[0-9]+[, ]*){2}]]}, [test_v16f32_param_0+48];
+; CHECK-DAG: ld.param.v2.b64 {[[V_8_11:(%rd[0-9]+[, ]*){2}]]}, [test_v16f32_param_0+32];
+; CHECK-DAG: ld.param.v2.b64 {[[V_4_7:(%rd[0-9]+[, ]*){2}]]}, [test_v16f32_param_0+16];
+; CHECK-DAG: ld.param.v2.b64 {[[V_0_3:(%rd[0-9]+[, ]*){2}]]}, [test_v16f32_param_0];
+; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[V_0_3]]}
+; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[V_4_7]]}
+; CHECK-DAG: st.param.v2.b64 [func_retval0+32], {[[V_8_11]]}
+; CHECK-DAG: st.param.v2.b64 [func_retval0+48], {[[V_12_15]]}
; CHECK: ret;
ret <16 x float> %a
}
define <8 x float> @test_v8f32(<8 x float> %a) {
; CHECK-LABEL: test_v8f32(
-; CHECK-DAG: ld.param.v4.b32 {[[V_4_7:(%r[0-9]+[, ]*){4}]]}, [test_v8f32_param_0+16];
-; CHECK-DAG: ld.param.v4.b32 {[[V_0_3:(%r[0-9]+[, ]*){4}]]}, [test_v8f32_param_0];
-; CHECK-DAG: st.param.v4.b32 [func_retval0], {[[V_0_3]]}
-; CHECK-DAG: st.param.v4.b32 [func_retval0+16], {[[V_4_7]]}
+; CHECK-DAG: ld.param.v2.b64 {[[V_4_7:(%rd[0-9]+[, ]*){2}]]}, [test_v8f32_param_0+16];
+; CHECK-DAG: ld.param.v2.b64 {[[V_0_3:(%rd[0-9]+[, ]*){2}]]}, [test_v8f32_param_0];
+; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[V_0_3]]}
+; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[V_4_7]]}
; CHECK: ret;
ret <8 x float> %a
}
define <4 x float> @test_v4f32(<4 x float> %a) {
; CHECK-LABEL: test_v4f32(
-; CHECK-DAG: ld.param.v4.b32 {[[V_0_3:(%r[0-9]+[, ]*){4}]]}, [test_v4f32_param_0];
-; CHECK-DAG: st.param.v4.b32 [func_retval0], {[[V_0_3]]}
+; CHECK-DAG: ld.param.v2.b64 {[[V_0_3:(%rd[0-9]+[, ]*){2}]]}, [test_v4f32_param_0];
+; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[V_0_3]]}
; CHECK: ret;
ret <4 x float> %a
}
define <2 x float> @test_v2f32(<2 x float> %a) {
; CHECK-LABEL: test_v2f32(
-; CHECK-DAG: ld.param.v2.b32 {[[V_0_3:(%r[0-9]+[, ]*){2}]]}, [test_v2f32_param_0];
-; CHECK-DAG: st.param.v2.b32 [func_retval0], {[[V_0_3]]}
+; CHECK-DAG: ld.param.b64 [[V_0_3:%rd[0-9]+]], [test_v2f32_param_0];
+; CHECK-DAG: st.param.b64 [func_retval0], [[V_0_3]]
; CHECK: ret;
ret <2 x float> %a
}
>From b4c7b0f0d877481c8f4743279776ec1c266fa720 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Wed, 12 Mar 2025 16:54:40 -0700
Subject: [PATCH 23/32] [NVPTX] handle more cases for loads and stores
Split unaligned stores and loads of v2f32.
Add DAGCombiner rules for:
- target-independent stores that store a v2f32 BUILD_VECTOR. We
scalarize the value and rewrite the store
Fix test cases.
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 59 ++++++++++++++-----
llvm/test/CodeGen/NVPTX/aggregate-return.ll | 4 +-
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 7 +--
.../NVPTX/load-with-non-coherent-cache.ll | 4 +-
.../CodeGen/NVPTX/misaligned-vector-ldst.ll | 2 +-
5 files changed, 53 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 318afe7e9861a..2fc9f4d616961 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -865,7 +865,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT, ISD::FADD,
ISD::MUL, ISD::SHL, ISD::SREM, ISD::UREM, ISD::VSELECT,
ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::FP_ROUND,
- ISD::TRUNCATE, ISD::LOAD, ISD::BITCAST});
+ ISD::TRUNCATE, ISD::LOAD, ISD::STORE, ISD::BITCAST});
// setcc for f16x2 and bf16x2 needs special handling to prevent
// legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -3242,10 +3242,10 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
if (Op.getValueType() == MVT::i1)
return LowerLOADi1(Op, DAG);
- // v2f16/v2bf16/v2i16/v4i8 are legal, so we can't rely on legalizer to handle
- // unaligned loads and have to handle it here.
+ // v2f16/v2bf16/v2i16/v4i8/v2f32 are legal, so we can't rely on legalizer to
+ // handle unaligned loads and have to handle it here.
EVT VT = Op.getValueType();
- if (Isv2x16VT(VT) || VT == MVT::v4i8) {
+ if (Isv2x16VT(VT) || VT == MVT::v4i8 || VT == MVT::v2f32) {
LoadSDNode *Load = cast<LoadSDNode>(Op);
EVT MemVT = Load->getMemoryVT();
if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
@@ -3289,22 +3289,23 @@ SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
if (VT == MVT::i1)
return LowerSTOREi1(Op, DAG);
- // v2f16 is legal, so we can't rely on legalizer to handle unaligned
- // stores and have to handle it here.
- if ((Isv2x16VT(VT) || VT == MVT::v4i8) &&
+ // v2f16/v2bf16/v2i16/v4i8/v2f32 are legal, so we can't rely on legalizer to
+ // handle unaligned stores and have to handle it here.
+ if ((Isv2x16VT(VT) || VT == MVT::v4i8 || VT == MVT::v2f32) &&
!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
VT, *Store->getMemOperand()))
return expandUnalignedStore(Store, DAG);
- // v2f16, v2bf16 and v2i16 don't need special handling.
- if (Isv2x16VT(VT) || VT == MVT::v4i8)
+ // v2f16/v2bf16/v2i16/v4i8/v2f32 don't need special handling.
+ if (Isv2x16VT(VT) || VT == MVT::v4i8 || VT == MVT::v2f32)
return SDValue();
return LowerSTOREVector(Op, DAG);
}
-SDValue
-NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
+static SDValue convertVectorStore(SDValue Op, SelectionDAG &DAG,
+ const SmallVectorImpl<SDValue> &Elements,
+ const NVPTXSubtarget &STI) {
MemSDNode *N = cast<MemSDNode>(Op.getNode());
SDValue Val = N->getOperand(1);
SDLoc DL(N);
@@ -3369,6 +3370,8 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
NumEltsPerSubVector);
Ops.push_back(DAG.getBuildVector(EltVT, DL, SubVectorElts));
}
+ } else if (!Elements.empty()) {
+ Ops.insert(Ops.end(), Elements.begin(), Elements.end());
} else {
SDValue V = DAG.getBitcast(MVT::getVectorVT(EltVT, NumElts), Val);
for (const unsigned I : llvm::seq(NumElts)) {
@@ -3392,10 +3395,20 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
N->getMemoryVT(), N->getMemOperand());
- // return DCI.CombineTo(N, NewSt, true);
return NewSt;
}
+// Default variant where we don't pass in elements.
+static SDValue convertVectorStore(SDValue Op, SelectionDAG &DAG,
+ const NVPTXSubtarget &STI) {
+ return convertVectorStore(Op, DAG, SmallVector<SDValue>{}, STI);
+}
+
+SDValue NVPTXTargetLowering::LowerSTOREVector(SDValue Op,
+ SelectionDAG &DAG) const {
+ return convertVectorStore(Op, DAG, STI);
+}
+
// st i1 v, addr
// =>
// v1 = zxt v to i16
@@ -5539,6 +5552,9 @@ static SDValue PerformStoreCombineHelper(SDNode *N,
// -->
// StoreRetvalV2 {a, b}
// likewise for V2 -> V4 case
+ //
+ // We also handle target-independent stores, which require us to first
+ // convert to StoreV2.
std::optional<NVPTXISD::NodeType> NewOpcode;
switch (N->getOpcode()) {
@@ -5564,8 +5580,8 @@ static SDValue PerformStoreCombineHelper(SDNode *N,
SDValue CurrentOp = N->getOperand(I);
if (CurrentOp->getOpcode() == ISD::BUILD_VECTOR) {
assert(CurrentOp.getValueType() == MVT::v2f32);
- NewOps.push_back(CurrentOp.getNode()->getOperand(0));
- NewOps.push_back(CurrentOp.getNode()->getOperand(1));
+ NewOps.push_back(CurrentOp.getOperand(0));
+ NewOps.push_back(CurrentOp.getOperand(1));
} else {
NewOps.clear();
break;
@@ -6342,6 +6358,19 @@ static SDValue PerformBITCASTCombine(SDNode *N,
return SDValue();
}
+static SDValue PerformStoreCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &STI) {
+  // Check whether the stored value can be scalarized into its elements.
+ SDValue StoredVal = N->getOperand(1);
+ if (StoredVal.getValueType() == MVT::v2f32 &&
+ StoredVal.getOpcode() == ISD::BUILD_VECTOR) {
+ SmallVector<SDValue> Elements(StoredVal->op_values());
+ return convertVectorStore(SDValue(N, 0), DCI.DAG, Elements, STI);
+ }
+ return SDValue();
+}
+
SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
@@ -6371,6 +6400,8 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
case NVPTXISD::LoadParam:
case NVPTXISD::LoadParamV2:
return PerformLoadCombine(N, DCI, STI);
+ case ISD::STORE:
+ return PerformStoreCombine(N, DCI, STI);
case NVPTXISD::StoreParam:
case NVPTXISD::StoreParamV2:
case NVPTXISD::StoreParamV4:
diff --git a/llvm/test/CodeGen/NVPTX/aggregate-return.ll b/llvm/test/CodeGen/NVPTX/aggregate-return.ll
index 784355d96551e..1f537f03af202 100644
--- a/llvm/test/CodeGen/NVPTX/aggregate-return.ll
+++ b/llvm/test/CodeGen/NVPTX/aggregate-return.ll
@@ -10,9 +10,9 @@ define void @test_v2f32(<2 x float> %input, ptr %output) {
; CHECK-LABEL: @test_v2f32
%call = tail call <2 x float> @barv(<2 x float> %input)
; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: ld.param.v2.b32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [retval0];
+; CHECK: ld.param.b64 [[E0_1:%rd[0-9]+]], [retval0];
store <2 x float> %call, ptr %output, align 8
-; CHECK: st.v2.b32 [{{%rd[0-9]+}}], {[[E0]], [[E1]]}
+; CHECK: st.b64 [{{%rd[0-9]+}}], [[E0_1]]
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index 8441bd9e7bbd0..dac04097f2af7 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -512,14 +512,13 @@ define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
define void @test_ldst_v2f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: test_ldst_v2f32(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_ldst_v2f32_param_1];
; CHECK-NEXT: ld.param.b64 %rd1, [test_ldst_v2f32_param_0];
-; CHECK-NEXT: ld.v2.b32 {%r1, %r2}, [%rd1];
-; CHECK-NEXT: st.v2.b32 [%rd2], {%r1, %r2};
+; CHECK-NEXT: ld.b64 %rd3, [%rd1];
+; CHECK-NEXT: st.b64 [%rd2], %rd3;
; CHECK-NEXT: ret;
%t1 = load <2 x float>, ptr %a
store <2 x float> %t1, ptr %b, align 32
diff --git a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
index 4d7a4b50e8940..63887038eaee0 100644
--- a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
+++ b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
@@ -108,9 +108,9 @@ define ptx_kernel void @foo10(ptr noalias readonly %from, ptr %to) {
}
; SM20-LABEL: .visible .entry foo11(
-; SM20: ld.global.v2.b32
+; SM20: ld.global.b64
; SM35-LABEL: .visible .entry foo11(
-; SM35: ld.global.nc.v2.b32
+; SM35: ld.global.nc.b64
define ptx_kernel void @foo11(ptr noalias readonly %from, ptr %to) {
%1 = load <2 x float>, ptr %from
store <2 x float> %1, ptr %to
diff --git a/llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll b/llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
index db8733da5b7e4..6c7d3c20b2c97 100644
--- a/llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
+++ b/llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
@@ -26,7 +26,7 @@ define <4 x float> @t2(ptr %p1) {
; CHECK-LABEL: t3
define <4 x float> @t3(ptr %p1) {
; CHECK-NOT: ld.v4
-; CHECK: ld.v2
+; CHECK: ld.b64
%r = load <4 x float>, ptr %p1, align 8
ret <4 x float> %r
}
>From ff967327cccc944010f228a9a5c03169637ce3b0 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Fri, 14 Mar 2025 18:56:29 -0700
Subject: [PATCH 24/32] [NVPTX] add coverage for v2f32 in ldg-invariant and
fp-contract
for fp-contract:
- test folding of fma.f32x2
- bump SM version to 100
for ldg-invariant:
- test proper splitting of loads on vectors of f32
---
llvm/test/CodeGen/NVPTX/ldg-invariant.ll | 70 ++++++++++++++++++++++++
1 file changed, 70 insertions(+)
diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
index c5c5de4c1b85e..2b22c83b88d3b 100644
--- a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
+++ b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
@@ -125,6 +125,76 @@ define half @ld_global_v8f16(ptr addrspace(1) %ptr) {
ret half %sum
}
+define float @ld_global_v2f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: ld_global_v2f32(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v2f32_param_0];
+; CHECK-NEXT: ld.global.nc.v2.f32 {%f1, %f2}, [%rd1];
+; CHECK-NEXT: add.rn.f32 %f3, %f1, %f2;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f3;
+; CHECK-NEXT: ret;
+ %a = load <2 x float>, ptr addrspace(1) %ptr, !invariant.load !0
+ %v1 = extractelement <2 x float> %a, i32 0
+ %v2 = extractelement <2 x float> %a, i32 1
+ %sum = fadd float %v1, %v2
+ ret float %sum
+}
+
+define float @ld_global_v4f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: ld_global_v4f32(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<8>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v4f32_param_0];
+; CHECK-NEXT: ld.global.nc.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
+; CHECK-NEXT: add.rn.f32 %f5, %f1, %f2;
+; CHECK-NEXT: add.rn.f32 %f6, %f3, %f4;
+; CHECK-NEXT: add.rn.f32 %f7, %f5, %f6;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f7;
+; CHECK-NEXT: ret;
+ %a = load <4 x float>, ptr addrspace(1) %ptr, !invariant.load !0
+ %v1 = extractelement <4 x float> %a, i32 0
+ %v2 = extractelement <4 x float> %a, i32 1
+ %v3 = extractelement <4 x float> %a, i32 2
+ %v4 = extractelement <4 x float> %a, i32 3
+ %sum1 = fadd float %v1, %v2
+ %sum2 = fadd float %v3, %v4
+ %sum = fadd float %sum1, %sum2
+ ret float %sum
+}
+
+define float @ld_global_v8f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: ld_global_v8f32(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<12>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v8f32_param_0];
+; CHECK-NEXT: ld.global.nc.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1+16];
+; CHECK-NEXT: ld.global.nc.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1];
+; CHECK-NEXT: add.rn.f32 %f9, %f5, %f7;
+; CHECK-NEXT: add.rn.f32 %f10, %f1, %f3;
+; CHECK-NEXT: add.rn.f32 %f11, %f9, %f10;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f11;
+; CHECK-NEXT: ret;
+ %a = load <8 x float>, ptr addrspace(1) %ptr, !invariant.load !0
+ %v1 = extractelement <8 x float> %a, i32 0
+ %v2 = extractelement <8 x float> %a, i32 2
+ %v3 = extractelement <8 x float> %a, i32 4
+ %v4 = extractelement <8 x float> %a, i32 6
+ %sum1 = fadd float %v1, %v2
+ %sum2 = fadd float %v3, %v4
+ %sum = fadd float %sum1, %sum2
+ ret float %sum
+}
+
define i8 @ld_global_v8i8(ptr addrspace(1) %ptr) {
; CHECK-LABEL: ld_global_v8i8(
; CHECK: {
>From b065c15f5aecb19975487ef19f09a7c8aeee1e10 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 1 Apr 2025 11:38:40 -0700
Subject: [PATCH 25/32] [NVPTX] expand v2f32 SELECT_CC and BR_CC
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 2fc9f4d616961..97867eb9f79ef 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -681,8 +681,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// Operations not directly supported by NVPTX.
for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
- MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, MVT::v4i8,
- MVT::i32, MVT::i64}) {
+ MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
+ MVT::v4i8, MVT::i32, MVT::i64}) {
setOperationAction(ISD::SELECT_CC, VT, Expand);
setOperationAction(ISD::BR_CC, VT, Expand);
}
>From bc36772a40c3ad02896d44cc40934f1cbbd86fa4 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 1 Apr 2025 13:34:23 -0700
Subject: [PATCH 26/32] [NVPTX] update tests for mov.b32 canonicalization
---
llvm/test/CodeGen/NVPTX/ldg-invariant.ll | 26 ++++++++++++------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
index 2b22c83b88d3b..1590ad835877c 100644
--- a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
+++ b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
@@ -128,14 +128,14 @@ define half @ld_global_v8f16(ptr addrspace(1) %ptr) {
define float @ld_global_v2f32(ptr addrspace(1) %ptr) {
; CHECK-LABEL: ld_global_v2f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v2f32_param_0];
-; CHECK-NEXT: ld.global.nc.v2.f32 {%f1, %f2}, [%rd1];
+; CHECK-NEXT: ld.param.b64 %rd1, [ld_global_v2f32_param_0];
+; CHECK-NEXT: ld.global.nc.v2.b32 {%f1, %f2}, [%rd1];
; CHECK-NEXT: add.rn.f32 %f3, %f1, %f2;
-; CHECK-NEXT: st.param.f32 [func_retval0], %f3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %f3;
; CHECK-NEXT: ret;
%a = load <2 x float>, ptr addrspace(1) %ptr, !invariant.load !0
%v1 = extractelement <2 x float> %a, i32 0
@@ -147,16 +147,16 @@ define float @ld_global_v2f32(ptr addrspace(1) %ptr) {
define float @ld_global_v4f32(ptr addrspace(1) %ptr) {
; CHECK-LABEL: ld_global_v4f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<8>;
+; CHECK-NEXT: .reg .b32 %f<8>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v4f32_param_0];
-; CHECK-NEXT: ld.global.nc.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1];
+; CHECK-NEXT: ld.param.b64 %rd1, [ld_global_v4f32_param_0];
+; CHECK-NEXT: ld.global.nc.v4.b32 {%f1, %f2, %f3, %f4}, [%rd1];
; CHECK-NEXT: add.rn.f32 %f5, %f1, %f2;
; CHECK-NEXT: add.rn.f32 %f6, %f3, %f4;
; CHECK-NEXT: add.rn.f32 %f7, %f5, %f6;
-; CHECK-NEXT: st.param.f32 [func_retval0], %f7;
+; CHECK-NEXT: st.param.b32 [func_retval0], %f7;
; CHECK-NEXT: ret;
%a = load <4 x float>, ptr addrspace(1) %ptr, !invariant.load !0
%v1 = extractelement <4 x float> %a, i32 0
@@ -172,17 +172,17 @@ define float @ld_global_v4f32(ptr addrspace(1) %ptr) {
define float @ld_global_v8f32(ptr addrspace(1) %ptr) {
; CHECK-LABEL: ld_global_v8f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<12>;
+; CHECK-NEXT: .reg .b32 %f<12>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v8f32_param_0];
-; CHECK-NEXT: ld.global.nc.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1+16];
-; CHECK-NEXT: ld.global.nc.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1];
+; CHECK-NEXT: ld.param.b64 %rd1, [ld_global_v8f32_param_0];
+; CHECK-NEXT: ld.global.nc.v4.b32 {%f1, %f2, %f3, %f4}, [%rd1+16];
+; CHECK-NEXT: ld.global.nc.v4.b32 {%f5, %f6, %f7, %f8}, [%rd1];
; CHECK-NEXT: add.rn.f32 %f9, %f5, %f7;
; CHECK-NEXT: add.rn.f32 %f10, %f1, %f3;
; CHECK-NEXT: add.rn.f32 %f11, %f9, %f10;
-; CHECK-NEXT: st.param.f32 [func_retval0], %f11;
+; CHECK-NEXT: st.param.b32 [func_retval0], %f11;
; CHECK-NEXT: ret;
%a = load <8 x float>, ptr addrspace(1) %ptr, !invariant.load !0
%v1 = extractelement <8 x float> %a, i32 0
>From 852a4a9dcbfc79d01f1eeaa10ac505ce82d38b14 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 1 Apr 2025 13:34:48 -0700
Subject: [PATCH 27/32] [NVPTX] add f32x2 version of fp-contract test
---
llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll | 112 +++++++++++++++++++
1 file changed, 112 insertions(+)
create mode 100644 llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll
diff --git a/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll b/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll
new file mode 100644
index 0000000000000..3ffbae53934a8
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll
@@ -0,0 +1,112 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,FAST
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100 | FileCheck %s --check-prefixes=CHECK,DEFAULT
+; RUN: %if ptxas-12.8 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100 -fp-contract=fast | %ptxas-verify -arch sm_100 %}
+; RUN: %if ptxas-12.8 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100 | %ptxas-verify -arch sm_100 %}
+
+target triple = "nvptx64-unknown-cuda"
+
+;; FAST-LABEL: @t0
+;; DEFAULT-LABEL: @t0
+define <2 x float> @t0(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; FAST-LABEL: t0(
+; FAST: {
+; FAST-NEXT: .reg .b64 %rd<5>;
+; FAST-EMPTY:
+; FAST-NEXT: // %bb.0:
+; FAST-NEXT: ld.param.b64 %rd1, [t0_param_2];
+; FAST-NEXT: ld.param.b64 %rd2, [t0_param_1];
+; FAST-NEXT: ld.param.b64 %rd3, [t0_param_0];
+; FAST-NEXT: fma.rn.f32x2 %rd4, %rd3, %rd2, %rd1;
+; FAST-NEXT: st.param.b64 [func_retval0], %rd4;
+; FAST-NEXT: ret;
+;
+; DEFAULT-LABEL: t0(
+; DEFAULT: {
+; DEFAULT-NEXT: .reg .b64 %rd<6>;
+; DEFAULT-EMPTY:
+; DEFAULT-NEXT: // %bb.0:
+; DEFAULT-NEXT: ld.param.b64 %rd1, [t0_param_2];
+; DEFAULT-NEXT: ld.param.b64 %rd2, [t0_param_1];
+; DEFAULT-NEXT: ld.param.b64 %rd3, [t0_param_0];
+; DEFAULT-NEXT: mul.rn.f32x2 %rd4, %rd3, %rd2;
+; DEFAULT-NEXT: add.rn.f32x2 %rd5, %rd4, %rd1;
+; DEFAULT-NEXT: st.param.b64 [func_retval0], %rd5;
+; DEFAULT-NEXT: ret;
+ %v0 = fmul <2 x float> %a, %b
+ %v1 = fadd <2 x float> %v0, %c
+ ret <2 x float> %v1
+}
+
+;; We cannot form an fma here, but make sure we explicitly emit add.rn.f32x2
+;; to prevent ptxas from fusing this with anything else.
+define <2 x float> @t1(<2 x float> %a, <2 x float> %b) {
+; FAST-LABEL: t1(
+; FAST: {
+; FAST-NEXT: .reg .b64 %rd<6>;
+; FAST-EMPTY:
+; FAST-NEXT: // %bb.0:
+; FAST-NEXT: ld.param.b64 %rd1, [t1_param_1];
+; FAST-NEXT: ld.param.b64 %rd2, [t1_param_0];
+; FAST-NEXT: add.f32x2 %rd3, %rd2, %rd1;
+; FAST-NEXT: sub.f32x2 %rd4, %rd2, %rd1;
+; FAST-NEXT: mul.f32x2 %rd5, %rd3, %rd4;
+; FAST-NEXT: st.param.b64 [func_retval0], %rd5;
+; FAST-NEXT: ret;
+;
+; DEFAULT-LABEL: t1(
+; DEFAULT: {
+; DEFAULT-NEXT: .reg .b64 %rd<6>;
+; DEFAULT-EMPTY:
+; DEFAULT-NEXT: // %bb.0:
+; DEFAULT-NEXT: ld.param.b64 %rd1, [t1_param_1];
+; DEFAULT-NEXT: ld.param.b64 %rd2, [t1_param_0];
+; DEFAULT-NEXT: add.rn.f32x2 %rd3, %rd2, %rd1;
+; DEFAULT-NEXT: sub.rn.f32x2 %rd4, %rd2, %rd1;
+; DEFAULT-NEXT: mul.rn.f32x2 %rd5, %rd3, %rd4;
+; DEFAULT-NEXT: st.param.b64 [func_retval0], %rd5;
+; DEFAULT-NEXT: ret;
+ %v1 = fadd <2 x float> %a, %b
+ %v2 = fsub <2 x float> %a, %b
+ %v3 = fmul <2 x float> %v1, %v2
+ ret <2 x float> %v3
+}
+
+;; Make sure we generate the non ".rn" version when the "contract" flag is
+;; present on the instructions
+define <2 x float> @t2(<2 x float> %a, <2 x float> %b) {
+; CHECK-LABEL: t2(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [t2_param_1];
+; CHECK-NEXT: ld.param.b64 %rd2, [t2_param_0];
+; CHECK-NEXT: add.f32x2 %rd3, %rd2, %rd1;
+; CHECK-NEXT: sub.f32x2 %rd4, %rd2, %rd1;
+; CHECK-NEXT: mul.f32x2 %rd5, %rd3, %rd4;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd5;
+; CHECK-NEXT: ret;
+ %v1 = fadd contract <2 x float> %a, %b
+ %v2 = fsub contract <2 x float> %a, %b
+ %v3 = fmul contract <2 x float> %v1, %v2
+ ret <2 x float> %v3
+}
+
+;; Make sure we always fold to fma when the "contract" flag is present
+define <2 x float> @t3(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; CHECK-LABEL: t3(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [t3_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [t3_param_1];
+; CHECK-NEXT: ld.param.b64 %rd3, [t3_param_0];
+; CHECK-NEXT: fma.rn.f32x2 %rd4, %rd3, %rd2, %rd1;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-NEXT: ret;
+ %v0 = fmul contract <2 x float> %a, %b
+ %v1 = fadd contract <2 x float> %v0, %c
+ ret <2 x float> %v1
+}
>From fd940025b07d28b6065562f4b8cf70e4b6ae628b Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 8 Apr 2025 21:58:30 -0700
Subject: [PATCH 28/32] [NVPTX] use sink symbol for single-element unpacking of
v2f32s
---
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 16 ++++++++-
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 4 +--
llvm/test/CodeGen/NVPTX/ldg-invariant.ll | 34 +++++++++----------
3 files changed, 34 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 90c3c1f412820..1fd628c21977e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2933,9 +2933,18 @@ let hasSideEffects = false in {
(ins Int64Regs:$s),
"{{ .reg .b32 tmp; mov.b64 {$low, tmp}, $s; }}",
[]>;
-
// PTX 7.1 lets you avoid a temp register and just use _ as a "sink" for the
// unused high/low part.
+ def I64toF32H_Sink : NVPTXInst<(outs Float32Regs:$high),
+ (ins Int64Regs:$s),
+ "mov.b64 {{_, $high}}, $s;",
+ []>,
+ Requires<[hasPTX<71>]>;
+ def I64toF32L_Sink : NVPTXInst<(outs Float32Regs:$low),
+ (ins Int64Regs:$s),
+ "mov.b64 {{$low, _}}, $s;",
+ []>,
+ Requires<[hasPTX<71>]>;
def I32toI16H_Sink : NVPTXInst<(outs Int16Regs:$high),
(ins Int32Regs:$s),
"mov.b32 \t{{_, $high}}, $s;",
@@ -2976,6 +2985,11 @@ foreach vt = [v2f16, v2bf16, v2i16] in {
def : Pat<(extractelt vt:$src, 1), (I32toI16H $src)>;
}
+def : Pat<(extractelt v2f32:$src, 0),
+ (I64toF32L_Sink $src)>, Requires<[hasPTX<71>]>;
+def : Pat<(extractelt v2f32:$src, 1),
+ (I64toF32H_Sink $src)>, Requires<[hasPTX<71>]>;
+
def : Pat<(extractelt v2f32:$src, 0),
(I64toF32L $src)>;
def : Pat<(extractelt v2f32:$src, 1),
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index dac04097f2af7..a3823d07c76ef 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -33,7 +33,7 @@ define float @test_extract_0(<2 x float> %a) #0 {
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_extract_0_param_0];
-; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {%r1, tmp}, %rd1; }
+; CHECK-NEXT: mov.b64 {%r1, _}, %rd1;
; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
; CHECK-NEXT: ret;
%e = extractelement <2 x float> %a, i32 0
@@ -48,7 +48,7 @@ define float @test_extract_1(<2 x float> %a) #0 {
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [test_extract_1_param_0];
-; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r1}, %rd1; }
+; CHECK-NEXT: mov.b64 {_, %r1}, %rd1;
; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
; CHECK-NEXT: ret;
%e = extractelement <2 x float> %a, i32 1
diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
index 1590ad835877c..49954c71f2987 100644
--- a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
+++ b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
@@ -128,14 +128,14 @@ define half @ld_global_v8f16(ptr addrspace(1) %ptr) {
define float @ld_global_v2f32(ptr addrspace(1) %ptr) {
; CHECK-LABEL: ld_global_v2f32(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %f<4>;
+; CHECK-NEXT: .reg .b32 %r<4>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [ld_global_v2f32_param_0];
-; CHECK-NEXT: ld.global.nc.v2.b32 {%f1, %f2}, [%rd1];
-; CHECK-NEXT: add.rn.f32 %f3, %f1, %f2;
-; CHECK-NEXT: st.param.b32 [func_retval0], %f3;
+; CHECK-NEXT: ld.global.nc.v2.b32 {%r1, %r2}, [%rd1];
+; CHECK-NEXT: add.rn.f32 %r3, %r1, %r2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r3;
; CHECK-NEXT: ret;
%a = load <2 x float>, ptr addrspace(1) %ptr, !invariant.load !0
%v1 = extractelement <2 x float> %a, i32 0
@@ -147,16 +147,16 @@ define float @ld_global_v2f32(ptr addrspace(1) %ptr) {
define float @ld_global_v4f32(ptr addrspace(1) %ptr) {
; CHECK-LABEL: ld_global_v4f32(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %f<8>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [ld_global_v4f32_param_0];
-; CHECK-NEXT: ld.global.nc.v4.b32 {%f1, %f2, %f3, %f4}, [%rd1];
-; CHECK-NEXT: add.rn.f32 %f5, %f1, %f2;
-; CHECK-NEXT: add.rn.f32 %f6, %f3, %f4;
-; CHECK-NEXT: add.rn.f32 %f7, %f5, %f6;
-; CHECK-NEXT: st.param.b32 [func_retval0], %f7;
+; CHECK-NEXT: ld.global.nc.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; CHECK-NEXT: add.rn.f32 %r5, %r1, %r2;
+; CHECK-NEXT: add.rn.f32 %r6, %r3, %r4;
+; CHECK-NEXT: add.rn.f32 %r7, %r5, %r6;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r7;
; CHECK-NEXT: ret;
%a = load <4 x float>, ptr addrspace(1) %ptr, !invariant.load !0
%v1 = extractelement <4 x float> %a, i32 0
@@ -172,17 +172,17 @@ define float @ld_global_v4f32(ptr addrspace(1) %ptr) {
define float @ld_global_v8f32(ptr addrspace(1) %ptr) {
; CHECK-LABEL: ld_global_v8f32(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %f<12>;
+; CHECK-NEXT: .reg .b32 %r<12>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [ld_global_v8f32_param_0];
-; CHECK-NEXT: ld.global.nc.v4.b32 {%f1, %f2, %f3, %f4}, [%rd1+16];
-; CHECK-NEXT: ld.global.nc.v4.b32 {%f5, %f6, %f7, %f8}, [%rd1];
-; CHECK-NEXT: add.rn.f32 %f9, %f5, %f7;
-; CHECK-NEXT: add.rn.f32 %f10, %f1, %f3;
-; CHECK-NEXT: add.rn.f32 %f11, %f9, %f10;
-; CHECK-NEXT: st.param.b32 [func_retval0], %f11;
+; CHECK-NEXT: ld.global.nc.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; CHECK-NEXT: ld.global.nc.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; CHECK-NEXT: add.rn.f32 %r9, %r5, %r7;
+; CHECK-NEXT: add.rn.f32 %r10, %r1, %r3;
+; CHECK-NEXT: add.rn.f32 %r11, %r9, %r10;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r11;
; CHECK-NEXT: ret;
%a = load <8 x float>, ptr addrspace(1) %ptr, !invariant.load !0
%v1 = extractelement <8 x float> %a, i32 0
>From 94a88a6af8ff17e3c5475dcaa1e22e51fe10a9e9 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Mon, 28 Apr 2025 18:48:35 -0700
Subject: [PATCH 29/32] [NVPTX] in combiner rule, fix propagation of offset
into load results
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 97867eb9f79ef..21c8c0f2e5943 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5341,7 +5341,8 @@ static SDValue PerformLoadCombine(SDNode *N,
SmallDenseMap<SDNode *, unsigned> ExtractElts;
SmallVector<SDNode *> ProxyRegs(OrigNumResults, nullptr);
- SmallVector<std::pair<SDNode *, unsigned /*offset*/>> WorkList{{N, 0}};
+ SmallVector<std::pair<SDNode *, unsigned>> WorkList{{N, {}}};
+ bool ProcessingInitialLoad = true;
while (!WorkList.empty()) {
auto [V, Offset] = WorkList.pop_back_val();
@@ -5351,10 +5352,12 @@ static SDValue PerformLoadCombine(SDNode *N,
if (U.getValueType() == MVT::Other || U.getValueType() == MVT::Glue)
continue; // we'll process chain/glue later
+ if (ProcessingInitialLoad)
+ Offset = U.getResNo();
+
SDNode *User = U.getUser();
if (User->getOpcode() == NVPTXISD::ProxyReg) {
- Offset = U.getResNo() * 2;
- SDNode *&ProxyReg = ProxyRegs[Offset / 2];
+ SDNode *&ProxyReg = ProxyRegs[Offset];
// We shouldn't have multiple proxy regs for the same value from the
// load, but bail out anyway since we don't handle this.
@@ -5366,13 +5369,13 @@ static SDValue PerformLoadCombine(SDNode *N,
User->getValueType(0) == MVT::v2f32 &&
U.getValueType() == MVT::i64) {
// match v2f32 = bitcast i64
- Offset = U.getResNo() * 2;
+ // continue and push the instruction
} else if (User->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
User->getValueType(0) == MVT::f32) {
// match f32 = extractelt v2f32
if (auto *CI = dyn_cast<ConstantSDNode>(User->getOperand(1))) {
unsigned Index = CI->getZExtValue();
- ExtractElts[User] = Offset + Index;
+ ExtractElts[User] = 2 * Offset + Index;
continue; // don't search
}
return SDValue(); // could not match
@@ -5382,6 +5385,9 @@ static SDValue PerformLoadCombine(SDNode *N,
// enqueue this to visit its uses
WorkList.push_back({User, Offset});
}
+
+ // After we're done with the load, propagate the result offsets.
+ ProcessingInitialLoad = false;
}
// (2) If the load's value is only used as f32 elements, replace all
>From 088391c7fd4ca43227e1d218918c06414b5b12b2 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Wed, 30 Apr 2025 20:28:01 -0700
Subject: [PATCH 30/32] [NVPTX] update how loads are optimized and disable on
O0
---
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 28 +++++++++
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 35 +++++++++++-
llvm/test/CodeGen/NVPTX/f16x2-instructions.ll | 22 ++++---
llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 57 ++++++++++++-------
4 files changed, 113 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 38e0f367b55b6..5fecc31d76a6f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1189,11 +1189,25 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
unsigned FromTypeWidth = TotalWidth / getLoadStoreVectorNumElts(N);
+ LLVM_DEBUG({
+ dbgs() << "tryLoadVector on " << TLI->getTargetNodeName(N->getOpcode())
+ << ":\n";
+ dbgs() << " load type: " << MemVT << "\n";
+ dbgs() << " total load width: " << TotalWidth << " bits\n";
+ dbgs() << " from type width: " << FromTypeWidth << " bits\n";
+ dbgs() << " element type: " << EltVT << "\n";
+ });
+
if (isSubVectorPackedInInteger(EltVT)) {
assert(ExtensionType == ISD::NON_EXTLOAD);
FromTypeWidth = EltVT.getSizeInBits();
EltVT = MVT::getIntegerVT(FromTypeWidth);
FromType = NVPTX::PTXLdStInstCode::Untyped;
+ LLVM_DEBUG({
+ dbgs() << " packed integers detected:\n";
+ dbgs() << " from type width: " << FromTypeWidth << " (new)\n";
+ dbgs() << " element type: " << EltVT << " (new)\n";
+ });
}
assert(isPowerOf2_32(FromTypeWidth) && FromTypeWidth >= 8 &&
@@ -1501,9 +1515,23 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
SDValue N2 = N->getOperand(NumElts + 1);
unsigned ToTypeWidth = TotalWidth / NumElts;
+ LLVM_DEBUG({
+ dbgs() << "tryStoreVector on " << TLI->getTargetNodeName(N->getOpcode())
+ << ":\n";
+ dbgs() << " store type: " << StoreVT << "\n";
+ dbgs() << " total store width: " << TotalWidth << " bits\n";
+ dbgs() << " to type width: " << ToTypeWidth << " bits\n";
+ dbgs() << " element type: " << EltVT << "\n";
+ });
+
if (isSubVectorPackedInInteger(EltVT)) {
ToTypeWidth = EltVT.getSizeInBits();
EltVT = MVT::getIntegerVT(ToTypeWidth);
+ LLVM_DEBUG({
+ dbgs() << " packed integers detected:\n";
+ dbgs() << " to type width: " << ToTypeWidth << " (new)\n";
+ dbgs() << " element type: " << EltVT << " (new)\n";
+ });
}
assert(isPowerOf2_32(ToTypeWidth) && ToTypeWidth >= 8 && ToTypeWidth <= 128 &&
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 21c8c0f2e5943..1e3c3eb722edb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5323,9 +5323,20 @@ convertVectorLoad(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI,
return {{NewLD, LoadChain}};
}
+static MachineMemOperand *
+getMachineMemOperandForType(const SelectionDAG &DAG,
+ const MachineMemOperand *MMO,
+ const MachinePointerInfo &PointerInfo, MVT VT) {
+ return DAG.getMachineFunction().getMachineMemOperand(MMO, PointerInfo,
+ LLT(VT));
+}
+
static SDValue PerformLoadCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const NVPTXSubtarget &STI) {
+ if (DCI.DAG.getOptLevel() == CodeGenOptLevel::None)
+ return {};
+
auto *MemN = cast<MemSDNode>(N);
// only operate on vectors of f32s / i64s
if (EVT MemVT = MemN->getMemoryVT();
@@ -5406,9 +5417,13 @@ static SDValue PerformLoadCombine(SDNode *N,
// Do we have to tweak the opcode for an NVPTXISD::Load* or do we have to
// rewrite an ISD::LOAD?
std::optional<NVPTXISD::NodeType> NewOpcode;
+
+ // LoadV's are handled slightly different in ISelDAGToDAG.
+ bool IsLoadV = false;
switch (N->getOpcode()) {
case NVPTXISD::LoadV2:
NewOpcode = NVPTXISD::LoadV4;
+ IsLoadV = true;
break;
case NVPTXISD::LoadParam:
NewOpcode = NVPTXISD::LoadParamV2;
@@ -5449,9 +5464,22 @@ static SDValue PerformLoadCombine(SDNode *N,
}
}
+ MVT LoadVT = MVT::f32;
+ MachineMemOperand *MMO = MemN->getMemOperand();
+
+ if (IsLoadV) {
+ // Some loads must have an operand type that matches the number of results
+ // and the type of each result. Because we changed a vNi64 to v(N*2)f32 we
+ // have to update it here. Note that LoadParam is not handled the same way
+ // in NVPTXISelDAGToDAG so we only do this for LoadV*.
+ LoadVT = MVT::getVectorVT(MVT::f32, NumElts);
+ MMO = getMachineMemOperandForType(DCI.DAG, MMO, MemN->getPointerInfo(),
+ LoadVT);
+ }
+
NewLoad = DCI.DAG.getMemIntrinsicNode(
*NewOpcode, SDLoc(N), DCI.DAG.getVTList(VTs),
- SmallVector<SDValue>(N->ops()), MVT::f32, MemN->getMemOperand());
+ SmallVector<SDValue>(N->ops()), LoadVT, MMO);
NewChain = NewLoad.getValue(*NewChainIdx);
if (NewGlueIdx)
NewGlue = NewLoad.getValue(*NewGlueIdx);
@@ -5550,6 +5578,9 @@ static SDValue PerformStoreCombineHelper(SDNode *N,
// as the previous value will become unused and eliminated later.
return N->getOperand(0);
+ if (DCI.DAG.getOptLevel() == CodeGenOptLevel::None)
+ return {};
+
auto *MemN = cast<MemSDNode>(N);
if (MemN->getMemoryVT() == MVT::v2f32) {
// try to fold, and expand:
@@ -5581,6 +5612,7 @@ static SDValue PerformStoreCombineHelper(SDNode *N,
if (NewOpcode) {
// copy chain, offset from existing store
SmallVector<SDValue> NewOps = {N->getOperand(0), N->getOperand(1)};
+ unsigned NumElts = 0;
// gather all operands to expand
for (unsigned I = 2, E = N->getNumOperands(); I < E; ++I) {
SDValue CurrentOp = N->getOperand(I);
@@ -5588,6 +5620,7 @@ static SDValue PerformStoreCombineHelper(SDNode *N,
assert(CurrentOp.getValueType() == MVT::v2f32);
NewOps.push_back(CurrentOp.getOperand(0));
NewOps.push_back(CurrentOp.getOperand(1));
+ NumElts += 2;
} else {
NewOps.clear();
break;
diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
index ece55a8fb44f8..d6569cccd74b9 100644
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -614,7 +614,7 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-F16: {
; CHECK-F16-NEXT: .reg .pred %p<3>;
; CHECK-F16-NEXT: .reg .b32 %r<9>;
-; CHECK-F16-NEXT: .reg .b64 %rd<3>;
+; CHECK-F16-NEXT: .reg .b64 %rd<4>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.b32 %r2, [test_select_cc_f32_f16_param_3];
@@ -626,7 +626,8 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-F16-NEXT: mov.b64 {%r5, %r6}, %rd1;
; CHECK-F16-NEXT: selp.f32 %r7, %r6, %r4, %p2;
; CHECK-F16-NEXT: selp.f32 %r8, %r5, %r3, %p1;
-; CHECK-F16-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7};
+; CHECK-F16-NEXT: mov.b64 %rd3, {%r8, %r7};
+; CHECK-F16-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-F16-NEXT: ret;
;
; CHECK-NOF16-LABEL: test_select_cc_f32_f16(
@@ -634,7 +635,7 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<13>;
-; CHECK-NOF16-NEXT: .reg .b64 %rd<3>;
+; CHECK-NOF16-NEXT: .reg .b64 %rd<4>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_select_cc_f32_f16_param_3];
@@ -653,7 +654,8 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-NOF16-NEXT: mov.b64 {%r9, %r10}, %rd1;
; CHECK-NOF16-NEXT: selp.f32 %r11, %r10, %r8, %p2;
; CHECK-NOF16-NEXT: selp.f32 %r12, %r9, %r7, %p1;
-; CHECK-NOF16-NEXT: st.param.v2.b32 [func_retval0], {%r12, %r11};
+; CHECK-NOF16-NEXT: mov.b64 %rd3, {%r12, %r11};
+; CHECK-NOF16-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NOF16-NEXT: ret;
<2 x half> %c, <2 x half> %d) #0 {
%cc = fcmp une <2 x half> %c, %d
@@ -1563,13 +1565,15 @@ define <2 x float> @test_fpext_2xfloat(<2 x half> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_fpext_2xfloat_param_0];
; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1;
; CHECK-NEXT: cvt.f32.f16 %r2, %rs2;
; CHECK-NEXT: cvt.f32.f16 %r3, %rs1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r3, %r2};
+; CHECK-NEXT: mov.b64 %rd1, {%r3, %r2};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
; CHECK-NEXT: ret;
%r = fpext <2 x half> %a to <2 x float>
ret <2 x float> %r
@@ -2054,6 +2058,7 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-F16: {
; CHECK-F16-NEXT: .reg .b16 %rs<3>;
; CHECK-F16-NEXT: .reg .b32 %r<8>;
+; CHECK-F16-NEXT: .reg .b64 %rd<2>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.b32 %r2, [test_copysign_extended_param_1];
@@ -2064,13 +2069,15 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-F16-NEXT: mov.b32 {%rs1, %rs2}, %r5;
; CHECK-F16-NEXT: cvt.f32.f16 %r6, %rs2;
; CHECK-F16-NEXT: cvt.f32.f16 %r7, %rs1;
-; CHECK-F16-NEXT: st.param.v2.b32 [func_retval0], {%r7, %r6};
+; CHECK-F16-NEXT: mov.b64 %rd1, {%r7, %r6};
+; CHECK-F16-NEXT: st.param.b64 [func_retval0], %rd1;
; CHECK-F16-NEXT: ret;
;
; CHECK-NOF16-LABEL: test_copysign_extended(
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<11>;
; CHECK-NOF16-NEXT: .reg .b32 %r<5>;
+; CHECK-NOF16-NEXT: .reg .b64 %rd<2>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_copysign_extended_param_1];
@@ -2085,7 +2092,8 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: or.b16 %rs10, %rs9, %rs8;
; CHECK-NOF16-NEXT: cvt.f32.f16 %r3, %rs10;
; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs7;
-; CHECK-NOF16-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOF16-NEXT: mov.b64 %rd1, {%r4, %r3};
+; CHECK-NOF16-NEXT: st.param.b64 [func_retval0], %rd1;
; CHECK-NOF16-NEXT: ret;
%r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
%xr = fpext <2 x half> %r to <2 x float>
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index a3823d07c76ef..defad1b483c6b 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -16,11 +16,13 @@ define <2 x float> @test_ret_const() #0 {
; CHECK-LABEL: test_ret_const(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b32 %r1, 0f40000000;
; CHECK-NEXT: mov.b32 %r2, 0f3F800000;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: mov.b64 %rd1, {%r2, %r1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
; CHECK-NEXT: ret;
ret <2 x float> <float 1.0, float 2.0>
}
@@ -241,7 +243,7 @@ define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-LABEL: test_fdiv(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<7>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_param_1];
@@ -250,7 +252,8 @@ define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
; CHECK-NEXT: div.rn.f32 %r5, %r4, %r2;
; CHECK-NEXT: div.rn.f32 %r6, %r3, %r1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT: mov.b64 %rd3, {%r6, %r5};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
@@ -261,7 +264,7 @@ define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b32 %r<15>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_param_1];
@@ -280,7 +283,8 @@ define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-NEXT: fma.rn.f32 %r13, %r12, %r1, %r3;
; CHECK-NEXT: testp.infinite.f32 %p2, %r1;
; CHECK-NEXT: selp.f32 %r14, %r3, %r13, %p2;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r9};
+; CHECK-NEXT: mov.b64 %rd3, {%r14, %r9};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
@@ -464,7 +468,7 @@ define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK-LABEL: test_fdiv_ftz(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<7>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_ftz_param_1];
@@ -473,7 +477,8 @@ define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
; CHECK-NEXT: div.rn.ftz.f32 %r5, %r4, %r2;
; CHECK-NEXT: div.rn.ftz.f32 %r6, %r3, %r1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT: mov.b64 %rd3, {%r6, %r5};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
%r = fdiv <2 x float> %a, %b
ret <2 x float> %r
@@ -484,7 +489,7 @@ define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b32 %r<15>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_ftz_param_1];
@@ -503,7 +508,8 @@ define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
; CHECK-NEXT: fma.rn.ftz.f32 %r13, %r12, %r1, %r3;
; CHECK-NEXT: testp.infinite.f32 %p2, %r1;
; CHECK-NEXT: selp.f32 %r14, %r3, %r13, %p2;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r9};
+; CHECK-NEXT: mov.b64 %rd3, {%r14, %r9};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
%r = frem <2 x float> %a, %b
ret <2 x float> %r
@@ -691,7 +697,7 @@ define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b32 %r<11>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd4, [test_select_cc_param_3];
@@ -706,7 +712,8 @@ define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %
; CHECK-NEXT: mov.b64 {%r7, %r8}, %rd1;
; CHECK-NEXT: selp.f32 %r9, %r8, %r6, %p2;
; CHECK-NEXT: selp.f32 %r10, %r7, %r5, %p1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r10, %r9};
+; CHECK-NEXT: mov.b64 %rd5, {%r10, %r9};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd5;
; CHECK-NEXT: ret;
%cc = fcmp une <2 x float> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
@@ -743,7 +750,7 @@ define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b32 %r<7>;
-; CHECK-NEXT: .reg .b64 %rd<7>;
+; CHECK-NEXT: .reg .b64 %rd<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.b64 {%rd5, %rd6}, [test_select_cc_f32_f64_param_3];
@@ -756,7 +763,8 @@ define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x
; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd1;
; CHECK-NEXT: selp.f32 %r5, %r4, %r2, %p2;
; CHECK-NEXT: selp.f32 %r6, %r3, %r1, %p1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT: mov.b64 %rd7, {%r6, %r5};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd7;
; CHECK-NEXT: ret;
%cc = fcmp une <2 x double> %c, %d
%r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
@@ -1171,12 +1179,14 @@ define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
; CHECK-LABEL: test_uitofp_2xi32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
; CHECK-NEXT: cvt.rn.f32.u32 %r3, %r2;
; CHECK-NEXT: cvt.rn.f32.u32 %r4, %r1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT: mov.b64 %rd1, {%r4, %r3};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
; CHECK-NEXT: ret;
%r = uitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
@@ -1186,13 +1196,14 @@ define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
; CHECK-LABEL: test_uitofp_2xi64(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
; CHECK-NEXT: cvt.rn.f32.u64 %r1, %rd2;
; CHECK-NEXT: cvt.rn.f32.u64 %r2, %rd1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: mov.b64 %rd3, {%r2, %r1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
%r = uitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
@@ -1202,12 +1213,14 @@ define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
; CHECK-LABEL: test_sitofp_2xi32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
; CHECK-NEXT: cvt.rn.f32.s32 %r3, %r2;
; CHECK-NEXT: cvt.rn.f32.s32 %r4, %r1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT: mov.b64 %rd1, {%r4, %r3};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
; CHECK-NEXT: ret;
%r = sitofp <2 x i32> %a to <2 x float>
ret <2 x float> %r
@@ -1217,13 +1230,14 @@ define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 {
; CHECK-LABEL: test_sitofp_2xi64(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
; CHECK-NEXT: cvt.rn.f32.s64 %r1, %rd2;
; CHECK-NEXT: cvt.rn.f32.s64 %r2, %rd1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: mov.b64 %rd3, {%r2, %r1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
%r = sitofp <2 x i64> %a to <2 x float>
ret <2 x float> %r
@@ -1253,13 +1267,14 @@ define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
; CHECK-LABEL: test_fptrunc_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fptrunc_2xdouble_param_0];
; CHECK-NEXT: cvt.rn.f32.f64 %r1, %rd2;
; CHECK-NEXT: cvt.rn.f32.f64 %r2, %rd1;
-; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: mov.b64 %rd3, {%r2, %r1};
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
; CHECK-NEXT: ret;
%r = fptrunc <2 x double> %a to <2 x float>
ret <2 x float> %r
>From 40a2396ba001cb4fc18c7d7f200206d116f6bd85 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Mon, 5 May 2025 17:12:38 -0700
Subject: [PATCH 31/32] [NVPTX] Expand VSELECT on v2f32 and other types
VSELECT instructions are always expanded because the predicate type they
use (vNi1) is unsupported by our backend. However, TLI doesn't check the
predicate type when determining whether to rewrite certain operations
(ex: FMAX/FMIN) as a VSELECT, only the value type. So in the case of
FMAX/FMIN on v2f32, the value type is now legal and therefore TLI thinks
it can rewrite it as VSELECT. Eventually this is scalarized into setp,
which is not what we want.
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 1 +
.../CodeGen/NVPTX/reduction-intrinsics.ll | 446 ++++++++++++------
2 files changed, 302 insertions(+), 145 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 1e3c3eb722edb..dc2e9e7cd2cf4 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -683,6 +683,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
MVT::v4i8, MVT::i32, MVT::i64}) {
+ setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
setOperationAction(ISD::BR_CC, VT, Expand);
}
diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
index d5b451dad7bc3..f7ea3e82cdde6 100644
--- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
@@ -116,18 +116,23 @@ define float @reduce_fadd_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fadd_float(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<17>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fadd_float_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_param_0];
-; CHECK-NEXT: add.rn.f32 %r9, %r1, 0f00000000;
-; CHECK-NEXT: add.rn.f32 %r10, %r9, %r2;
-; CHECK-NEXT: add.rn.f32 %r11, %r10, %r3;
-; CHECK-NEXT: add.rn.f32 %r12, %r11, %r4;
-; CHECK-NEXT: add.rn.f32 %r13, %r12, %r5;
-; CHECK-NEXT: add.rn.f32 %r14, %r13, %r6;
-; CHECK-NEXT: add.rn.f32 %r15, %r14, %r7;
-; CHECK-NEXT: add.rn.f32 %r16, %r15, %r8;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-NEXT: add.rn.f32 %r3, %r1, 0f00000000;
+; CHECK-NEXT: add.rn.f32 %r4, %r3, %r2;
+; CHECK-NEXT: mov.b64 {%r5, %r6}, %rd2;
+; CHECK-NEXT: add.rn.f32 %r7, %r4, %r5;
+; CHECK-NEXT: add.rn.f32 %r8, %r7, %r6;
+; CHECK-NEXT: mov.b64 {%r9, %r10}, %rd3;
+; CHECK-NEXT: add.rn.f32 %r11, %r8, %r9;
+; CHECK-NEXT: add.rn.f32 %r12, %r11, %r10;
+; CHECK-NEXT: mov.b64 {%r13, %r14}, %rd4;
+; CHECK-NEXT: add.rn.f32 %r15, %r12, %r13;
+; CHECK-NEXT: add.rn.f32 %r16, %r15, %r14;
; CHECK-NEXT: st.param.b32 [func_retval0], %r16;
; CHECK-NEXT: ret;
%res = call float @llvm.vector.reduce.fadd(float 0.0, <8 x float> %in)
@@ -135,45 +140,95 @@ define float @reduce_fadd_float(<8 x float> %in) {
}
define float @reduce_fadd_float_reassoc(<8 x float> %in) {
-; CHECK-LABEL: reduce_fadd_float_reassoc(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<17>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fadd_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_param_0];
-; CHECK-NEXT: add.rn.f32 %r9, %r3, %r7;
-; CHECK-NEXT: add.rn.f32 %r10, %r1, %r5;
-; CHECK-NEXT: add.rn.f32 %r11, %r4, %r8;
-; CHECK-NEXT: add.rn.f32 %r12, %r2, %r6;
-; CHECK-NEXT: add.rn.f32 %r13, %r12, %r11;
-; CHECK-NEXT: add.rn.f32 %r14, %r10, %r9;
-; CHECK-NEXT: add.rn.f32 %r15, %r14, %r13;
-; CHECK-NEXT: add.rn.f32 %r16, %r15, 0f00000000;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r16;
-; CHECK-NEXT: ret;
+; CHECK-SM80-LABEL: reduce_fadd_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<17>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: add.rn.f32 %r5, %r3, %r1;
+; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM80-NEXT: add.rn.f32 %r10, %r8, %r6;
+; CHECK-SM80-NEXT: add.rn.f32 %r11, %r4, %r2;
+; CHECK-SM80-NEXT: add.rn.f32 %r12, %r9, %r7;
+; CHECK-SM80-NEXT: add.rn.f32 %r13, %r12, %r11;
+; CHECK-SM80-NEXT: add.rn.f32 %r14, %r10, %r5;
+; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r13;
+; CHECK-SM80-NEXT: add.rn.f32 %r16, %r15, 0f00000000;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r16;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fadd_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<5>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<10>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0];
+; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4;
+; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3;
+; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5;
+; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7;
+; CHECK-SM100-NEXT: // implicit-def: %r2
+; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2};
+; CHECK-SM100-NEXT: add.rn.f32x2 %rd9, %rd7, %rd8;
+; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9;
+; CHECK-SM100-NEXT: add.rn.f32 %r4, %r3, 0f00000000;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fadd(float 0.0, <8 x float> %in)
ret float %res
}
define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) {
-; CHECK-LABEL: reduce_fadd_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<15>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fadd_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fadd_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: add.rn.f32 %r8, %r3, %r7;
-; CHECK-NEXT: add.rn.f32 %r9, %r1, %r5;
-; CHECK-NEXT: add.rn.f32 %r10, %r9, %r8;
-; CHECK-NEXT: add.rn.f32 %r11, %r2, %r6;
-; CHECK-NEXT: add.rn.f32 %r12, %r11, %r4;
-; CHECK-NEXT: add.rn.f32 %r13, %r10, %r12;
-; CHECK-NEXT: add.rn.f32 %r14, %r13, 0f00000000;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r14;
-; CHECK-NEXT: ret;
+; CHECK-SM80-LABEL: reduce_fadd_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<15>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<2>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b64 %rd1, [reduce_fadd_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd1;
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fadd_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: add.rn.f32 %r8, %r3, %r7;
+; CHECK-SM80-NEXT: add.rn.f32 %r9, %r1, %r5;
+; CHECK-SM80-NEXT: add.rn.f32 %r10, %r9, %r8;
+; CHECK-SM80-NEXT: add.rn.f32 %r11, %r2, %r6;
+; CHECK-SM80-NEXT: add.rn.f32 %r12, %r11, %r4;
+; CHECK-SM80-NEXT: add.rn.f32 %r13, %r10, %r12;
+; CHECK-SM80-NEXT: add.rn.f32 %r14, %r13, 0f00000000;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r14;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fadd_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b64 %rd1, [reduce_fadd_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd1;
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: mov.b64 %rd2, {%r1, %r2};
+; CHECK-SM100-NEXT: mov.b64 %rd3, {%r3, %r4};
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fadd_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: mov.b32 %r8, 0f80000000;
+; CHECK-SM100-NEXT: mov.b64 %rd4, {%r7, %r8};
+; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd3, %rd4;
+; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd2, %rd1;
+; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5;
+; CHECK-SM100-NEXT: mov.b64 {%r9, %r10}, %rd7;
+; CHECK-SM100-NEXT: add.rn.f32 %r11, %r9, %r10;
+; CHECK-SM100-NEXT: add.rn.f32 %r12, %r11, 0f00000000;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fadd(float 0.0, <7 x float> %in)
ret float %res
}
@@ -275,17 +330,22 @@ define float @reduce_fmul_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fmul_float(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmul_float_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_param_0];
-; CHECK-NEXT: mul.rn.f32 %r9, %r1, %r2;
-; CHECK-NEXT: mul.rn.f32 %r10, %r9, %r3;
-; CHECK-NEXT: mul.rn.f32 %r11, %r10, %r4;
-; CHECK-NEXT: mul.rn.f32 %r12, %r11, %r5;
-; CHECK-NEXT: mul.rn.f32 %r13, %r12, %r6;
-; CHECK-NEXT: mul.rn.f32 %r14, %r13, %r7;
-; CHECK-NEXT: mul.rn.f32 %r15, %r14, %r8;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-NEXT: mul.rn.f32 %r3, %r1, %r2;
+; CHECK-NEXT: mov.b64 {%r4, %r5}, %rd2;
+; CHECK-NEXT: mul.rn.f32 %r6, %r3, %r4;
+; CHECK-NEXT: mul.rn.f32 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd3;
+; CHECK-NEXT: mul.rn.f32 %r10, %r7, %r8;
+; CHECK-NEXT: mul.rn.f32 %r11, %r10, %r9;
+; CHECK-NEXT: mov.b64 {%r12, %r13}, %rd4;
+; CHECK-NEXT: mul.rn.f32 %r14, %r11, %r12;
+; CHECK-NEXT: mul.rn.f32 %r15, %r14, %r13;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in)
@@ -293,43 +353,91 @@ define float @reduce_fmul_float(<8 x float> %in) {
}
define float @reduce_fmul_float_reassoc(<8 x float> %in) {
-; CHECK-LABEL: reduce_fmul_float_reassoc(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmul_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_param_0];
-; CHECK-NEXT: mul.rn.f32 %r9, %r3, %r7;
-; CHECK-NEXT: mul.rn.f32 %r10, %r1, %r5;
-; CHECK-NEXT: mul.rn.f32 %r11, %r4, %r8;
-; CHECK-NEXT: mul.rn.f32 %r12, %r2, %r6;
-; CHECK-NEXT: mul.rn.f32 %r13, %r12, %r11;
-; CHECK-NEXT: mul.rn.f32 %r14, %r10, %r9;
-; CHECK-NEXT: mul.rn.f32 %r15, %r14, %r13;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM80-LABEL: reduce_fmul_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r3, %r1;
+; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r8, %r6;
+; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r4, %r2;
+; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r9, %r7;
+; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r12, %r11;
+; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r10, %r5;
+; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r13;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmul_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<4>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<10>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd2, %rd4;
+; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd1, %rd3;
+; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5;
+; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7;
+; CHECK-SM100-NEXT: // implicit-def: %r2
+; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2};
+; CHECK-SM100-NEXT: mul.rn.f32x2 %rd9, %rd7, %rd8;
+; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r3;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in)
ret float %res
}
define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) {
-; CHECK-LABEL: reduce_fmul_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmul_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmul_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: mul.rn.f32 %r8, %r3, %r7;
-; CHECK-NEXT: mul.rn.f32 %r9, %r1, %r5;
-; CHECK-NEXT: mul.rn.f32 %r10, %r9, %r8;
-; CHECK-NEXT: mul.rn.f32 %r11, %r2, %r6;
-; CHECK-NEXT: mul.rn.f32 %r12, %r11, %r4;
-; CHECK-NEXT: mul.rn.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM80-LABEL: reduce_fmul_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<2>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b64 %rd1, [reduce_fmul_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd1;
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmul_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: mul.rn.f32 %r8, %r3, %r7;
+; CHECK-SM80-NEXT: mul.rn.f32 %r9, %r1, %r5;
+; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r9, %r8;
+; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r2, %r6;
+; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r11, %r4;
+; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r10, %r12;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmul_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<12>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b64 %rd1, [reduce_fmul_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd1;
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: mov.b64 %rd2, {%r1, %r2};
+; CHECK-SM100-NEXT: mov.b64 %rd3, {%r3, %r4};
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmul_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: mov.b32 %r8, 0f3F800000;
+; CHECK-SM100-NEXT: mov.b64 %rd4, {%r7, %r8};
+; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd3, %rd4;
+; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd2, %rd1;
+; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5;
+; CHECK-SM100-NEXT: mov.b64 {%r9, %r10}, %rd7;
+; CHECK-SM100-NEXT: mul.rn.f32 %r11, %r9, %r10;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r11;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <7 x float> %in)
ret float %res
}
@@ -405,15 +513,20 @@ define float @reduce_fmax_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fmax_float(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmax_float_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_param_0];
-; CHECK-NEXT: max.f32 %r9, %r4, %r8;
-; CHECK-NEXT: max.f32 %r10, %r2, %r6;
-; CHECK-NEXT: max.f32 %r11, %r10, %r9;
-; CHECK-NEXT: max.f32 %r12, %r3, %r7;
-; CHECK-NEXT: max.f32 %r13, %r1, %r5;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: max.f32 %r5, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-NEXT: max.f32 %r10, %r9, %r7;
+; CHECK-NEXT: max.f32 %r11, %r10, %r5;
+; CHECK-NEXT: max.f32 %r12, %r3, %r1;
+; CHECK-NEXT: max.f32 %r13, %r8, %r6;
; CHECK-NEXT: max.f32 %r14, %r13, %r12;
; CHECK-NEXT: max.f32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
@@ -427,15 +540,20 @@ define float @reduce_fmax_float_reassoc(<8 x float> %in) {
; CHECK-LABEL: reduce_fmax_float_reassoc(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmax_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_param_0];
-; CHECK-NEXT: max.f32 %r9, %r4, %r8;
-; CHECK-NEXT: max.f32 %r10, %r2, %r6;
-; CHECK-NEXT: max.f32 %r11, %r10, %r9;
-; CHECK-NEXT: max.f32 %r12, %r3, %r7;
-; CHECK-NEXT: max.f32 %r13, %r1, %r5;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: max.f32 %r5, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-NEXT: max.f32 %r10, %r9, %r7;
+; CHECK-NEXT: max.f32 %r11, %r10, %r5;
+; CHECK-NEXT: max.f32 %r12, %r3, %r1;
+; CHECK-NEXT: max.f32 %r13, %r8, %r6;
; CHECK-NEXT: max.f32 %r14, %r13, %r12;
; CHECK-NEXT: max.f32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
@@ -449,10 +567,12 @@ define float @reduce_fmax_float_reassoc_nonpow2(<7 x float> %in) {
; CHECK-LABEL: reduce_fmax_float_reassoc_nonpow2(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<14>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [reduce_fmax_float_reassoc_nonpow2_param_0+16];
+; CHECK-NEXT: mov.b64 {%r5, %r6}, %rd1;
; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0];
; CHECK-NEXT: max.f32 %r8, %r3, %r7;
; CHECK-NEXT: max.f32 %r9, %r1, %r5;
@@ -537,15 +657,20 @@ define float @reduce_fmin_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fmin_float(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmin_float_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_param_0];
-; CHECK-NEXT: min.f32 %r9, %r4, %r8;
-; CHECK-NEXT: min.f32 %r10, %r2, %r6;
-; CHECK-NEXT: min.f32 %r11, %r10, %r9;
-; CHECK-NEXT: min.f32 %r12, %r3, %r7;
-; CHECK-NEXT: min.f32 %r13, %r1, %r5;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: min.f32 %r5, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-NEXT: min.f32 %r10, %r9, %r7;
+; CHECK-NEXT: min.f32 %r11, %r10, %r5;
+; CHECK-NEXT: min.f32 %r12, %r3, %r1;
+; CHECK-NEXT: min.f32 %r13, %r8, %r6;
; CHECK-NEXT: min.f32 %r14, %r13, %r12;
; CHECK-NEXT: min.f32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
@@ -559,15 +684,20 @@ define float @reduce_fmin_float_reassoc(<8 x float> %in) {
; CHECK-LABEL: reduce_fmin_float_reassoc(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmin_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_param_0];
-; CHECK-NEXT: min.f32 %r9, %r4, %r8;
-; CHECK-NEXT: min.f32 %r10, %r2, %r6;
-; CHECK-NEXT: min.f32 %r11, %r10, %r9;
-; CHECK-NEXT: min.f32 %r12, %r3, %r7;
-; CHECK-NEXT: min.f32 %r13, %r1, %r5;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: min.f32 %r5, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-NEXT: min.f32 %r10, %r9, %r7;
+; CHECK-NEXT: min.f32 %r11, %r10, %r5;
+; CHECK-NEXT: min.f32 %r12, %r3, %r1;
+; CHECK-NEXT: min.f32 %r13, %r8, %r6;
; CHECK-NEXT: min.f32 %r14, %r13, %r12;
; CHECK-NEXT: min.f32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
@@ -581,10 +711,12 @@ define float @reduce_fmin_float_reassoc_nonpow2(<7 x float> %in) {
; CHECK-LABEL: reduce_fmin_float_reassoc_nonpow2(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<14>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [reduce_fmin_float_reassoc_nonpow2_param_0+16];
+; CHECK-NEXT: mov.b64 {%r5, %r6}, %rd1;
; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0];
; CHECK-NEXT: min.f32 %r8, %r3, %r7;
; CHECK-NEXT: min.f32 %r9, %r1, %r5;
@@ -669,15 +801,20 @@ define float @reduce_fmaximum_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fmaximum_float(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmaximum_float_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_param_0];
-; CHECK-NEXT: max.NaN.f32 %r9, %r4, %r8;
-; CHECK-NEXT: max.NaN.f32 %r10, %r2, %r6;
-; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r9;
-; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r7;
-; CHECK-NEXT: max.NaN.f32 %r13, %r1, %r5;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7;
+; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5;
+; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1;
+; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6;
; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12;
; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
@@ -691,15 +828,20 @@ define float @reduce_fmaximum_float_reassoc(<8 x float> %in) {
; CHECK-LABEL: reduce_fmaximum_float_reassoc(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmaximum_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_param_0];
-; CHECK-NEXT: max.NaN.f32 %r9, %r4, %r8;
-; CHECK-NEXT: max.NaN.f32 %r10, %r2, %r6;
-; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r9;
-; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r7;
-; CHECK-NEXT: max.NaN.f32 %r13, %r1, %r5;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7;
+; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5;
+; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1;
+; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6;
; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12;
; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
@@ -713,10 +855,12 @@ define float @reduce_fmaximum_float_reassoc_nonpow2(<7 x float> %in) {
; CHECK-LABEL: reduce_fmaximum_float_reassoc_nonpow2(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<14>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16];
+; CHECK-NEXT: mov.b64 {%r5, %r6}, %rd1;
; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0];
; CHECK-NEXT: max.NaN.f32 %r8, %r3, %r7;
; CHECK-NEXT: max.NaN.f32 %r9, %r1, %r5;
@@ -801,15 +945,20 @@ define float @reduce_fminimum_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fminimum_float(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fminimum_float_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_param_0];
-; CHECK-NEXT: min.NaN.f32 %r9, %r4, %r8;
-; CHECK-NEXT: min.NaN.f32 %r10, %r2, %r6;
-; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r9;
-; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r7;
-; CHECK-NEXT: min.NaN.f32 %r13, %r1, %r5;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7;
+; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5;
+; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1;
+; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6;
; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12;
; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
@@ -823,15 +972,20 @@ define float @reduce_fminimum_float_reassoc(<8 x float> %in) {
; CHECK-LABEL: reduce_fminimum_float_reassoc(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fminimum_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_param_0];
-; CHECK-NEXT: min.NaN.f32 %r9, %r4, %r8;
-; CHECK-NEXT: min.NaN.f32 %r10, %r2, %r6;
-; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r9;
-; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r7;
-; CHECK-NEXT: min.NaN.f32 %r13, %r1, %r5;
+; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0];
+; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2;
+; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
+; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
+; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7;
+; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5;
+; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1;
+; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6;
; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12;
; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
@@ -845,10 +999,12 @@ define float @reduce_fminimum_float_reassoc_nonpow2(<7 x float> %in) {
; CHECK-LABEL: reduce_fminimum_float_reassoc_nonpow2(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<14>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [reduce_fminimum_float_reassoc_nonpow2_param_0+16];
+; CHECK-NEXT: mov.b64 {%r5, %r6}, %rd1;
; CHECK-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0];
; CHECK-NEXT: min.NaN.f32 %r8, %r3, %r7;
; CHECK-NEXT: min.NaN.f32 %r9, %r1, %r5;
>From 2ff6c05af191cc764d98b661e70f68441c889a88 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Mon, 12 May 2025 20:29:35 -0700
Subject: [PATCH 32/32] [NVPTX] add more comments to PerformLoadCombine
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 136 +++++++++++++++-----
1 file changed, 104 insertions(+), 32 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index dc2e9e7cd2cf4..e01d0df43b40e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5332,6 +5332,55 @@ getMachineMemOperandForType(const SelectionDAG &DAG,
LLT(VT));
}
+// These are Combiner rules for expanding v2f32 load results when they are
+// really being used as their individual f32 components. Now that v2f32 is a
+// legal type for a register, LowerFormalArguments() and ReplaceLoadVector()
+// will pack two f32s into a single 64-bit register, leading to ld.b64 instead
+// of ld.v2.f32 or ld.v2.b64 instead of ld.v4.f32. Sometimes this is ideal if
+// the results stay packed because they're passed to another instruction that
+// supports packed f32s (e.g. fmul.f32x2) or (rarely) if v2f32 really is being
+// reinterpreted as an i64, and then stored.
+//
+// Otherwise, SelectionDAG will unpack the results with a sequence of bitcasts,
+// extensions, and extracts if they go through any other kind of instruction.
+// This is not ideal, so we undo these patterns and rewrite the load to output
+// twice as many registers: two f32s for every one i64. This preserves PTX
+// codegen for programs that don't use packed f32s.
+//
+// Also, LowerFormalArguments() and ReplaceLoadVector() happen too early for us
+// to know whether the def-use chain for a particular load will eventually
+// include instructions supporting packed f32s. That is why we prefer to resolve
+// this problem within DAG Combiner.
+//
+// This rule proceeds in three general steps:
+//
+// 1. Identify the pattern, by traversing the def-use chain.
+// 2. Rewrite the load, by splitting each 64-bit result into two f32 registers.
+// 3. Rewrite all uses of the load, including chain and glue uses.
+//
+// This has the effect of combining multiple instructions into a single load.
+// For example:
+//
+// (before, ex1)
+// v: v2f32 = LoadParam [p]
+// f1: f32 = extractelt v, 0
+// f2: f32 = extractelt v, 1
+// r = add.f32 f1, f2
+//
+// ...or...
+//
+// (before, ex2)
+// i: i64 = LoadParam [p]
+// v: v2f32 = bitcast i
+// f1: f32 = extractelt v, 0
+// f2: f32 = extractelt v, 1
+// r = add.f32 f1, f2
+//
+// ...will become...
+//
+// (after for both)
+// vf: f32,f32 = LoadParamV2 [p]
+// r = add.f32 vf:0, vf:1
static SDValue PerformLoadCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const NVPTXSubtarget &STI) {
@@ -5351,6 +5400,7 @@ static SDValue PerformLoadCombine(SDNode *N,
return VT == MVT::i64 || VT == MVT::f32 || VT.isVector();
});
+ // (1) All we are doing here is looking for patterns.
SmallDenseMap<SDNode *, unsigned> ExtractElts;
SmallVector<SDNode *> ProxyRegs(OrigNumResults, nullptr);
SmallVector<std::pair<SDNode *, unsigned>> WorkList{{N, {}}};
@@ -5402,24 +5452,18 @@ static SDValue PerformLoadCombine(SDNode *N,
ProcessingInitialLoad = false;
}
- // (2) If the load's value is only used as f32 elements, replace all
- // extractelts with individual elements of the newly-created load. If there's
- // a ProxyReg, handle that too. After this check, we'll proceed in the
- // following way:
- // 1. Determine which type of load to create, which will split the results
- // of the original load into f32 components.
- // 2. If there's a ProxyReg, split that too.
- // 3. Replace all extractelts with references to the new load / proxy reg.
- // 4. Replace all glue/chain references with references to the new load /
- // proxy reg.
+ // Did we find any patterns? All patterns we're interested in end with an
+ // extractelt.
if (ExtractElts.empty())
return SDValue();
+ // (2) Now, we will decide what load to create.
+
// Do we have to tweak the opcode for an NVPTXISD::Load* or do we have to
// rewrite an ISD::LOAD?
std::optional<NVPTXISD::NodeType> NewOpcode;
- // LoadV's are handled slightly different in ISelDAGToDAG.
+ // LoadV's are handled slightly differently in ISelDAGToDAG. See below.
bool IsLoadV = false;
switch (N->getOpcode()) {
case NVPTXISD::LoadV2:
@@ -5434,7 +5478,15 @@ static SDValue PerformLoadCombine(SDNode *N,
break;
}
- SDValue OldChain, OldGlue;
+ // We haven't created the new load yet, but we're saving some information
+ // about the old load because we will need to replace all uses of it later.
+ // Because our pattern is generic, we're matching ISD::LOAD and
+ // NVPTXISD::Load*, and we just search for the chain and glue outputs rather
+ // than have a case for each type of load.
+ const bool HaveProxyRegs =
+ llvm::any_of(ProxyRegs, [](const SDNode *PR) { return PR != nullptr; });
+
+ SDValue OldChain, OldGlue /* optional */;
for (unsigned I = 0, E = N->getNumValues(); I != E; ++I) {
if (N->getValueType(I) == MVT::Other)
OldChain = SDValue(N, I);
@@ -5444,7 +5496,8 @@ static SDValue PerformLoadCombine(SDNode *N,
SDValue NewLoad, NewChain, NewGlue /* (optional) */;
unsigned NumElts = 0;
- if (NewOpcode) { // tweak NVPTXISD::Load* opcode
+ if (NewOpcode) {
+ // Here, we are tweaking an NVPTXISD::Load* opcode to output N*2 results.
SmallVector<EVT> VTs;
// should always be non-null after this
@@ -5485,6 +5538,15 @@ static SDValue PerformLoadCombine(SDNode *N,
if (NewGlueIdx)
NewGlue = NewLoad.getValue(*NewGlueIdx);
} else if (N->getOpcode() == ISD::LOAD) { // rewrite a load
+ // Here, we are lowering an ISD::LOAD to an NVPTXISD::Load*. For example:
+ //
+ // (before)
+ // v2f32,ch,glue = ISD::LOAD [p]
+ //
+ // ...becomes...
+ //
+ // (after)
+ // f32,f32,ch,glue = NVPTXISD::LoadV2 [p]
std::optional<EVT> CastToType;
EVT ResVT = N->getValueType(0);
if (ResVT == MVT::i64) {
@@ -5502,23 +5564,41 @@ static SDValue PerformLoadCombine(SDNode *N,
}
}
+ // If this was some other type of load we couldn't handle, we bail.
if (!NewLoad)
- return SDValue(); // could not match pattern
+ return SDValue();
- // (3) begin rewriting uses
+ // (3) We successfully rewrote the load. Now we must rewrite all uses of the
+ // old load.
SmallVector<SDValue> NewOutputsF32;
- if (llvm::any_of(ProxyRegs, [](const SDNode *PR) { return PR != nullptr; })) {
- // scalarize proxy regs, but first rewrite all uses of chain and glue from
- // the old load to the new load
+ if (!HaveProxyRegs) {
+ // The case without proxy registers in the def-use chain is simple. Each
+ // extractelt is matched to an output of the new load (see calls to
+ // DCI.CombineTo() below).
+ for (unsigned I = 0, E = NumElts; I != E; ++I)
+ if (NewLoad->getValueType(I) == MVT::f32)
+ NewOutputsF32.push_back(NewLoad.getValue(I));
+
+ // replace all glue and chain nodes
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
+ if (OldGlue)
+ DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
+ } else {
+ // The case with proxy registers is slightly more complicated. We have to
+ // expand those too.
+
+ // First, rewrite all uses of chain and glue from the old load to the new
+ // load. This is one less thing to worry about.
DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
+ // Now we will expand all the proxy registers for each output.
for (unsigned ProxyI = 0, ProxyE = ProxyRegs.size(); ProxyI != ProxyE;
++ProxyI) {
SDNode *ProxyReg = ProxyRegs[ProxyI];
- // no proxy reg might mean this result is unused
+ // No proxy reg might mean this result is unused.
if (!ProxyReg)
continue;
@@ -5532,12 +5612,12 @@ static SDValue PerformLoadCombine(SDNode *N,
if (SDValue OldInGlue = ProxyReg->getOperand(2); OldInGlue.getNode() != N)
NewGlue = OldInGlue;
- // update OldChain, OldGlue to the outputs of ProxyReg, which we will
- // replace later
+ // Update OldChain, OldGlue to the outputs of ProxyReg, which we will
+ // replace later.
OldChain = SDValue(ProxyReg, 1);
OldGlue = SDValue(ProxyReg, 2);
- // generate the scalar proxy regs
+ // Generate the scalar proxy regs.
for (unsigned I = 0, E = 2; I != E; ++I) {
SDValue ProxyRegElem = DCI.DAG.getNode(
NVPTXISD::ProxyReg, SDLoc(ProxyReg),
@@ -5552,18 +5632,10 @@ static SDValue PerformLoadCombine(SDNode *N,
DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
}
- } else {
- for (unsigned I = 0, E = NumElts; I != E; ++I)
- if (NewLoad->getValueType(I) == MVT::f32)
- NewOutputsF32.push_back(NewLoad.getValue(I));
-
- // replace all glue and chain nodes
- DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
- if (OldGlue)
- DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue);
}
- // replace all extractelts with the new outputs
+ // Replace all extractelts with the new outputs. This leaves the old load and
+ // unpacking instructions dead.
for (auto &[Extract, Index] : ExtractElts)
DCI.CombineTo(Extract, NewOutputsF32[Index], false);
More information about the llvm-commits
mailing list