[llvm] 4372cab - Reland "[NVPTX] Add support for atomic add for f16 type" (#85197)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 15 00:14:02 PDT 2024
Author: Adrian Kuegel
Date: 2024-03-15T08:13:59+01:00
New Revision: 4372cab91476137c6637a277dcc6a9df02c12aae
URL: https://github.com/llvm/llvm-project/commit/4372cab91476137c6637a277dcc6a9df02c12aae
DIFF: https://github.com/llvm/llvm-project/commit/4372cab91476137c6637a277dcc6a9df02c12aae.diff
LOG: Reland "[NVPTX] Add support for atomic add for f16 type" (#85197)
atom.add.noftz.f16 is supported since SM 7.0 (sm_70) and requires PTX ISA 6.3 or newer.
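For reference, an atomicrmw fadd on half along the lines of the sketch below (the function name is made up; the checked-in version is the new atomics-sm70.ll test further down) is now selected to the native instruction on sm_70 with PTX 6.3+ instead of being expanded to a CAS loop:
define half @fadd_f16_example(ptr %addr, half %val) {
  %ret = atomicrmw fadd ptr %addr, half %val seq_cst
  ret half %ret
}
; expected lowering for the generic address space (register numbers illustrative):
;   atom.add.noftz.f16  %rs2, [%rd1], %rs1;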
Added:
llvm/test/CodeGen/NVPTX/atomics-sm70.ll
Modified:
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
llvm/test/CodeGen/NVPTX/atomics.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index c979c03dc1b835..c411c8ef9528d7 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -6100,6 +6100,9 @@ NVPTXTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (AI->isFloatingPointOperation()) {
if (AI->getOperation() == AtomicRMWInst::BinOp::FAdd) {
+ if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
+ STI.getPTXVersion() >= 63)
+ return AtomicExpansionKind::None;
if (Ty->isFloatTy())
return AtomicExpansionKind::None;
if (Ty->isDoubleTy() && STI.hasAtomAddF64())
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 477789a164ead2..c0c53380a13e9b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -1520,7 +1520,7 @@ multiclass F_ATOMIC_2_imp<ValueType ptrT, NVPTXRegClass ptrclass,
def imm : NVPTXInst<(outs regclass:$dst), (ins ptrclass:$addr, IMMType:$b),
!strconcat("atom", SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b;", ""),
[(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), IMM:$b))]>,
- Requires<Pred>;
+ Requires<!if(!eq(TypeStr, ".f16"), [Predicate<"false">], Pred)>;
}
multiclass F_ATOMIC_2<ValueType regT, NVPTXRegClass regclass, string SpaceStr, string TypeStr,
string OpcStr, PatFrag IntOp, Operand IMMType, SDNode IMM,
@@ -1630,6 +1630,13 @@ defm INT_PTX_ATOM_ADD_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".u64", ".add",
defm INT_PTX_ATOM_ADD_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global", ".u64",
".add", atomic_load_add_64_gen, i64imm, imm>;
+defm INT_PTX_ATOM_ADD_G_F16 : F_ATOMIC_2<f16, Int16Regs, ".global", ".f16", ".add.noftz",
+ atomic_load_add_g, f16imm, fpimm, [hasSM<70>, hasPTX<63>]>;
+defm INT_PTX_ATOM_ADD_S_F16 : F_ATOMIC_2<f16, Int16Regs, ".shared", ".f16", ".add.noftz",
+ atomic_load_add_s, f16imm, fpimm, [hasSM<70>, hasPTX<63>]>;
+defm INT_PTX_ATOM_ADD_GEN_F16 : F_ATOMIC_2<f16, Int16Regs, "", ".f16", ".add.noftz",
+ atomic_load_add_gen, f16imm, fpimm, [hasSM<70>, hasPTX<63>]>;
+
defm INT_PTX_ATOM_ADD_G_F32 : F_ATOMIC_2<f32, Float32Regs, ".global", ".f32", ".add",
atomic_load_add_g, f32imm, fpimm>;
defm INT_PTX_ATOM_ADD_S_F32 : F_ATOMIC_2<f32, Float32Regs, ".shared", ".f32", ".add",
@@ -2007,6 +2014,9 @@ multiclass ATOM2P_impl<string AsmStr, Intrinsic Intr,
SDNode Imm, ValueType ImmTy,
list<Predicate> Preds> {
let AddedComplexity = 1 in {
+ def : ATOM23_impl<AsmStr, regT, regclass, Preds,
+ (ins Int16Regs:$src, regclass:$b),
+ (Intr (i16 Int16Regs:$src), (regT regclass:$b))>;
def : ATOM23_impl<AsmStr, regT, regclass, Preds,
(ins Int32Regs:$src, regclass:$b),
(Intr (i32 Int32Regs:$src), (regT regclass:$b))>;
@@ -2017,6 +2027,9 @@ multiclass ATOM2P_impl<string AsmStr, Intrinsic Intr,
// tablegen can't infer argument types from Intrinsic (though it can
// from Instruction) so we have to enforce specific type on
// immediates via explicit cast to ImmTy.
+ def : ATOM23_impl<AsmStr, regT, regclass, Preds,
+ (ins Int16Regs:$src, ImmType:$b),
+ (Intr (i16 Int16Regs:$src), (ImmTy Imm:$b))>;
def : ATOM23_impl<AsmStr, regT, regclass, Preds,
(ins Int32Regs:$src, ImmType:$b),
(Intr (i32 Int32Regs:$src), (ImmTy Imm:$b))>;
@@ -2136,6 +2149,8 @@ multiclass ATOM2_add_impl<string OpStr> {
defm _s32 : ATOM2S_impl<OpStr, "i", "s32", i32, Int32Regs, i32imm, imm, i32, []>;
defm _u32 : ATOM2S_impl<OpStr, "i", "u32", i32, Int32Regs, i32imm, imm, i32, []>;
defm _u64 : ATOM2S_impl<OpStr, "i", "u64", i64, Int64Regs, i64imm, imm, i64, []>;
+ defm _f16 : ATOM2S_impl<OpStr, "f", "f16", f16, Int16Regs, f16imm, fpimm, f16,
+ [hasSM<70>, hasPTX<63>]>;
defm _f32 : ATOM2S_impl<OpStr, "f", "f32", f32, Float32Regs, f32imm, fpimm, f32,
[]>;
defm _f64 : ATOM2S_impl<OpStr, "f", "f64", f64, Float64Regs, f64imm, fpimm, f64,
diff --git a/llvm/test/CodeGen/NVPTX/atomics-sm70.ll b/llvm/test/CodeGen/NVPTX/atomics-sm70.ll
new file mode 100644
index 00000000000000..9cc45fbe313b7e
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/atomics-sm70.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx63 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx63 | FileCheck %s --check-prefixes=CHECK64
+; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx62 | FileCheck %s --check-prefixes=CHECKPTX62
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx63 | %ptxas-verify -arch=sm_70 %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx63 | %ptxas-verify -arch=sm_70 %}
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx62 | %ptxas-verify -arch=sm_70 %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+define void @test(ptr %dp0, ptr addrspace(1) %dp1, ptr addrspace(3) %dp3, half %val) {
+; CHECK-LABEL: test(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<7>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [test_param_0];
+; CHECK-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECK-NEXT: atom.add.noftz.f16 %rs2, [%r1], %rs1;
+; CHECK-NEXT: ld.param.u32 %r2, [test_param_1];
+; CHECK-NEXT: mov.b16 %rs3, 0x3C00;
+; CHECK-NEXT: atom.add.noftz.f16 %rs4, [%r1], %rs3;
+; CHECK-NEXT: ld.param.u32 %r3, [test_param_2];
+; CHECK-NEXT: atom.global.add.noftz.f16 %rs5, [%r2], %rs1;
+; CHECK-NEXT: atom.shared.add.noftz.f16 %rs6, [%r3], %rs1;
+; CHECK-NEXT: ret;
+;
+; CHECK64-LABEL: test(
+; CHECK64: {
+; CHECK64-NEXT: .reg .b16 %rs<7>;
+; CHECK64-NEXT: .reg .b64 %rd<4>;
+; CHECK64-EMPTY:
+; CHECK64-NEXT: // %bb.0:
+; CHECK64-NEXT: ld.param.u64 %rd1, [test_param_0];
+; CHECK64-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECK64-NEXT: atom.add.noftz.f16 %rs2, [%rd1], %rs1;
+; CHECK64-NEXT: ld.param.u64 %rd2, [test_param_1];
+; CHECK64-NEXT: mov.b16 %rs3, 0x3C00;
+; CHECK64-NEXT: atom.add.noftz.f16 %rs4, [%rd1], %rs3;
+; CHECK64-NEXT: ld.param.u64 %rd3, [test_param_2];
+; CHECK64-NEXT: atom.global.add.noftz.f16 %rs5, [%rd2], %rs1;
+; CHECK64-NEXT: atom.shared.add.noftz.f16 %rs6, [%rd3], %rs1;
+; CHECK64-NEXT: ret;
+;
+; CHECKPTX62-LABEL: test(
+; CHECKPTX62: {
+; CHECKPTX62-NEXT: .reg .pred %p<5>;
+; CHECKPTX62-NEXT: .reg .b16 %rs<19>;
+; CHECKPTX62-NEXT: .reg .b32 %r<58>;
+; CHECKPTX62-EMPTY:
+; CHECKPTX62-NEXT: // %bb.0:
+; CHECKPTX62-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECKPTX62-NEXT: ld.param.u32 %r23, [test_param_2];
+; CHECKPTX62-NEXT: ld.param.u32 %r22, [test_param_1];
+; CHECKPTX62-NEXT: ld.param.u32 %r24, [test_param_0];
+; CHECKPTX62-NEXT: and.b32 %r1, %r24, -4;
+; CHECKPTX62-NEXT: and.b32 %r25, %r24, 3;
+; CHECKPTX62-NEXT: shl.b32 %r2, %r25, 3;
+; CHECKPTX62-NEXT: mov.b32 %r26, 65535;
+; CHECKPTX62-NEXT: shl.b32 %r27, %r26, %r2;
+; CHECKPTX62-NEXT: not.b32 %r3, %r27;
+; CHECKPTX62-NEXT: ld.u32 %r54, [%r1];
+; CHECKPTX62-NEXT: $L__BB0_1: // %atomicrmw.start
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r28, %r54, %r2;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs2, %r28;
+; CHECKPTX62-NEXT: add.rn.f16 %rs4, %rs2, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r29, %rs4;
+; CHECKPTX62-NEXT: shl.b32 %r30, %r29, %r2;
+; CHECKPTX62-NEXT: and.b32 %r31, %r54, %r3;
+; CHECKPTX62-NEXT: or.b32 %r32, %r31, %r30;
+; CHECKPTX62-NEXT: atom.cas.b32 %r6, [%r1], %r54, %r32;
+; CHECKPTX62-NEXT: setp.ne.s32 %p1, %r6, %r54;
+; CHECKPTX62-NEXT: mov.u32 %r54, %r6;
+; CHECKPTX62-NEXT: @%p1 bra $L__BB0_1;
+; CHECKPTX62-NEXT: // %bb.2: // %atomicrmw.end
+; CHECKPTX62-NEXT: ld.u32 %r55, [%r1];
+; CHECKPTX62-NEXT: $L__BB0_3: // %atomicrmw.start9
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r33, %r55, %r2;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs6, %r33;
+; CHECKPTX62-NEXT: mov.b16 %rs8, 0x3C00;
+; CHECKPTX62-NEXT: add.rn.f16 %rs9, %rs6, %rs8;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r34, %rs9;
+; CHECKPTX62-NEXT: shl.b32 %r35, %r34, %r2;
+; CHECKPTX62-NEXT: and.b32 %r36, %r55, %r3;
+; CHECKPTX62-NEXT: or.b32 %r37, %r36, %r35;
+; CHECKPTX62-NEXT: atom.cas.b32 %r9, [%r1], %r55, %r37;
+; CHECKPTX62-NEXT: setp.ne.s32 %p2, %r9, %r55;
+; CHECKPTX62-NEXT: mov.u32 %r55, %r9;
+; CHECKPTX62-NEXT: @%p2 bra $L__BB0_3;
+; CHECKPTX62-NEXT: // %bb.4: // %atomicrmw.end8
+; CHECKPTX62-NEXT: and.b32 %r10, %r22, -4;
+; CHECKPTX62-NEXT: shl.b32 %r38, %r22, 3;
+; CHECKPTX62-NEXT: and.b32 %r11, %r38, 24;
+; CHECKPTX62-NEXT: shl.b32 %r40, %r26, %r11;
+; CHECKPTX62-NEXT: not.b32 %r12, %r40;
+; CHECKPTX62-NEXT: ld.global.u32 %r56, [%r10];
+; CHECKPTX62-NEXT: $L__BB0_5: // %atomicrmw.start27
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r41, %r56, %r11;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs11, %r41;
+; CHECKPTX62-NEXT: add.rn.f16 %rs13, %rs11, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r42, %rs13;
+; CHECKPTX62-NEXT: shl.b32 %r43, %r42, %r11;
+; CHECKPTX62-NEXT: and.b32 %r44, %r56, %r12;
+; CHECKPTX62-NEXT: or.b32 %r45, %r44, %r43;
+; CHECKPTX62-NEXT: atom.global.cas.b32 %r15, [%r10], %r56, %r45;
+; CHECKPTX62-NEXT: setp.ne.s32 %p3, %r15, %r56;
+; CHECKPTX62-NEXT: mov.u32 %r56, %r15;
+; CHECKPTX62-NEXT: @%p3 bra $L__BB0_5;
+; CHECKPTX62-NEXT: // %bb.6: // %atomicrmw.end26
+; CHECKPTX62-NEXT: and.b32 %r16, %r23, -4;
+; CHECKPTX62-NEXT: shl.b32 %r46, %r23, 3;
+; CHECKPTX62-NEXT: and.b32 %r17, %r46, 24;
+; CHECKPTX62-NEXT: shl.b32 %r48, %r26, %r17;
+; CHECKPTX62-NEXT: not.b32 %r18, %r48;
+; CHECKPTX62-NEXT: ld.shared.u32 %r57, [%r16];
+; CHECKPTX62-NEXT: $L__BB0_7: // %atomicrmw.start45
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r49, %r57, %r17;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs15, %r49;
+; CHECKPTX62-NEXT: add.rn.f16 %rs17, %rs15, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r50, %rs17;
+; CHECKPTX62-NEXT: shl.b32 %r51, %r50, %r17;
+; CHECKPTX62-NEXT: and.b32 %r52, %r57, %r18;
+; CHECKPTX62-NEXT: or.b32 %r53, %r52, %r51;
+; CHECKPTX62-NEXT: atom.shared.cas.b32 %r21, [%r16], %r57, %r53;
+; CHECKPTX62-NEXT: setp.ne.s32 %p4, %r21, %r57;
+; CHECKPTX62-NEXT: mov.u32 %r57, %r21;
+; CHECKPTX62-NEXT: @%p4 bra $L__BB0_7;
+; CHECKPTX62-NEXT: // %bb.8: // %atomicrmw.end44
+; CHECKPTX62-NEXT: ret;
+ %r1 = atomicrmw fadd ptr %dp0, half %val seq_cst
+ %r2 = atomicrmw fadd ptr %dp0, half 1.0 seq_cst
+ %r3 = atomicrmw fadd ptr addrspace(1) %dp1, half %val seq_cst
+ %r4 = atomicrmw fadd ptr addrspace(3) %dp3, half %val seq_cst
+ ret void
+}
+
+attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/NVPTX/atomics.ll b/llvm/test/CodeGen/NVPTX/atomics.ll
index e99d0fd05e346b..6f2b5dcf47f13b 100644
--- a/llvm/test/CodeGen/NVPTX/atomics.ll
+++ b/llvm/test/CodeGen/NVPTX/atomics.ll
@@ -175,6 +175,13 @@ define float @atomicrmw_add_f32_generic(ptr %addr, float %val) {
ret float %ret
}
+; CHECK-LABEL: atomicrmw_add_f16_generic
+define half @atomicrmw_add_f16_generic(ptr %addr, half %val) {
+; CHECK: atom.cas
+ %ret = atomicrmw fadd ptr %addr, half %val seq_cst
+ ret half %ret
+}
+
; CHECK-LABEL: atomicrmw_add_f32_addrspace1
define float @atomicrmw_add_f32_addrspace1(ptr addrspace(1) %addr, float %val) {
; CHECK: atom.global.add.f32
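Taken together, the effective gating for atomicrmw fadd in NVPTXTargetLowering::shouldExpandAtomicRMWInIR after this patch looks roughly like the sketch below. This is a paraphrase of the NVPTXISelLowering.cpp hunk above, not the verbatim upstream source; the fallback path is only implied (it is what produces the atom.cas loops in the CHECKPTX62 output).
// Sketch, paraphrased from the NVPTXISelLowering.cpp hunk above.
if (AI->isFloatingPointOperation()) {
  if (AI->getOperation() == AtomicRMWInst::BinOp::FAdd) {
    // New in this patch: f16 add is native on sm_70+ with PTX ISA >= 6.3
    // (emitted as atom.add.noftz.f16).
    if (Ty->isHalfTy() && STI.getSmVersion() >= 70 && STI.getPTXVersion() >= 63)
      return AtomicExpansionKind::None;
    // f32 add is always native; f64 add additionally needs hasAtomAddF64().
    if (Ty->isFloatTy())
      return AtomicExpansionKind::None;
    if (Ty->isDoubleTy() && STI.hasAtomAddF64())
      return AtomicExpansionKind::None;
  }
  // Anything else is expanded, e.g. to the CAS loop shown in the ptx62 run above.
}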