[llvm] b337237 - [NVPTX] Fixed a few more corner cases for v4i8 lowering. (#69263)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 17 11:06:15 PDT 2023
Author: Artem Belevich
Date: 2023-10-17T11:06:11-07:00
New Revision: b33723710f5194080e8bfab9f21c8445647c976b
URL: https://github.com/llvm/llvm-project/commit/b33723710f5194080e8bfab9f21c8445647c976b
DIFF: https://github.com/llvm/llvm-project/commit/b33723710f5194080e8bfab9f21c8445647c976b.diff
LOG: [NVPTX] Fixed a few more corner cases for v4i8 lowering. (#69263)
Fixes https://github.com/llvm/llvm-project/issues/69124
Added:
Modified:
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
llvm/lib/Target/NVPTX/NVPTXISelLowering.h
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
llvm/test/CodeGen/NVPTX/param-load-store.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 36da2e7b40efaab..a935c0e16a5523c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -504,13 +504,21 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// Only logical ops can be done on v4i8 directly; others must be done
// elementwise.
setOperationAction(
- {ISD::ADD, ISD::MUL, ISD::ABS, ISD::SMIN,
- ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::CTPOP,
- ISD::CTLZ, ISD::ADD, ISD::SUB, ISD::MUL,
- ISD::SHL, ISD::SREM, ISD::UREM, ISD::SDIV,
- ISD::UDIV, ISD::SRA, ISD::SRL, ISD::MULHS,
- ISD::MULHU, ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP,
- ISD::UINT_TO_FP},
+ {ISD::ABS, ISD::ADD, ISD::ADDC, ISD::ADDE,
+ ISD::BITREVERSE, ISD::CTLZ, ISD::CTPOP, ISD::CTTZ,
+ ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FSHL, ISD::FSHR,
+ ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::PARITY,
+ ISD::ROTL, ISD::ROTR, ISD::SADDO, ISD::SADDO_CARRY,
+ ISD::SADDSAT, ISD::SDIV, ISD::SDIVREM, ISD::SELECT_CC,
+ ISD::SETCC, ISD::SHL, ISD::SINT_TO_FP, ISD::SMAX,
+ ISD::SMIN, ISD::SMULO, ISD::SMUL_LOHI, ISD::SRA,
+ ISD::SREM, ISD::SRL, ISD::SSHLSAT, ISD::SSUBO,
+ ISD::SSUBO_CARRY, ISD::SSUBSAT, ISD::SUB, ISD::SUBC,
+ ISD::SUBE, ISD::UADDO, ISD::UADDO_CARRY, ISD::UADDSAT,
+ ISD::UDIV, ISD::UDIVREM, ISD::UINT_TO_FP, ISD::UMAX,
+ ISD::UMIN, ISD::UMULO, ISD::UMUL_LOHI, ISD::UREM,
+ ISD::USHLSAT, ISD::USUBO, ISD::USUBO_CARRY, ISD::VSELECT,
+ ISD::USUBSAT},
MVT::v4i8, Expand);
// Operations not directly supported by NVPTX.
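For context: marking these opcodes Expand for MVT::v4i8 tells the SelectionDAG
legalizer to lower them through generic expansions (for most of them,
elementwise unrolling) instead of reaching instruction selection with a vector
node it cannot match. A minimal IR sketch, not taken from the commit's test
files, exercising one opcode from the newly covered list:

; Illustrative only: @llvm.uadd.sat maps to ISD::UADDSAT, which is now
; marked Expand for v4i8, so the legalizer expands it rather than
; failing to select the vector node.
define <4 x i8> @saturating_add_v4i8(<4 x i8> %a, <4 x i8> %b) {
  %r = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %a, <4 x i8> %b)
  ret <4 x i8> %r
}
declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>)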
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index 5c7c10965e2f2ca..f6932db2aeb0b9e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -586,6 +586,12 @@ class NVPTXTargetLowering : public TargetLowering {
AtomicExpansionKind
shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+ bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override {
+ // There's rarely any point in packing something into a vector type if we
+ // already have the source data.
+ return true;
+ }
+
private:
const NVPTXSubtarget &STI; // cache the subtarget here
SDValue getParamSymbol(SelectionDAG &DAG, int idx, EVT) const;
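aggressivelyPreferBuildVectorSources is an existing TargetLowering hook that
DAGCombiner consults; roughly, returning true asks the combiner to prefer the
scalar operands of a build_vector over re-extracting lanes from the packed
value. A hand-written sketch (not one of the commit's tests) of the kind of
pack/unpack round trip this avoids:

; Illustrative only: the lanes are packed and lane 0 is immediately
; re-extracted; preferring the build_vector sources lets the combiner
; feed %a straight to the fpext, so no mov.b32 pack/unpack pair is
; emitted (compare the updated f16x2 checks below).
define float @lane0_roundtrip(half %a, half %b) {
  %v0 = insertelement <2 x half> poison, half %a, i32 0
  %v1 = insertelement <2 x half> %v0, half %b, i32 1
  %e0 = extractelement <2 x half> %v1, i32 0
  %r = fpext half %e0 to float
  ret float %r
}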
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 84ed953ad18a9b4..b0b96b94a125752 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -3485,6 +3485,9 @@ def : Pat<(v2bf16 (build_vector (bf16 Int16Regs:$a), (bf16 Int16Regs:$b))),
def : Pat<(v2i16 (build_vector (i16 Int16Regs:$a), (i16 Int16Regs:$b))),
(V2I16toI32 Int16Regs:$a, Int16Regs:$b)>;
+def: Pat<(v2i16 (scalar_to_vector (i16 Int16Regs:$a))),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+
// Count leading zeros
let hasSideEffects = false in {
def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
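The new pattern gives v2i16 scalar_to_vector a direct selection: cvt.u32.u16
with CvtNONE simply widens the 16-bit scalar into the 32-bit register that
backs a v2i16, and the undefined high lane costs nothing. An illustrative IR
shape that typically builds such a node (a sketch, not a commit test):

; Illustrative only: inserting into lane 0 of an undef vector is
; usually built as scalar_to_vector in the DAG, and can now select
; to a single cvt.u32.u16.
define <2 x i16> @lane0_only(i16 %a) {
  %v = insertelement <2 x i16> undef, i16 %a, i32 0
  ret <2 x i16> %v
}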
diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
index 18788c776ffbd71..464b3a754804feb 100644
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -1319,10 +1319,8 @@ define <2 x half> @test_copysign_f64(<2 x half> %a, <2 x double> %b) #0 {
; CHECK-DAG: and.b16 [[BX1:%rs[0-9]+]], [[B1]], -32768;
; CHECK-DAG: or.b16 [[R0:%rs[0-9]+]], [[AX0]], [[BX0]];
; CHECK-DAG: or.b16 [[R1:%rs[0-9]+]], [[AX1]], [[BX1]];
-; CHECK-DAG: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: mov.b32 {[[RX0:%rs[0-9]+]], [[RX1:%rs[0-9]+]]}, [[R]]
-; CHECK-DAG: cvt.f32.f16 [[XR0:%f[0-9]+]], [[RX0]];
-; CHECK-DAG: cvt.f32.f16 [[XR1:%f[0-9]+]], [[RX1]];
+; CHECK-DAG: cvt.f32.f16 [[XR0:%f[0-9]+]], [[R0]];
+; CHECK-DAG: cvt.f32.f16 [[XR1:%f[0-9]+]], [[R1]];
; CHECK: st.param.v2.f32 [func_retval0+0], {[[XR0]], [[XR1]]};
; CHECK: ret;
define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index fd48313ad684847..ddad374a4dc119d 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -1269,4 +1269,158 @@ define <4 x i8> @test_fptoui_2xhalf_to_2xi8(<4 x half> %a) #0 {
ret <4 x i8> %r
}
+define void @test_srem_v4i8(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: test_srem_v4i8(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<13>;
+; CHECK-NEXT: .reg .b32 %r<18>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd3, [test_srem_v4i8_param_2];
+; CHECK-NEXT: ld.param.u64 %rd2, [test_srem_v4i8_param_1];
+; CHECK-NEXT: ld.param.u64 %rd1, [test_srem_v4i8_param_0];
+; CHECK-NEXT: ld.u32 %r1, [%rd1];
+; CHECK-NEXT: ld.u32 %r2, [%rd2];
+; CHECK-NEXT: bfe.s32 %r3, %r2, 0, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs1, %r3;
+; CHECK-NEXT: bfe.s32 %r4, %r1, 0, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs2, %r4;
+; CHECK-NEXT: rem.s16 %rs3, %rs2, %rs1;
+; CHECK-NEXT: cvt.u32.u16 %r5, %rs3;
+; CHECK-NEXT: bfe.s32 %r6, %r2, 8, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs4, %r6;
+; CHECK-NEXT: bfe.s32 %r7, %r1, 8, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs5, %r7;
+; CHECK-NEXT: rem.s16 %rs6, %rs5, %rs4;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs6;
+; CHECK-NEXT: bfi.b32 %r9, %r8, %r5, 8, 8;
+; CHECK-NEXT: bfe.s32 %r10, %r2, 16, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs7, %r10;
+; CHECK-NEXT: bfe.s32 %r11, %r1, 16, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs8, %r11;
+; CHECK-NEXT: rem.s16 %rs9, %rs8, %rs7;
+; CHECK-NEXT: cvt.u32.u16 %r12, %rs9;
+; CHECK-NEXT: bfi.b32 %r13, %r12, %r9, 16, 8;
+; CHECK-NEXT: bfe.s32 %r14, %r2, 24, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs10, %r14;
+; CHECK-NEXT: bfe.s32 %r15, %r1, 24, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs11, %r15;
+; CHECK-NEXT: rem.s16 %rs12, %rs11, %rs10;
+; CHECK-NEXT: cvt.u32.u16 %r16, %rs12;
+; CHECK-NEXT: bfi.b32 %r17, %r16, %r13, 24, 8;
+; CHECK-NEXT: st.u32 [%rd3], %r17;
+; CHECK-NEXT: ret;
+entry:
+ %t57 = load <4 x i8>, ptr %a, align 4
+ %t59 = load <4 x i8>, ptr %b, align 4
+ %x = srem <4 x i8> %t57, %t59
+ store <4 x i8> %x, ptr %c, align 4
+ ret void
+}
+
+;; v3i8 lowering, especially for unaligned loads, is terrible. We end up doing
+;; tons of pointless scalar_to_vector/bitcast/extract_elt on v2i16/v4i8, which
+;; is further complicated by LLVM trying to use i16 as an intermediate type,
+;; because we don't have i8 registers. It's a mess.
+;; Ideally we want to split it into element-wise ops, but the legalizer can't
+;; handle odd-sized vectors. TL;DR: don't use odd-sized vectors of i8.
+;; (An illustrative padded-to-v4i8 workaround appears after this diff.)
+define void @test_srem_v3i8(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: test_srem_v3i8(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<20>;
+; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd3, [test_srem_v3i8_param_2];
+; CHECK-NEXT: ld.param.u64 %rd2, [test_srem_v3i8_param_1];
+; CHECK-NEXT: ld.param.u64 %rd1, [test_srem_v3i8_param_0];
+; CHECK-NEXT: ld.u8 %rs1, [%rd1];
+; CHECK-NEXT: ld.u8 %rs2, [%rd1+1];
+; CHECK-NEXT: shl.b16 %rs3, %rs2, 8;
+; CHECK-NEXT: or.b16 %rs4, %rs3, %rs1;
+; CHECK-NEXT: cvt.u32.u16 %r1, %rs4;
+; CHECK-NEXT: ld.s8 %rs5, [%rd1+2];
+; CHECK-NEXT: ld.u8 %rs6, [%rd2];
+; CHECK-NEXT: ld.u8 %rs7, [%rd2+1];
+; CHECK-NEXT: shl.b16 %rs8, %rs7, 8;
+; CHECK-NEXT: or.b16 %rs9, %rs8, %rs6;
+; CHECK-NEXT: cvt.u32.u16 %r3, %rs9;
+; CHECK-NEXT: ld.s8 %rs10, [%rd2+2];
+; CHECK-NEXT: bfe.s32 %r5, %r3, 0, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs11, %r5;
+; CHECK-NEXT: bfe.s32 %r6, %r1, 0, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs12, %r6;
+; CHECK-NEXT: rem.s16 %rs13, %rs12, %rs11;
+; CHECK-NEXT: cvt.u32.u16 %r7, %rs13;
+; CHECK-NEXT: bfe.s32 %r8, %r3, 8, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs14, %r8;
+; CHECK-NEXT: bfe.s32 %r9, %r1, 8, 8;
+; CHECK-NEXT: cvt.s8.s32 %rs15, %r9;
+; CHECK-NEXT: rem.s16 %rs16, %rs15, %rs14;
+; CHECK-NEXT: cvt.u32.u16 %r10, %rs16;
+; CHECK-NEXT: bfi.b32 %r11, %r10, %r7, 8, 8;
+; CHECK-NEXT: // implicit-def: %r13
+; CHECK-NEXT: bfi.b32 %r12, %r13, %r11, 16, 8;
+; CHECK-NEXT: // implicit-def: %r15
+; CHECK-NEXT: bfi.b32 %r14, %r15, %r12, 24, 8;
+; CHECK-NEXT: rem.s16 %rs17, %rs5, %rs10;
+; CHECK-NEXT: cvt.u16.u32 %rs18, %r14;
+; CHECK-NEXT: st.u8 [%rd3], %rs18;
+; CHECK-NEXT: shr.u16 %rs19, %rs18, 8;
+; CHECK-NEXT: st.u8 [%rd3+1], %rs19;
+; CHECK-NEXT: st.u8 [%rd3+2], %rs17;
+; CHECK-NEXT: ret;
+entry:
+ %t57 = load <3 x i8>, ptr %a, align 1
+ %t59 = load <3 x i8>, ptr %b, align 1
+ %x = srem <3 x i8> %t57, %t59
+ store <3 x i8> %x, ptr %c, align 1
+ ret void
+}
+
+define void @test_sext_v4i1_to_v4i8(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: test_sext_v4i1_to_v4i8(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<5>;
+; CHECK-NEXT: .reg .b32 %r<18>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd3, [test_sext_v4i1_to_v4i8_param_2];
+; CHECK-NEXT: ld.param.u64 %rd2, [test_sext_v4i1_to_v4i8_param_1];
+; CHECK-NEXT: ld.param.u64 %rd1, [test_sext_v4i1_to_v4i8_param_0];
+; CHECK-NEXT: ld.u32 %r1, [%rd1];
+; CHECK-NEXT: ld.u32 %r2, [%rd2];
+; CHECK-NEXT: bfe.s32 %r3, %r2, 24, 8;
+; CHECK-NEXT: bfe.s32 %r4, %r1, 24, 8;
+; CHECK-NEXT: setp.hi.u32 %p1, %r4, %r3;
+; CHECK-NEXT: bfe.s32 %r5, %r2, 16, 8;
+; CHECK-NEXT: bfe.s32 %r6, %r1, 16, 8;
+; CHECK-NEXT: setp.hi.u32 %p2, %r6, %r5;
+; CHECK-NEXT: bfe.s32 %r7, %r2, 8, 8;
+; CHECK-NEXT: bfe.s32 %r8, %r1, 8, 8;
+; CHECK-NEXT: setp.hi.u32 %p3, %r8, %r7;
+; CHECK-NEXT: bfe.s32 %r9, %r2, 0, 8;
+; CHECK-NEXT: bfe.s32 %r10, %r1, 0, 8;
+; CHECK-NEXT: setp.hi.u32 %p4, %r10, %r9;
+; CHECK-NEXT: selp.s32 %r11, -1, 0, %p4;
+; CHECK-NEXT: selp.s32 %r12, -1, 0, %p3;
+; CHECK-NEXT: bfi.b32 %r13, %r12, %r11, 8, 8;
+; CHECK-NEXT: selp.s32 %r14, -1, 0, %p2;
+; CHECK-NEXT: bfi.b32 %r15, %r14, %r13, 16, 8;
+; CHECK-NEXT: selp.s32 %r16, -1, 0, %p1;
+; CHECK-NEXT: bfi.b32 %r17, %r16, %r15, 24, 8;
+; CHECK-NEXT: st.u32 [%rd3], %r17;
+; CHECK-NEXT: ret;
+entry:
+ %t1 = load <4 x i8>, ptr %a, align 4
+ %t2 = load <4 x i8>, ptr %b, align 4
+ %t5 = icmp ugt <4 x i8> %t1, %t2
+ %t6 = sext <4 x i1> %t5 to <4 x i8>
+ store <4 x i8> %t6, ptr %c, align 4
+ ret void
+}
+
attributes #0 = { nounwind }
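As the comment on test_srem_v3i8 above notes, odd-sized i8 vectors lower
badly. Callers who control the data layout can sidestep this by padding to
<4 x i8> and ignoring the extra lane. A hypothetical workaround sketch, not
part of the commit; it assumes 4-byte loads/stores of the buffers are legal
and that clobbering byte 3 of the destination is acceptable:

define void @srem_v3i8_as_v4i8(ptr %a, ptr %b, ptr %c) {
  ; Assumption: %a, %b, %c point at 4-byte-accessible storage.
  %t0 = load <4 x i8>, ptr %a, align 4
  %t1 = load <4 x i8>, ptr %b, align 4
  ; Pin the padding lane of the divisor to 1 so the widened srem
  ; cannot divide by zero in the unused element.
  %t1s = insertelement <4 x i8> %t1, i8 1, i32 3
  %x = srem <4 x i8> %t0, %t1s
  store <4 x i8> %x, ptr %c, align 4
  ret void
}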
diff --git a/llvm/test/CodeGen/NVPTX/param-load-store.ll b/llvm/test/CodeGen/NVPTX/param-load-store.ll
index b4208c691c91dfa..c14dc88431d316e 100644
--- a/llvm/test/CodeGen/NVPTX/param-load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/param-load-store.ll
@@ -364,10 +364,6 @@ define <4 x i16> @test_v4i16(<4 x i16> %a) {
; CHECK-NEXT: .param .align 16 .b8 test_v5i16_param_0[16]
; CHECK-DAG: ld.param.u16 [[E4:%rs[0-9]+]], [test_v5i16_param_0+8];
; CHECK-DAG: ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i16_param_0]
-; CHECK-DAG: mov.b32 [[R0:%r[0-9]+]], {[[E0]], [[E1]]};
-; CHECK-DAG: mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[R0]];
-; CHECK-DAG: mov.b32 [[R1:%r[0-9]+]], {[[E2]], [[E3]]};
-; CHECK-DAG: mov.b32 {[[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [[R1]];
; CHECK: .param .align 16 .b8 param0[16];
; CHECK-DAG: st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
@@ -496,7 +492,6 @@ define <4 x half> @test_v4f16(<4 x half> %a) {
; CHECK-LABEL: test_v5f16(
; CHECK: .param .align 16 .b8 test_v5f16_param_0[16]
; CHECK-DAG: ld.param.v4.b16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5f16_param_0];
-; CHECK-DAG: mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[HH01]];
; CHECK-DAG: ld.param.b16 [[E4:%rs[0-9]+]], [test_v5f16_param_0+8];
; CHECK: .param .align 16 .b8 param0[16];
; CHECK-DAG: st.param.v4.b16 [param0+0],