[llvm] bcd9177 - AMDGPU: Do binop of select of constant fold in AMDGPUCodeGenPrepare

Matt Arsenault via llvm-commits llvm-commits@lists.llvm.org
Wed Jan 22 07:17:12 PST 2020


Author: Matt Arsenault
Date: 2020-01-22T10:16:39-05:00
New Revision: bcd91778fe7e6fc66cdccc5ddc3ff3fc48909f6b

URL: https://github.com/llvm/llvm-project/commit/bcd91778fe7e6fc66cdccc5ddc3ff3fc48909f6b
DIFF: https://github.com/llvm/llvm-project/commit/bcd91778fe7e6fc66cdccc5ddc3ff3fc48909f6b.diff

LOG: AMDGPU: Do binop of select of constant fold in AMDGPUCodeGenPrepare

DAGCombiner does this, but divisions expanded here miss this
optimization. Since 67aa18f165640374cf0e0a6226dc793bbda6e74f,
divisions have been expanded here and missed out on this
optimization. Avoids test regressions in a future patch.

Added: 
    llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
    llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
    llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index cfa52ecbea29..fb23d07dfb26 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -17,6 +17,7 @@
 #include "AMDGPUTargetMachine.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -152,6 +153,10 @@ class AMDGPUCodeGenPrepare : public FunctionPass,
   /// SelectionDAG has an issue where an and asserting the bits are known
   bool replaceMulWithMul24(BinaryOperator &I) const;
 
+  /// Perform same function as equivalently named function in DAGCombiner. Since
+  /// we expand some divisions here, we need to perform this before obscuring.
+  bool foldBinOpIntoSelect(BinaryOperator &I) const;
+
   /// Expands 24 bit div or rem.
   Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                         Value *Num, Value *Den,
@@ -525,6 +530,53 @@ bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
   return true;
 }
 
+bool AMDGPUCodeGenPrepare::foldBinOpIntoSelect(BinaryOperator &BO) const {
+  // Don't do this unless the old select is going away. We want to eliminate the
+  // binary operator, not replace a binop with a select.
+  int SelOpNo = 0;
+  SelectInst *Sel = dyn_cast<SelectInst>(BO.getOperand(0));
+  if (!Sel || !Sel->hasOneUse()) {
+    SelOpNo = 1;
+    Sel = dyn_cast<SelectInst>(BO.getOperand(1));
+  }
+
+  if (!Sel || !Sel->hasOneUse())
+    return false;
+
+  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
+  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
+  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
+  if (!CBO || !CT || !CF)
+    return false;
+
+  // TODO: Handle special 0/-1 cases DAG combine does, although we only really
+  // need to handle divisions here.
+  Constant *FoldedT = SelOpNo ?
+    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
+    ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
+  if (isa<ConstantExpr>(FoldedT))
+    return false;
+
+  Constant *FoldedF = SelOpNo ?
+    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
+    ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
+  if (isa<ConstantExpr>(FoldedF))
+    return false;
+
+  IRBuilder<> Builder(&BO);
+  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
+  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
+    Builder.setFastMathFlags(FPOp->getFastMathFlags());
+
+  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
+                                          FoldedT, FoldedF);
+  NewSelect->takeName(&BO);
+  BO.replaceAllUsesWith(NewSelect);
+  BO.eraseFromParent();
+  Sel->eraseFromParent();
+  return true;
+}
+
 static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv, bool HasDenormals) {
   const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
   if (!CNum)
@@ -883,6 +935,9 @@ Value* AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
 }
 
 bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
+  if (foldBinOpIntoSelect(I))
+    return true;
+
   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
       DA->isUniform(&I) && promoteUniformOpToI32(I))
     return true;

diff  --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
new file mode 100644
index 000000000000..7ee1cdb72fe1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
@@ -0,0 +1,443 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-codegenprepare %s | FileCheck -check-prefix=IR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s
+
+define i32 @select_sdiv_lhs_const_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_lhs_const_i32(
+; IR-NEXT:    [[OP:%.*]] = select i1 [[COND:%.*]], i32 200000, i32 125000
+; IR-NEXT:    ret i32 [[OP]]
+;
+; GCN-LABEL: select_sdiv_lhs_const_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x1e848
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x30d40
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 5, i32 8
+  %op = sdiv i32 1000000, %select
+  ret i32 %op
+}
+
+define i32 @select_sdiv_rhs_const_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_rhs_const_i32(
+; IR-NEXT:    [[OP:%.*]] = select i1 [[COND:%.*]], i32 1000, i32 10000
+; IR-NEXT:    ret i32 [[OP]]
+;
+; GCN-LABEL: select_sdiv_rhs_const_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x2710
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x3e8
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 42000, i32 420000
+  %op = sdiv i32 %select, 42
+  ret i32 %op
+}
+
+define <2 x i32> @select_sdiv_lhs_const_v2i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_lhs_const_v2i32(
+; IR-NEXT:    [[OP:%.*]] = select i1 [[COND:%.*]], <2 x i32> <i32 666, i32 undef>, <2 x i32> <i32 555, i32 1428>
+; IR-NEXT:    ret <2 x i32> [[OP]]
+;
+; GCN-LABEL: select_sdiv_lhs_const_v2i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x22b
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x29a
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x594
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, <2 x i32> <i32 5, i32 undef>, <2 x i32> <i32 6, i32 7>
+  %op = sdiv <2 x i32> <i32 3333, i32 9999>, %select
+  ret <2 x i32> %op
+}
+
+define <2 x i32> @select_sdiv_rhs_const_v2i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_rhs_const_v2i32(
+; IR-NEXT:    [[OP:%.*]] = select i1 [[COND:%.*]], <2 x i32> <i32 198621, i32 20855308>, <2 x i32> <i32 222748, i32 2338858>
+; IR-NEXT:    ret <2 x i32> [[OP]]
+;
+; GCN-LABEL: select_sdiv_rhs_const_v2i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x3661c
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x307dd
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x23b02a
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x13e3a0c
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, <2 x i32> <i32 8342123, i32 834212353>, <2 x i32> <i32 9355456, i32 93554321>
+  %op = sdiv <2 x i32> %select, <i32 42, i32 40>
+  ret <2 x i32> %op
+}
+
+@gv = external addrspace(1) global i32
+
+define i32 @select_sdiv_lhs_opaque_const0_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_lhs_opaque_const0_i32(
+; IR-NEXT:    [[SELECT:%.*]] = select i1 [[COND:%.*]], i32 ptrtoint (i32 addrspace(1)* @gv to i32), i32 5
+; IR-NEXT:    [[TMP1:%.*]] = ashr i32 [[SELECT]], 31
+; IR-NEXT:    [[TMP2:%.*]] = xor i32 0, [[TMP1]]
+; IR-NEXT:    [[TMP3:%.*]] = add i32 [[SELECT]], [[TMP1]]
+; IR-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1]]
+; IR-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP4]] to float
+; IR-NEXT:    [[TMP6:%.*]] = fdiv fast float 1.000000e+00, [[TMP5]]
+; IR-NEXT:    [[TMP7:%.*]] = fmul fast float [[TMP6]], 0x41F0000000000000
+; IR-NEXT:    [[TMP8:%.*]] = fptoui float [[TMP7]] to i32
+; IR-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+; IR-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP4]] to i64
+; IR-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP9]], [[TMP10]]
+; IR-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
+; IR-NEXT:    [[TMP13:%.*]] = lshr i64 [[TMP11]], 32
+; IR-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
+; IR-NEXT:    [[TMP15:%.*]] = sub i32 0, [[TMP12]]
+; IR-NEXT:    [[TMP16:%.*]] = icmp eq i32 [[TMP14]], 0
+; IR-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 [[TMP12]]
+; IR-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
+; IR-NEXT:    [[TMP19:%.*]] = zext i32 [[TMP8]] to i64
+; IR-NEXT:    [[TMP20:%.*]] = mul i64 [[TMP18]], [[TMP19]]
+; IR-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-NEXT:    [[TMP22:%.*]] = lshr i64 [[TMP20]], 32
+; IR-NEXT:    [[TMP23:%.*]] = trunc i64 [[TMP22]] to i32
+; IR-NEXT:    [[TMP24:%.*]] = add i32 [[TMP8]], [[TMP23]]
+; IR-NEXT:    [[TMP25:%.*]] = sub i32 [[TMP8]], [[TMP23]]
+; IR-NEXT:    [[TMP26:%.*]] = select i1 [[TMP16]], i32 [[TMP24]], i32 [[TMP25]]
+; IR-NEXT:    [[TMP27:%.*]] = zext i32 [[TMP26]] to i64
+; IR-NEXT:    [[TMP28:%.*]] = mul i64 [[TMP27]], 1000000
+; IR-NEXT:    [[TMP29:%.*]] = trunc i64 [[TMP28]] to i32
+; IR-NEXT:    [[TMP30:%.*]] = lshr i64 [[TMP28]], 32
+; IR-NEXT:    [[TMP31:%.*]] = trunc i64 [[TMP30]] to i32
+; IR-NEXT:    [[TMP32:%.*]] = mul i32 [[TMP31]], [[TMP4]]
+; IR-NEXT:    [[TMP33:%.*]] = sub i32 1000000, [[TMP32]]
+; IR-NEXT:    [[TMP34:%.*]] = icmp uge i32 [[TMP33]], [[TMP4]]
+; IR-NEXT:    [[TMP35:%.*]] = select i1 [[TMP34]], i32 -1, i32 0
+; IR-NEXT:    [[TMP36:%.*]] = icmp uge i32 1000000, [[TMP32]]
+; IR-NEXT:    [[TMP37:%.*]] = select i1 [[TMP36]], i32 -1, i32 0
+; IR-NEXT:    [[TMP38:%.*]] = and i32 [[TMP35]], [[TMP37]]
+; IR-NEXT:    [[TMP39:%.*]] = icmp eq i32 [[TMP38]], 0
+; IR-NEXT:    [[TMP40:%.*]] = add i32 [[TMP31]], 1
+; IR-NEXT:    [[TMP41:%.*]] = sub i32 [[TMP31]], 1
+; IR-NEXT:    [[TMP42:%.*]] = select i1 [[TMP39]], i32 [[TMP31]], i32 [[TMP40]]
+; IR-NEXT:    [[TMP43:%.*]] = select i1 [[TMP36]], i32 [[TMP42]], i32 [[TMP41]]
+; IR-NEXT:    [[TMP44:%.*]] = xor i32 [[TMP43]], [[TMP2]]
+; IR-NEXT:    [[TMP45:%.*]] = sub i32 [[TMP44]], [[TMP2]]
+; IR-NEXT:    ret i32 [[TMP45]]
+;
+; GCN-LABEL: select_sdiv_lhs_opaque_const0_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_getpc_b64 s[4:5]
+; GCN-NEXT:    s_add_u32 s4, s4, gv@gotpcrel32@lo+4
+; GCN-NEXT:    s_addc_u32 s5, s5, gv@gotpcrel32@hi+4
+; GCN-NEXT:    s_load_dword s4, s[4:5], 0x0
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    s_mov_b32 s6, 0xf4240
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v1, s4
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 5, v1, vcc
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GCN-NEXT:    v_mul_f32_e32 v2, 0x4f800000, v2
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_mul_lo_u32 v3, v2, v0
+; GCN-NEXT:    v_mul_hi_u32 v4, v2, v0
+; GCN-NEXT:    v_sub_u32_e32 v5, vcc, 0, v3
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GCN-NEXT:    v_mul_hi_u32 v3, v3, v2
+; GCN-NEXT:    v_add_u32_e64 v4, s[4:5], v2, v3
+; GCN-NEXT:    v_sub_u32_e64 v2, s[4:5], v2, v3
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT:    v_mul_hi_u32 v2, v2, s6
+; GCN-NEXT:    s_mov_b32 s4, 0xf4241
+; GCN-NEXT:    v_mul_lo_u32 v3, v2, v0
+; GCN-NEXT:    v_add_u32_e32 v4, vcc, 1, v2
+; GCN-NEXT:    v_add_u32_e32 v5, vcc, -1, v2
+; GCN-NEXT:    v_sub_u32_e32 v6, vcc, s6, v3
+; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, s4, v3
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v0
+; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 ptrtoint (i32 addrspace(1)* @gv to i32), i32 5
+  %op = sdiv i32 1000000, %select
+  ret i32 %op
+}
+
+define i32 @select_sdiv_lhs_opaque_const1_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_lhs_opaque_const1_i32(
+; IR-NEXT:    [[SELECT:%.*]] = select i1 [[COND:%.*]], i32 5, i32 ptrtoint (i32 addrspace(1)* @gv to i32)
+; IR-NEXT:    [[TMP1:%.*]] = ashr i32 [[SELECT]], 31
+; IR-NEXT:    [[TMP2:%.*]] = xor i32 0, [[TMP1]]
+; IR-NEXT:    [[TMP3:%.*]] = add i32 [[SELECT]], [[TMP1]]
+; IR-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1]]
+; IR-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP4]] to float
+; IR-NEXT:    [[TMP6:%.*]] = fdiv fast float 1.000000e+00, [[TMP5]]
+; IR-NEXT:    [[TMP7:%.*]] = fmul fast float [[TMP6]], 0x41F0000000000000
+; IR-NEXT:    [[TMP8:%.*]] = fptoui float [[TMP7]] to i32
+; IR-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+; IR-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP4]] to i64
+; IR-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP9]], [[TMP10]]
+; IR-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
+; IR-NEXT:    [[TMP13:%.*]] = lshr i64 [[TMP11]], 32
+; IR-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
+; IR-NEXT:    [[TMP15:%.*]] = sub i32 0, [[TMP12]]
+; IR-NEXT:    [[TMP16:%.*]] = icmp eq i32 [[TMP14]], 0
+; IR-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 [[TMP12]]
+; IR-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
+; IR-NEXT:    [[TMP19:%.*]] = zext i32 [[TMP8]] to i64
+; IR-NEXT:    [[TMP20:%.*]] = mul i64 [[TMP18]], [[TMP19]]
+; IR-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-NEXT:    [[TMP22:%.*]] = lshr i64 [[TMP20]], 32
+; IR-NEXT:    [[TMP23:%.*]] = trunc i64 [[TMP22]] to i32
+; IR-NEXT:    [[TMP24:%.*]] = add i32 [[TMP8]], [[TMP23]]
+; IR-NEXT:    [[TMP25:%.*]] = sub i32 [[TMP8]], [[TMP23]]
+; IR-NEXT:    [[TMP26:%.*]] = select i1 [[TMP16]], i32 [[TMP24]], i32 [[TMP25]]
+; IR-NEXT:    [[TMP27:%.*]] = zext i32 [[TMP26]] to i64
+; IR-NEXT:    [[TMP28:%.*]] = mul i64 [[TMP27]], 1000000
+; IR-NEXT:    [[TMP29:%.*]] = trunc i64 [[TMP28]] to i32
+; IR-NEXT:    [[TMP30:%.*]] = lshr i64 [[TMP28]], 32
+; IR-NEXT:    [[TMP31:%.*]] = trunc i64 [[TMP30]] to i32
+; IR-NEXT:    [[TMP32:%.*]] = mul i32 [[TMP31]], [[TMP4]]
+; IR-NEXT:    [[TMP33:%.*]] = sub i32 1000000, [[TMP32]]
+; IR-NEXT:    [[TMP34:%.*]] = icmp uge i32 [[TMP33]], [[TMP4]]
+; IR-NEXT:    [[TMP35:%.*]] = select i1 [[TMP34]], i32 -1, i32 0
+; IR-NEXT:    [[TMP36:%.*]] = icmp uge i32 1000000, [[TMP32]]
+; IR-NEXT:    [[TMP37:%.*]] = select i1 [[TMP36]], i32 -1, i32 0
+; IR-NEXT:    [[TMP38:%.*]] = and i32 [[TMP35]], [[TMP37]]
+; IR-NEXT:    [[TMP39:%.*]] = icmp eq i32 [[TMP38]], 0
+; IR-NEXT:    [[TMP40:%.*]] = add i32 [[TMP31]], 1
+; IR-NEXT:    [[TMP41:%.*]] = sub i32 [[TMP31]], 1
+; IR-NEXT:    [[TMP42:%.*]] = select i1 [[TMP39]], i32 [[TMP31]], i32 [[TMP40]]
+; IR-NEXT:    [[TMP43:%.*]] = select i1 [[TMP36]], i32 [[TMP42]], i32 [[TMP41]]
+; IR-NEXT:    [[TMP44:%.*]] = xor i32 [[TMP43]], [[TMP2]]
+; IR-NEXT:    [[TMP45:%.*]] = sub i32 [[TMP44]], [[TMP2]]
+; IR-NEXT:    ret i32 [[TMP45]]
+;
+; GCN-LABEL: select_sdiv_lhs_opaque_const1_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_getpc_b64 s[4:5]
+; GCN-NEXT:    s_add_u32 s4, s4, gv@gotpcrel32@lo+4
+; GCN-NEXT:    s_addc_u32 s5, s5, gv@gotpcrel32@hi+4
+; GCN-NEXT:    s_load_dword s4, s[4:5], 0x0
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    s_mov_b32 s6, 0xf4240
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v1, s4
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v1, 5, vcc
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GCN-NEXT:    v_mul_f32_e32 v2, 0x4f800000, v2
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_mul_lo_u32 v3, v2, v0
+; GCN-NEXT:    v_mul_hi_u32 v4, v2, v0
+; GCN-NEXT:    v_sub_u32_e32 v5, vcc, 0, v3
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GCN-NEXT:    v_mul_hi_u32 v3, v3, v2
+; GCN-NEXT:    v_add_u32_e64 v4, s[4:5], v2, v3
+; GCN-NEXT:    v_sub_u32_e64 v2, s[4:5], v2, v3
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT:    v_mul_hi_u32 v2, v2, s6
+; GCN-NEXT:    s_mov_b32 s4, 0xf4241
+; GCN-NEXT:    v_mul_lo_u32 v3, v2, v0
+; GCN-NEXT:    v_add_u32_e32 v4, vcc, 1, v2
+; GCN-NEXT:    v_add_u32_e32 v5, vcc, -1, v2
+; GCN-NEXT:    v_sub_u32_e32 v6, vcc, s6, v3
+; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, s4, v3
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v0
+; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 5, i32 ptrtoint (i32 addrspace(1)* @gv to i32)
+  %op = sdiv i32 1000000, %select
+  ret i32 %op
+}
+
+define i32 @select_sdiv_rhs_opaque_const0_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_rhs_opaque_const0_i32(
+; IR-NEXT:    [[SELECT:%.*]] = select i1 [[COND:%.*]], i32 ptrtoint (i32 addrspace(1)* @gv to i32), i32 234234
+; IR-NEXT:    [[OP:%.*]] = sdiv i32 [[SELECT]], 42
+; IR-NEXT:    ret i32 [[OP]]
+;
+; GCN-LABEL: select_sdiv_rhs_opaque_const0_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_getpc_b64 s[4:5]
+; GCN-NEXT:    s_add_u32 s4, s4, gv@gotpcrel32@lo+4
+; GCN-NEXT:    s_addc_u32 s5, s5, gv@gotpcrel32@hi+4
+; GCN-NEXT:    s_load_dword s4, s[4:5], 0x0
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x392fa
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    s_mov_b32 s5, 0x30c30c31
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    v_mul_hi_i32 v0, v0, s5
+; GCN-NEXT:    v_lshrrev_b32_e32 v1, 31, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, 3, v0
+; GCN-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 ptrtoint (i32 addrspace(1)* @gv to i32), i32 234234
+  %op = sdiv i32 %select, 42
+  ret i32 %op
+}
+
+define i32 @select_sdiv_rhs_opaque_const1_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_rhs_opaque_const1_i32(
+; IR-NEXT:    [[SELECT:%.*]] = select i1 [[COND:%.*]], i32 42000, i32 ptrtoint (i32 addrspace(1)* @gv to i32)
+; IR-NEXT:    [[OP:%.*]] = sdiv i32 [[SELECT]], 42
+; IR-NEXT:    ret i32 [[OP]]
+;
+; GCN-LABEL: select_sdiv_rhs_opaque_const1_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_getpc_b64 s[4:5]
+; GCN-NEXT:    s_add_u32 s4, s4, gv@gotpcrel32@lo+4
+; GCN-NEXT:    s_addc_u32 s5, s5, gv@gotpcrel32@hi+4
+; GCN-NEXT:    s_load_dword s4, s[4:5], 0x0
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0xa410
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    s_mov_b32 s5, 0x30c30c31
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT:    v_mul_hi_i32 v0, v0, s5
+; GCN-NEXT:    v_lshrrev_b32_e32 v1, 31, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, 3, v0
+; GCN-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 42000, i32 ptrtoint (i32 addrspace(1)* @gv to i32)
+  %op = sdiv i32 %select, 42
+  ret i32 %op
+}
+
+define i32 @select_add_lhs_const_i32(i1 %cond) {
+; IR-LABEL: @select_add_lhs_const_i32(
+; IR-NEXT:    [[OP:%.*]] = select i1 [[COND:%.*]], i32 1000005, i32 1000008
+; IR-NEXT:    ret i32 [[OP]]
+;
+; GCN-LABEL: select_add_lhs_const_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0xf4248
+; GCN-NEXT:    v_mov_b32_e32 v2, 0xf4245
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 5, i32 8
+  %op = add i32 1000000, %select
+  ret i32 %op
+}
+
+define float @select_fadd_lhs_const_i32_fmf(i1 %cond) {
+; IR-LABEL: @select_fadd_lhs_const_i32_fmf(
+; IR-NEXT:    [[OP:%.*]] = select nnan nsz i1 [[COND:%.*]], float 3.000000e+00, float 5.000000e+00
+; IR-NEXT:    ret float [[OP]]
+; GCN-LABEL: select_fadd_lhs_const_i32_fmf:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x40a00000
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x40400000
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %select = select i1 %cond, float 2.0, float 4.0
+  %op = fadd nnan nsz float 1.0, %select
+  ret float %op
+}
+
+; Make sure we don't try to use mul24 instead
+define i32 @select_mul_lhs_const_i32(i1 %cond) {
+; GCN-LABEL: select_mul_lhs_const_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x1f40
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x1388
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+; IR-LABEL: @select_mul_lhs_const_i32(
+; IR-NEXT:    [[OP:%.*]] = select i1 [[COND:%.*]], i32 5000, i32 8000
+; IR-NEXT:    ret i32 [[OP]]
+  %select = select i1 %cond, i32 5, i32 8
+  %op = mul i32 1000, %select
+  ret i32 %op
+}
+
+; Make sure we don't try to use mul24 instead
+define i32 @select_mul_rhs_const_i32(i1 %cond) {
+; GCN-LABEL: select_mul_rhs_const_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x1f40
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x1388
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+; IR-LABEL: @select_mul_rhs_const_i32(
+; IR-NEXT:    [[OP:%.*]] = select i1 [[COND:%.*]], i32 5000, i32 8000
+; IR-NEXT:    ret i32 [[OP]]
+  %select = select i1 %cond, i32 5, i32 8
+  %op = mul i32 %select, 1000
+  ret i32 %op
+}
+
+; FIXME: Truncate from promoted select blocks this.
+define amdgpu_kernel void @select_add_lhs_const_i16(i1 %cond) {
+; GCN-LABEL: select_add_lhs_const_i16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s0, s[4:5], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0x83
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x80
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s0, 1, s0
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 1
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    flat_store_short v[0:1], v0
+; GCN-NEXT:    s_endpgm
+; IR-LABEL: @select_add_lhs_const_i16(
+; IR-NEXT:    [[TMP1:%.*]] = select i1 [[COND:%.*]], i32 5, i32 8
+; IR-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; IR-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP2]] to i32
+; IR-NEXT:    [[TMP4:%.*]] = add nuw nsw i32 [[TMP3]], 123
+; IR-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
+; IR-NEXT:    store i16 [[TMP5]], i16 addrspace(1)* undef
+; IR-NEXT:    ret void
+  %select = select i1 %cond, i16 5, i16 8
+  %op = add i16 %select, 123
+  store i16 %op, i16 addrspace(1)* undef
+  ret void
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
index 85f198702d0e..05e971124f1f 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
@@ -155,18 +155,27 @@ define amdgpu_kernel void @sel_constants_sub_constant_sel_constants_v4i32(<4 x i
   ret void
 }
 
-; GCN-LABEL: {{^}}sdiv_constant_sel_constants:
+; GCN-LABEL: {{^}}sdiv_constant_sel_constants_i64:
 ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 5, 0,
-define amdgpu_kernel void @sdiv_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
+define amdgpu_kernel void @sdiv_constant_sel_constants_i64(i64 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i64 121, i64 23
   %bo = sdiv i64 120, %sel
   store i64 %bo, i64 addrspace(1)* %p, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}udiv_constant_sel_constants:
+; GCN-LABEL: {{^}}sdiv_constant_sel_constants_i32:
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 8, 26,
+define amdgpu_kernel void @sdiv_constant_sel_constants_i32(i32 addrspace(1)* %p, i1 %cond) {
+  %sel = select i1 %cond, i32 7, i32 23
+  %bo = sdiv i32 184, %sel
+  store i32 %bo, i32 addrspace(1)* %p, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}udiv_constant_sel_constants_i64:
 ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 5, 0,
-define amdgpu_kernel void @udiv_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
+define amdgpu_kernel void @udiv_constant_sel_constants_i64(i64 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i64 -4, i64 23
   %bo = udiv i64 120, %sel
   store i64 %bo, i64 addrspace(1)* %p, align 8

diff  --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
index 3dc547dd63c8..13b320567b59 100644
--- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
@@ -439,8 +439,8 @@ define amdgpu_kernel void @add_select_negliteralk_negliteralk_f32(i32 %c) #0 {
 ; GCN-LABEL: {{^}}add_select_fneg_negk_negk_f32:
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 
-; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
-; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
+; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], 1.0, 2.0, s
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
 define amdgpu_kernel void @add_select_fneg_negk_negk_f32(i32 %c) #0 {
   %x = load volatile float, float addrspace(1)* undef
   %cmp = icmp eq i32 %c, 0


        


More information about the llvm-commits mailing list