[llvm] r335868 - [AMDGPU] Early expansion of 32 bit udiv/urem

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 28 08:59:18 PDT 2018


Author: rampitec
Date: Thu Jun 28 08:59:18 2018
New Revision: 335868

URL: http://llvm.org/viewvc/llvm-project?rev=335868&view=rev
Log:
[AMDGPU] Early expansion of 32 bit udiv/urem

This allows hoisting of common code, for instance if the denominator
is loop invariant. The current change is expansion only; adding licm to
the target pass list is going to be a separate patch. With this patch
the changes to codegen are minor, as the expansion is similar to that
on the DAG. The DAG expansion must still remain for R600.

Differential Revision: https://reviews.llvm.org/D48586

Added:
    llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
    llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/dagcombine-select.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp?rev=335868&r1=335867&r2=335868&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp Thu Jun 28 08:59:18 2018
@@ -19,6 +19,7 @@
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Analysis/DivergenceAnalysis.h"
 #include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/IR/Attributes.h"
@@ -131,6 +132,15 @@ class AMDGPUCodeGenPrepare : public Func
   ///
   /// \returns True.
   bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;
+
+  /// Expands 24 bit div or rem.
+  Value* expandDivRem24(IRBuilder<> &Builder, Value *Num, Value *Den,
+                        bool IsDiv, bool IsSigned) const;
+
+  /// Expands 32 bit div or rem.
+  Value* expandDivRem32(IRBuilder<> &Builder, Instruction::BinaryOps Opc,
+                        Value *Num, Value *Den) const;
+
   /// Widen a scalar load.
   ///
   /// \details \p Widen scalar load for uniform, small type loads from constant
@@ -256,7 +266,9 @@ bool AMDGPUCodeGenPrepare::promoteUnifor
          "I does not need promotion to i32");
 
   if (I.getOpcode() == Instruction::SDiv ||
-      I.getOpcode() == Instruction::UDiv)
+      I.getOpcode() == Instruction::UDiv ||
+      I.getOpcode() == Instruction::SRem ||
+      I.getOpcode() == Instruction::URem)
     return false;
 
   IRBuilder<> Builder(&I);
@@ -467,12 +479,312 @@ static bool hasUnsafeFPMath(const Functi
   return Attr.getValueAsString() == "true";
 }
 
+static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
+                                          Value *LHS, Value *RHS) {
+  Type *I32Ty = Builder.getInt32Ty();
+  Type *I64Ty = Builder.getInt64Ty();
+
+  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
+  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
+  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
+  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
+  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
+  Hi = Builder.CreateTrunc(Hi, I32Ty);
+  return std::make_pair(Lo, Hi);
+}
+
+static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
+  return getMul64(Builder, LHS, RHS).second;
+}
+
+// The fractional part of a float is enough to accurately represent up to
+// a 24-bit signed integer.
+Value* AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
+                                            Value *Num, Value *Den,
+                                            bool IsDiv, bool IsSigned) const {
+  assert(Num->getType()->isIntegerTy(32));
+
+  const DataLayout &DL = Mod->getDataLayout();
+  unsigned LHSSignBits = ComputeNumSignBits(Num, DL);
+  if (LHSSignBits < 9)
+    return nullptr;
+
+  unsigned RHSSignBits = ComputeNumSignBits(Den, DL);
+  if (RHSSignBits < 9)
+    return nullptr;
+
+
+  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
+  unsigned DivBits = 32 - SignBits;
+  if (IsSigned)
+    ++DivBits;
+
+  Type *Ty = Num->getType();
+  Type *I32Ty = Builder.getInt32Ty();
+  Type *F32Ty = Builder.getFloatTy();
+  ConstantInt *One = Builder.getInt32(1);
+  Value *JQ = One;
+
+  if (IsSigned) {
+    // char|short jq = ia ^ ib;
+    JQ = Builder.CreateXor(Num, Den);
+
+    // jq = jq >> (bitsize - 2)
+    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));
+
+    // jq = jq | 0x1
+    JQ = Builder.CreateOr(JQ, One);
+  }
+
+  // int ia = (int)LHS;
+  Value *IA = Num;
+
+  // int ib = (int)RHS;
+  Value *IB = Den;
+
+  // float fa = (float)ia;
+  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
+                       : Builder.CreateUIToFP(IA, F32Ty);
+
+  // float fb = (float)ib;
+  Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
+                       : Builder.CreateUIToFP(IB,F32Ty);
+
+  Value *RCP = Builder.CreateFDiv(ConstantFP::get(F32Ty, 1.0), FB);
+  Value *FQM = Builder.CreateFMul(FA, RCP);
+
+  // fq = trunc(fqm);
+  CallInst* FQ = Builder.CreateIntrinsic(Intrinsic::trunc, { FQM });
+  FQ->copyFastMathFlags(Builder.getFastMathFlags());
+
+  // float fqneg = -fq;
+  Value *FQNeg = Builder.CreateFNeg(FQ);
+
+  // float fr = mad(fqneg, fb, fa);
+  Value *FR = Builder.CreateIntrinsic(Intrinsic::amdgcn_fmad_ftz,
+                                      { FQNeg, FB, FA }, FQ);
+
+  // int iq = (int)fq;
+  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
+                       : Builder.CreateFPToUI(FQ, I32Ty);
+
+  // fr = fabs(fr);
+  FR = Builder.CreateIntrinsic(Intrinsic::fabs, { FR }, FQ);
+
+  // fb = fabs(fb);
+  FB = Builder.CreateIntrinsic(Intrinsic::fabs, { FB }, FQ);
+
+  // int cv = fr >= fb;
+  Value *CV = Builder.CreateFCmpOGE(FR, FB);
+
+  // jq = (cv ? jq : 0);
+  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));
+
+  // dst = iq + jq;
+  Value *Div = Builder.CreateAdd(IQ, JQ);
+
+  Value *Res = Div;
+  if (!IsDiv) {
+    // Rem needs compensation, it's easier to recompute it
+    Value *Rem = Builder.CreateMul(Div, Den);
+    Res = Builder.CreateSub(Num, Rem);
+  }
+
+  // Truncate to number of bits this divide really is.
+  if (IsSigned) {
+    Res = Builder.CreateTrunc(Res, Builder.getIntNTy(DivBits));
+    Res = Builder.CreateSExt(Res, Ty);
+  } else {
+    ConstantInt *TruncMask = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
+    Res = Builder.CreateAnd(Res, TruncMask);
+  }
+
+  return Res;
+}
+
+Value* AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
+                                            Instruction::BinaryOps Opc,
+                                            Value *Num, Value *Den) const {
+  assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
+         Opc == Instruction::SRem || Opc == Instruction::SDiv);
+
+  FastMathFlags FMF;
+  FMF.setFast();
+  Builder.setFastMathFlags(FMF);
+
+  if (isa<Constant>(Den))
+    return nullptr; // Keep it for optimization
+
+  bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
+  bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;
+
+  Type *Ty = Num->getType();
+  Type *I32Ty = Builder.getInt32Ty();
+  Type *F32Ty = Builder.getFloatTy();
+
+  if (Ty->getScalarSizeInBits() < 32) {
+    if (IsSigned) {
+      Num = Builder.CreateSExt(Num, I32Ty);
+      Den = Builder.CreateSExt(Den, I32Ty);
+    } else {
+      Num = Builder.CreateZExt(Num, I32Ty);
+      Den = Builder.CreateZExt(Den, I32Ty);
+    }
+  }
+
+  if (Value *Res = expandDivRem24(Builder, Num, Den, IsDiv, IsSigned)) {
+    Res = Builder.CreateTrunc(Res, Ty);
+    return Res;
+  }
+
+  ConstantInt *Zero = Builder.getInt32(0);
+  ConstantInt *One = Builder.getInt32(1);
+  ConstantInt *MinusOne = Builder.getInt32(~0);
+
+  Value *Sign = nullptr;
+  if (IsSigned) {
+    ConstantInt *K31 = Builder.getInt32(31);
+    Value *LHSign = Builder.CreateAShr(Num, K31);
+    Value *RHSign = Builder.CreateAShr(Den, K31);
+    // Remainder sign is the same as LHS
+    Sign = IsDiv ? Builder.CreateXor(LHSign, RHSign) : LHSign;
+
+    Num = Builder.CreateAdd(Num, LHSign);
+    Den = Builder.CreateAdd(Den, RHSign);
+
+    Num = Builder.CreateXor(Num, LHSign);
+    Den = Builder.CreateXor(Den, RHSign);
+  }
+
+  // RCP =  URECIP(Den) = 2^32 / Den + e
+  // e is rounding error.
+  Value *DEN_F32 = Builder.CreateUIToFP(Den, F32Ty);
+  Value *RCP_F32 = Builder.CreateFDiv(ConstantFP::get(F32Ty, 1.0), DEN_F32);
+  Constant *UINT_MAX_PLUS_1 = ConstantFP::get(F32Ty, BitsToFloat(0x4f800000));
+  Value *RCP_SCALE = Builder.CreateFMul(RCP_F32, UINT_MAX_PLUS_1);
+  Value *RCP = Builder.CreateFPToUI(RCP_SCALE, I32Ty);
+
+  // RCP_LO, RCP_HI = mul(RCP, Den) */
+  Value *RCP_LO, *RCP_HI;
+  std::tie(RCP_LO, RCP_HI) = getMul64(Builder, RCP, Den);
+
+  // NEG_RCP_LO = -RCP_LO
+  Value *NEG_RCP_LO = Builder.CreateNeg(RCP_LO);
+
+  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
+  Value *RCP_HI_0_CC = Builder.CreateICmpEQ(RCP_HI, Zero);
+  Value *ABS_RCP_LO = Builder.CreateSelect(RCP_HI_0_CC, NEG_RCP_LO, RCP_LO);
+
+  // Calculate the rounding error from the URECIP instruction
+  // E = mulhu(ABS_RCP_LO, RCP)
+  Value *E = getMulHu(Builder, ABS_RCP_LO, RCP);
+
+  // RCP_A_E = RCP + E
+  Value *RCP_A_E = Builder.CreateAdd(RCP, E);
+
+  // RCP_S_E = RCP - E
+  Value *RCP_S_E = Builder.CreateSub(RCP, E);
+
+  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
+  Value *Tmp0 = Builder.CreateSelect(RCP_HI_0_CC, RCP_A_E, RCP_S_E);
+
+  // Quotient = mulhu(Tmp0, Num)
+  Value *Quotient = getMulHu(Builder, Tmp0, Num);
+
+  // Num_S_Remainder = Quotient * Den
+  Value *Num_S_Remainder = Builder.CreateMul(Quotient, Den);
+
+  // Remainder = Num - Num_S_Remainder
+  Value *Remainder = Builder.CreateSub(Num, Num_S_Remainder);
+
+  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
+  Value *Rem_GE_Den_CC = Builder.CreateICmpUGE(Remainder, Den);
+  Value *Remainder_GE_Den = Builder.CreateSelect(Rem_GE_Den_CC, MinusOne, Zero);
+
+  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
+  Value *Num_GE_Num_S_Rem_CC = Builder.CreateICmpUGE(Num, Num_S_Remainder);
+  Value *Remainder_GE_Zero = Builder.CreateSelect(Num_GE_Num_S_Rem_CC,
+                                                  MinusOne, Zero);
+
+  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
+  Value *Tmp1 = Builder.CreateAnd(Remainder_GE_Den, Remainder_GE_Zero);
+  Value *Tmp1_0_CC = Builder.CreateICmpEQ(Tmp1, Zero);
+
+  Value *Res;
+  if (IsDiv) {
+    // Quotient_A_One = Quotient + 1
+    Value *Quotient_A_One = Builder.CreateAdd(Quotient, One);
+
+    // Quotient_S_One = Quotient - 1
+    Value *Quotient_S_One = Builder.CreateSub(Quotient, One);
+
+    // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
+    Value *Div = Builder.CreateSelect(Tmp1_0_CC, Quotient, Quotient_A_One);
+
+    // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
+    Res = Builder.CreateSelect(Num_GE_Num_S_Rem_CC, Div, Quotient_S_One);
+  } else {
+    // Remainder_S_Den = Remainder - Den
+    Value *Remainder_S_Den = Builder.CreateSub(Remainder, Den);
+
+    // Remainder_A_Den = Remainder + Den
+    Value *Remainder_A_Den = Builder.CreateAdd(Remainder, Den);
+
+    // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
+    Value *Rem = Builder.CreateSelect(Tmp1_0_CC, Remainder, Remainder_S_Den);
+
+    // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
+    Res = Builder.CreateSelect(Num_GE_Num_S_Rem_CC, Rem, Remainder_A_Den);
+  }
+
+  if (IsSigned) {
+    Res = Builder.CreateXor(Res, Sign);
+    Res = Builder.CreateSub(Res, Sign);
+  }
+
+  Res = Builder.CreateTrunc(Res, Ty);
+
+  return Res;
+}
+
 bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
+  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
+      DA->isUniform(&I) && promoteUniformOpToI32(I))
+    return true;
+
   bool Changed = false;
+  Instruction::BinaryOps Opc = I.getOpcode();
+  Type *Ty = I.getType();
+  Value *NewDiv = nullptr;
+  if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
+       Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
+      Ty->getScalarSizeInBits() <= 32) {
+    Value *Num = I.getOperand(0);
+    Value *Den = I.getOperand(1);
+    IRBuilder<> Builder(&I);
+    Builder.SetCurrentDebugLocation(I.getDebugLoc());
+
+    if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
+      NewDiv = UndefValue::get(VT);
+
+      for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
+        Value *NumEltI = Builder.CreateExtractElement(Num, I);
+        Value *DenEltI = Builder.CreateExtractElement(Den, I);
+        Value *NewElt = expandDivRem32(Builder, Opc, NumEltI, DenEltI);
+        if (!NewElt)
+          NewElt = Builder.CreateBinOp(Opc, NumEltI, DenEltI);
+        NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, I);
+      }
+    } else {
+      NewDiv = expandDivRem32(Builder, Opc, Num, Den);
+    }
 
-  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
-      DA->isUniform(&I))
-    Changed |= promoteUniformOpToI32(I);
+    if (NewDiv) {
+      I.replaceAllUsesWith(NewDiv);
+      I.eraseFromParent();
+      Changed = true;
+    }
+  }
 
   return Changed;
 }

Modified: llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll?rev=335868&r1=335867&r2=335868&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll Thu Jun 28 08:59:18 2018
@@ -169,34 +169,6 @@ define amdgpu_kernel void @mul_nuw_nsw_i
   ret void
 }
 
-; GCN-LABEL: @urem_i3(
-; SI: %r = urem i3 %a, %b
-; SI-NEXT: store volatile i3 %r
-; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
-; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = urem i32 %[[A_32]], %[[B_32]]
-; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
-; VI-NEXT: store volatile i3 %[[R_3]]
-define amdgpu_kernel void @urem_i3(i3 %a, i3 %b) {
-  %r = urem i3 %a, %b
-  store volatile i3 %r, i3 addrspace(1)* undef
-  ret void
-}
-
-; GCN-LABEL: @srem_i3(
-; SI: %r = srem i3 %a, %b
-; SI-NEXT: store volatile i3 %r
-; VI: %[[A_32:[0-9]+]] = sext i3 %a to i32
-; VI-NEXT: %[[B_32:[0-9]+]] = sext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = srem i32 %[[A_32]], %[[B_32]]
-; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
-; VI-NEXT: store volatile i3 %[[R_3]]
-define amdgpu_kernel void @srem_i3(i3 %a, i3 %b) {
-  %r = srem i3 %a, %b
-  store volatile i3 %r, i3 addrspace(1)* undef
-  ret void
-}
-
 ; GCN-LABEL: @shl_i3(
 ; SI: %r = shl i3 %a, %b
 ; SI-NEXT: store volatile i3 %r
@@ -748,34 +720,6 @@ define amdgpu_kernel void @mul_nuw_nsw_i
   ret void
 }
 
-; GCN-LABEL: @urem_i16(
-; SI: %r = urem i16 %a, %b
-; SI-NEXT: store volatile i16 %r
-; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
-; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = urem i32 %[[A_32]], %[[B_32]]
-; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
-; VI-NEXT: store volatile i16 %[[R_16]]
-define amdgpu_kernel void @urem_i16(i16 %a, i16 %b) {
-  %r = urem i16 %a, %b
-  store volatile i16 %r, i16 addrspace(1)* undef
-  ret void
-}
-
-; GCN-LABEL: @srem_i16(
-; SI: %r = srem i16 %a, %b
-; SI-NEXT: store volatile i16 %r
-; VI: %[[A_32:[0-9]+]] = sext i16 %a to i32
-; VI-NEXT: %[[B_32:[0-9]+]] = sext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = srem i32 %[[A_32]], %[[B_32]]
-; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
-; VI-NEXT: store volatile i16 %[[R_16]]
-define amdgpu_kernel void @srem_i16(i16 %a, i16 %b) {
-  %r = srem i16 %a, %b
-  store volatile i16 %r, i16 addrspace(1)* undef
-  ret void
-}
-
 ; GCN-LABEL: @shl_i16(
 ; SI: %r = shl i16 %a, %b
 ; SI-NEXT: store volatile i16 %r
@@ -1312,34 +1256,6 @@ define amdgpu_kernel void @mul_nuw_nsw_3
   ret void
 }
 
-; GCN-LABEL: @urem_3xi15(
-; SI: %r = urem <3 x i15> %a, %b
-; SI-NEXT: store volatile <3 x i15> %r
-; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
-; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = urem <3 x i32> %[[A_32]], %[[B_32]]
-; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
-; VI-NEXT: store volatile <3 x i15> %[[R_15]]
-define amdgpu_kernel void @urem_3xi15(<3 x i15> %a, <3 x i15> %b) {
-  %r = urem <3 x i15> %a, %b
-  store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef
-  ret void
-}
-
-; GCN-LABEL: @srem_3xi15(
-; SI: %r = srem <3 x i15> %a, %b
-; SI-NEXT: store volatile <3 x i15> %r
-; VI: %[[A_32:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
-; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = srem <3 x i32> %[[A_32]], %[[B_32]]
-; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
-; VI-NEXT: store volatile <3 x i15> %[[R_15]]
-define amdgpu_kernel void @srem_3xi15(<3 x i15> %a, <3 x i15> %b) {
-  %r = srem <3 x i15> %a, %b
-  store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef
-  ret void
-}
-
 ; GCN-LABEL: @shl_3xi15(
 ; SI: %r = shl <3 x i15> %a, %b
 ; SI-NEXT: store volatile <3 x i15> %r
@@ -1866,34 +1782,6 @@ define amdgpu_kernel void @mul_nuw_nsw_3
   store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef
   ret void
 }
-
-; GCN-LABEL: @urem_3xi16(
-; SI: %r = urem <3 x i16> %a, %b
-; SI-NEXT: store volatile <3 x i16> %r
-; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
-; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = urem <3 x i32> %[[A_32]], %[[B_32]]
-; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
-; VI-NEXT: store volatile <3 x i16> %[[R_16]]
-define amdgpu_kernel void @urem_3xi16(<3 x i16> %a, <3 x i16> %b) {
-  %r = urem <3 x i16> %a, %b
-  store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef
-  ret void
-}
-
-; GCN-LABEL: @srem_3xi16(
-; SI: %r = srem <3 x i16> %a, %b
-; SI-NEXT: store volatile <3 x i16> %r
-; VI: %[[A_32:[0-9]+]] = sext <3 x i16> %a to <3 x i32>
-; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = srem <3 x i32> %[[A_32]], %[[B_32]]
-; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
-; VI-NEXT: store volatile <3 x i16> %[[R_16]]
-define amdgpu_kernel void @srem_3xi16(<3 x i16> %a, <3 x i16> %b) {
-  %r = srem <3 x i16> %a, %b
-  store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef
-  ret void
-}
 
 ; GCN-LABEL: @shl_3xi16(
 ; SI: %r = shl <3 x i16> %a, %b

Added: llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll?rev=335868&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll Thu Jun 28 08:59:18 2018
@@ -0,0 +1,2415 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-- -amdgpu-codegenprepare %s | FileCheck %s
+
+define amdgpu_kernel void @udiv_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
+; CHECK-LABEL: @udiv_i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = uitofp i32 [[Y:%.*]] to float
+; CHECK-NEXT:    [[TMP2:%.*]] = fdiv fast float 1.000000e+00, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast float [[TMP2]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP4:%.*]] = fptoui float [[TMP3]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = zext i32 [[Y]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP9:%.*]] = lshr i64 [[TMP7]], 32
+; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = sub i32 0, [[TMP8]]
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP11]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; CHECK-NEXT:    [[TMP18:%.*]] = lshr i64 [[TMP16]], 32
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP4]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = sub i32 [[TMP4]], [[TMP19]]
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP12]], i32 [[TMP20]], i32 [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = zext i32 [[X:%.*]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = mul i64 [[TMP23]], [[TMP24]]
+; CHECK-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = lshr i64 [[TMP25]], 32
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = mul i32 [[TMP28]], [[Y]]
+; CHECK-NEXT:    [[TMP30:%.*]] = sub i32 [[X]], [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = icmp uge i32 [[TMP30]], [[Y]]
+; CHECK-NEXT:    [[TMP32:%.*]] = select i1 [[TMP31]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP33:%.*]] = icmp uge i32 [[X]], [[TMP29]]
+; CHECK-NEXT:    [[TMP34:%.*]] = select i1 [[TMP33]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP35:%.*]] = and i32 [[TMP32]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = icmp eq i32 [[TMP35]], 0
+; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP28]], 1
+; CHECK-NEXT:    [[TMP38:%.*]] = sub i32 [[TMP28]], 1
+; CHECK-NEXT:    [[TMP39:%.*]] = select i1 [[TMP36]], i32 [[TMP28]], i32 [[TMP37]]
+; CHECK-NEXT:    [[TMP40:%.*]] = select i1 [[TMP33]], i32 [[TMP39]], i32 [[TMP38]]
+; CHECK-NEXT:    store i32 [[TMP40]], i32 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = udiv i32 %x, %y
+  store i32 %r, i32 addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @urem_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
+; CHECK-LABEL: @urem_i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = uitofp i32 [[Y:%.*]] to float
+; CHECK-NEXT:    [[TMP2:%.*]] = fdiv fast float 1.000000e+00, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast float [[TMP2]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP4:%.*]] = fptoui float [[TMP3]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = zext i32 [[Y]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP9:%.*]] = lshr i64 [[TMP7]], 32
+; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = sub i32 0, [[TMP8]]
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP11]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; CHECK-NEXT:    [[TMP18:%.*]] = lshr i64 [[TMP16]], 32
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP4]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = sub i32 [[TMP4]], [[TMP19]]
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP12]], i32 [[TMP20]], i32 [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = zext i32 [[X:%.*]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = mul i64 [[TMP23]], [[TMP24]]
+; CHECK-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = lshr i64 [[TMP25]], 32
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = mul i32 [[TMP28]], [[Y]]
+; CHECK-NEXT:    [[TMP30:%.*]] = sub i32 [[X]], [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = icmp uge i32 [[TMP30]], [[Y]]
+; CHECK-NEXT:    [[TMP32:%.*]] = select i1 [[TMP31]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP33:%.*]] = icmp uge i32 [[X]], [[TMP29]]
+; CHECK-NEXT:    [[TMP34:%.*]] = select i1 [[TMP33]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP35:%.*]] = and i32 [[TMP32]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = icmp eq i32 [[TMP35]], 0
+; CHECK-NEXT:    [[TMP37:%.*]] = sub i32 [[TMP30]], [[Y]]
+; CHECK-NEXT:    [[TMP38:%.*]] = add i32 [[TMP30]], [[Y]]
+; CHECK-NEXT:    [[TMP39:%.*]] = select i1 [[TMP36]], i32 [[TMP30]], i32 [[TMP37]]
+; CHECK-NEXT:    [[TMP40:%.*]] = select i1 [[TMP33]], i32 [[TMP39]], i32 [[TMP38]]
+; CHECK-NEXT:    store i32 [[TMP40]], i32 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = urem i32 %x, %y
+  store i32 %r, i32 addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @sdiv_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
+; CHECK-LABEL: @sdiv_i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 31
+; CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[Y:%.*]], 31
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[X]], [[TMP1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[Y]], [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i32 [[TMP4]], [[TMP1]]
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i32 [[TMP5]], [[TMP2]]
+; CHECK-NEXT:    [[TMP8:%.*]] = uitofp i32 [[TMP7]] to float
+; CHECK-NEXT:    [[TMP9:%.*]] = fdiv fast float 1.000000e+00, [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = fmul fast float [[TMP9]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP11:%.*]] = fptoui float [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i64 [[TMP12]], [[TMP13]]
+; CHECK-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; CHECK-NEXT:    [[TMP18:%.*]] = sub i32 0, [[TMP15]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i32 [[TMP17]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP18]], i32 [[TMP15]]
+; CHECK-NEXT:    [[TMP21:%.*]] = zext i32 [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT:    [[TMP23:%.*]] = mul i64 [[TMP21]], [[TMP22]]
+; CHECK-NEXT:    [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
+; CHECK-NEXT:    [[TMP25:%.*]] = lshr i64 [[TMP23]], 32
+; CHECK-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = add i32 [[TMP11]], [[TMP26]]
+; CHECK-NEXT:    [[TMP28:%.*]] = sub i32 [[TMP11]], [[TMP26]]
+; CHECK-NEXT:    [[TMP29:%.*]] = select i1 [[TMP19]], i32 [[TMP27]], i32 [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = zext i32 [[TMP29]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT:    [[TMP32:%.*]] = mul i64 [[TMP30]], [[TMP31]]
+; CHECK-NEXT:    [[TMP33:%.*]] = trunc i64 [[TMP32]] to i32
+; CHECK-NEXT:    [[TMP34:%.*]] = lshr i64 [[TMP32]], 32
+; CHECK-NEXT:    [[TMP35:%.*]] = trunc i64 [[TMP34]] to i32
+; CHECK-NEXT:    [[TMP36:%.*]] = mul i32 [[TMP35]], [[TMP7]]
+; CHECK-NEXT:    [[TMP37:%.*]] = sub i32 [[TMP6]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = icmp uge i32 [[TMP37]], [[TMP7]]
+; CHECK-NEXT:    [[TMP39:%.*]] = select i1 [[TMP38]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP40:%.*]] = icmp uge i32 [[TMP6]], [[TMP36]]
+; CHECK-NEXT:    [[TMP41:%.*]] = select i1 [[TMP40]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP42:%.*]] = and i32 [[TMP39]], [[TMP41]]
+; CHECK-NEXT:    [[TMP43:%.*]] = icmp eq i32 [[TMP42]], 0
+; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP35]], 1
+; CHECK-NEXT:    [[TMP45:%.*]] = sub i32 [[TMP35]], 1
+; CHECK-NEXT:    [[TMP46:%.*]] = select i1 [[TMP43]], i32 [[TMP35]], i32 [[TMP44]]
+; CHECK-NEXT:    [[TMP47:%.*]] = select i1 [[TMP40]], i32 [[TMP46]], i32 [[TMP45]]
+; CHECK-NEXT:    [[TMP48:%.*]] = xor i32 [[TMP47]], [[TMP3]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i32 [[TMP48]], [[TMP3]]
+; CHECK-NEXT:    store i32 [[TMP49]], i32 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = sdiv i32 %x, %y
+  store i32 %r, i32 addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @srem_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
+; CHECK-LABEL: @srem_i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 31
+; CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[Y:%.*]], 31
+; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[X]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[Y]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i32 [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[TMP7:%.*]] = uitofp i32 [[TMP6]] to float
+; CHECK-NEXT:    [[TMP8:%.*]] = fdiv fast float 1.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP8]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = mul i64 [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
+; CHECK-NEXT:    [[TMP15:%.*]] = lshr i64 [[TMP13]], 32
+; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 0, [[TMP14]]
+; CHECK-NEXT:    [[TMP18:%.*]] = icmp eq i32 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP17]], i32 [[TMP14]]
+; CHECK-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = mul i64 [[TMP20]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i64 [[TMP22]] to i32
+; CHECK-NEXT:    [[TMP24:%.*]] = lshr i64 [[TMP22]], 32
+; CHECK-NEXT:    [[TMP25:%.*]] = trunc i64 [[TMP24]] to i32
+; CHECK-NEXT:    [[TMP26:%.*]] = add i32 [[TMP10]], [[TMP25]]
+; CHECK-NEXT:    [[TMP27:%.*]] = sub i32 [[TMP10]], [[TMP25]]
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP18]], i32 [[TMP26]], i32 [[TMP27]]
+; CHECK-NEXT:    [[TMP29:%.*]] = zext i32 [[TMP28]] to i64
+; CHECK-NEXT:    [[TMP30:%.*]] = zext i32 [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = mul i64 [[TMP29]], [[TMP30]]
+; CHECK-NEXT:    [[TMP32:%.*]] = trunc i64 [[TMP31]] to i32
+; CHECK-NEXT:    [[TMP33:%.*]] = lshr i64 [[TMP31]], 32
+; CHECK-NEXT:    [[TMP34:%.*]] = trunc i64 [[TMP33]] to i32
+; CHECK-NEXT:    [[TMP35:%.*]] = mul i32 [[TMP34]], [[TMP6]]
+; CHECK-NEXT:    [[TMP36:%.*]] = sub i32 [[TMP5]], [[TMP35]]
+; CHECK-NEXT:    [[TMP37:%.*]] = icmp uge i32 [[TMP36]], [[TMP6]]
+; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP37]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP39:%.*]] = icmp uge i32 [[TMP5]], [[TMP35]]
+; CHECK-NEXT:    [[TMP40:%.*]] = select i1 [[TMP39]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP41:%.*]] = and i32 [[TMP38]], [[TMP40]]
+; CHECK-NEXT:    [[TMP42:%.*]] = icmp eq i32 [[TMP41]], 0
+; CHECK-NEXT:    [[TMP43:%.*]] = sub i32 [[TMP36]], [[TMP6]]
+; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP36]], [[TMP6]]
+; CHECK-NEXT:    [[TMP45:%.*]] = select i1 [[TMP42]], i32 [[TMP36]], i32 [[TMP43]]
+; CHECK-NEXT:    [[TMP46:%.*]] = select i1 [[TMP39]], i32 [[TMP45]], i32 [[TMP44]]
+; CHECK-NEXT:    [[TMP47:%.*]] = xor i32 [[TMP46]], [[TMP1]]
+; CHECK-NEXT:    [[TMP48:%.*]] = sub i32 [[TMP47]], [[TMP1]]
+; CHECK-NEXT:    store i32 [[TMP48]], i32 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = srem i32 %x, %y
+  store i32 %r, i32 addrspace(1)* %out
+  ret void
+}
+
+; i16 udiv: both operands are zero-extended to i32 and the quotient is computed
+; with the float-reciprocal path (uitofp, fast 1.0/y, fmul, trunc, then an
+; fmad.ftz-based remainder estimate that conditionally bumps the quotient by 1),
+; after which the result is masked to 16 bits and truncated back to i16.
+; The expectation lines below are autogenerated — do not hand-edit them.
+define amdgpu_kernel void @udiv_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
+; CHECK-LABEL: @udiv_i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP5:%.*]] = fdiv fast float 1.000000e+00, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP8:%.*]] = fsub fast float -0.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 65535
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
+; CHECK-NEXT:    store i16 [[TMP17]], i16 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = udiv i16 %x, %y
+  store i16 %r, i16 addrspace(1)* %out
+  ret void
+}
+
+; i16 urem: same zero-extend + float-reciprocal quotient sequence as udiv_i16,
+; followed by the remainder reconstruction x - (q * y), masked to 16 bits and
+; truncated back to i16. Autogenerated expectations — do not hand-edit.
+define amdgpu_kernel void @urem_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
+; CHECK-LABEL: @urem_i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP5:%.*]] = fdiv fast float 1.000000e+00, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP8:%.*]] = fsub fast float -0.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 65535
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
+; CHECK-NEXT:    store i16 [[TMP19]], i16 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = urem i16 %x, %y
+  store i16 %r, i16 addrspace(1)* %out
+  ret void
+}
+
+; i16 sdiv: operands are sign-extended to i32; the result sign (+1/-1) is
+; derived up front from xor + ashr 30 + or 1, the magnitude comes from the
+; sitofp/fdiv-reciprocal sequence with an fmad.ftz rounding correction, and the
+; final value is trunc'd to i16 then sext/trunc'd again before the store.
+; Autogenerated expectations — do not hand-edit.
+define amdgpu_kernel void @sdiv_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
+; CHECK-LABEL: @sdiv_i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP8:%.*]] = fdiv fast float 1.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP11:%.*]] = fsub fast float -0.000000e+00, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
+; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
+; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
+; CHECK-NEXT:    [[TMP20:%.*]] = sext i16 [[TMP19]] to i32
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
+; CHECK-NEXT:    store i16 [[TMP21]], i16 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = sdiv i16 %x, %y
+  store i16 %r, i16 addrspace(1)* %out
+  ret void
+}
+
+; i16 srem: same sign-extended quotient sequence as sdiv_i16, then the
+; remainder is rebuilt as x - (q * y) and narrowed to i16 (trunc, sext, trunc)
+; before the store. Autogenerated expectations — do not hand-edit.
+define amdgpu_kernel void @srem_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
+; CHECK-LABEL: @srem_i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP8:%.*]] = fdiv fast float 1.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP11:%.*]] = fsub fast float -0.000000e+00, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
+; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
+; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], [[TMP2]]
+; CHECK-NEXT:    [[TMP20:%.*]] = sub i32 [[TMP1]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
+; CHECK-NEXT:    [[TMP22:%.*]] = sext i16 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
+; CHECK-NEXT:    store i16 [[TMP23]], i16 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = srem i16 %x, %y
+  store i16 %r, i16 addrspace(1)* %out
+  ret void
+}
+
+; i8 udiv: identical float-reciprocal expansion as udiv_i16, except the
+; operands are zero-extended from i8 and the result is masked with 255 before
+; truncating back to i8. Autogenerated expectations — do not hand-edit.
+define amdgpu_kernel void @udiv_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
+; CHECK-LABEL: @udiv_i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP5:%.*]] = fdiv fast float 1.000000e+00, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP8:%.*]] = fsub fast float -0.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 255
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
+; CHECK-NEXT:    store i8 [[TMP17]], i8 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = udiv i8 %x, %y
+  store i8 %r, i8 addrspace(1)* %out
+  ret void
+}
+
+; i8 urem: same as urem_i16 but zero-extending from i8; the remainder
+; x - (q * y) is masked with 255 and truncated back to i8.
+; Autogenerated expectations — do not hand-edit.
+define amdgpu_kernel void @urem_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
+; CHECK-LABEL: @urem_i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP5:%.*]] = fdiv fast float 1.000000e+00, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP8:%.*]] = fsub fast float -0.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 255
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i8
+; CHECK-NEXT:    store i8 [[TMP19]], i8 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = urem i8 %x, %y
+  store i8 %r, i8 addrspace(1)* %out
+  ret void
+}
+
+; i8 sdiv: same signed expansion as sdiv_i16 (sign computed via xor/ashr/or 1,
+; magnitude via the sitofp reciprocal path), narrowed to i8 with a
+; trunc/sext/trunc sequence. Autogenerated expectations — do not hand-edit.
+define amdgpu_kernel void @sdiv_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
+; CHECK-LABEL: @sdiv_i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i8 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP8:%.*]] = fdiv fast float 1.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP11:%.*]] = fsub fast float -0.000000e+00, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
+; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
+; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i8
+; CHECK-NEXT:    [[TMP20:%.*]] = sext i8 [[TMP19]] to i32
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i8
+; CHECK-NEXT:    store i8 [[TMP21]], i8 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = sdiv i8 %x, %y
+  store i8 %r, i8 addrspace(1)* %out
+  ret void
+}
+
+; i8 srem: same signed quotient sequence as sdiv_i8, then remainder rebuilt as
+; x - (q * y) and narrowed to i8 via trunc/sext/trunc before the store.
+; Autogenerated expectations — do not hand-edit.
+define amdgpu_kernel void @srem_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
+; CHECK-LABEL: @srem_i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i8 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP8:%.*]] = fdiv fast float 1.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP11:%.*]] = fsub fast float -0.000000e+00, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
+; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
+; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], [[TMP2]]
+; CHECK-NEXT:    [[TMP20:%.*]] = sub i32 [[TMP1]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i8
+; CHECK-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i8
+; CHECK-NEXT:    store i8 [[TMP23]], i8 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = srem i8 %x, %y
+  store i8 %r, i8 addrspace(1)* %out
+  ret void
+}
+
+; <4 x i32> udiv: the vector operation is fully scalarized — each lane is
+; extracted, divided with the full 32-bit expansion (float-reciprocal estimate
+; of 2^32/y, two rounds of mulhi-based refinement through i64 widening, then
+; quotient correction by +/-1 using the reconstructed remainder), and the
+; per-lane results are reassembled with insertelement. The four repeated
+; sections below correspond to lanes 0..3.
+; Autogenerated expectations — do not hand-edit.
+define amdgpu_kernel void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @udiv_v4i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i32> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP4:%.*]] = fdiv fast float 1.000000e+00, [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast float [[TMP4]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP6:%.*]] = fptoui float [[TMP5]] to i32
+; CHECK-NEXT:    [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP7]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = lshr i64 [[TMP9]], 32
+; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = sub i32 0, [[TMP10]]
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i32 [[TMP12]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP13]], i32 [[TMP10]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; CHECK-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; CHECK-NEXT:    [[TMP22:%.*]] = add i32 [[TMP6]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP6]], [[TMP21]]
+; CHECK-NEXT:    [[TMP24:%.*]] = select i1 [[TMP14]], i32 [[TMP22]], i32 [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = zext i32 [[TMP24]] to i64
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = mul i64 [[TMP25]], [[TMP26]]
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = lshr i64 [[TMP27]], 32
+; CHECK-NEXT:    [[TMP30:%.*]] = trunc i64 [[TMP29]] to i32
+; CHECK-NEXT:    [[TMP31:%.*]] = mul i32 [[TMP30]], [[TMP2]]
+; CHECK-NEXT:    [[TMP32:%.*]] = sub i32 [[TMP1]], [[TMP31]]
+; CHECK-NEXT:    [[TMP33:%.*]] = icmp uge i32 [[TMP32]], [[TMP2]]
+; CHECK-NEXT:    [[TMP34:%.*]] = select i1 [[TMP33]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP35:%.*]] = icmp uge i32 [[TMP1]], [[TMP31]]
+; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP35]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP37:%.*]] = and i32 [[TMP34]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = icmp eq i32 [[TMP37]], 0
+; CHECK-NEXT:    [[TMP39:%.*]] = add i32 [[TMP30]], 1
+; CHECK-NEXT:    [[TMP40:%.*]] = sub i32 [[TMP30]], 1
+; CHECK-NEXT:    [[TMP41:%.*]] = select i1 [[TMP38]], i32 [[TMP30]], i32 [[TMP39]]
+; CHECK-NEXT:    [[TMP42:%.*]] = select i1 [[TMP35]], i32 [[TMP41]], i32 [[TMP40]]
+; CHECK-NEXT:    [[TMP43:%.*]] = insertelement <4 x i32> undef, i32 [[TMP42]], i64 0
+; CHECK-NEXT:    [[TMP44:%.*]] = extractelement <4 x i32> [[X]], i64 1
+; CHECK-NEXT:    [[TMP45:%.*]] = extractelement <4 x i32> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP46:%.*]] = uitofp i32 [[TMP45]] to float
+; CHECK-NEXT:    [[TMP47:%.*]] = fdiv fast float 1.000000e+00, [[TMP46]]
+; CHECK-NEXT:    [[TMP48:%.*]] = fmul fast float [[TMP47]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP49:%.*]] = fptoui float [[TMP48]] to i32
+; CHECK-NEXT:    [[TMP50:%.*]] = zext i32 [[TMP49]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = zext i32 [[TMP45]] to i64
+; CHECK-NEXT:    [[TMP52:%.*]] = mul i64 [[TMP50]], [[TMP51]]
+; CHECK-NEXT:    [[TMP53:%.*]] = trunc i64 [[TMP52]] to i32
+; CHECK-NEXT:    [[TMP54:%.*]] = lshr i64 [[TMP52]], 32
+; CHECK-NEXT:    [[TMP55:%.*]] = trunc i64 [[TMP54]] to i32
+; CHECK-NEXT:    [[TMP56:%.*]] = sub i32 0, [[TMP53]]
+; CHECK-NEXT:    [[TMP57:%.*]] = icmp eq i32 [[TMP55]], 0
+; CHECK-NEXT:    [[TMP58:%.*]] = select i1 [[TMP57]], i32 [[TMP56]], i32 [[TMP53]]
+; CHECK-NEXT:    [[TMP59:%.*]] = zext i32 [[TMP58]] to i64
+; CHECK-NEXT:    [[TMP60:%.*]] = zext i32 [[TMP49]] to i64
+; CHECK-NEXT:    [[TMP61:%.*]] = mul i64 [[TMP59]], [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = trunc i64 [[TMP61]] to i32
+; CHECK-NEXT:    [[TMP63:%.*]] = lshr i64 [[TMP61]], 32
+; CHECK-NEXT:    [[TMP64:%.*]] = trunc i64 [[TMP63]] to i32
+; CHECK-NEXT:    [[TMP65:%.*]] = add i32 [[TMP49]], [[TMP64]]
+; CHECK-NEXT:    [[TMP66:%.*]] = sub i32 [[TMP49]], [[TMP64]]
+; CHECK-NEXT:    [[TMP67:%.*]] = select i1 [[TMP57]], i32 [[TMP65]], i32 [[TMP66]]
+; CHECK-NEXT:    [[TMP68:%.*]] = zext i32 [[TMP67]] to i64
+; CHECK-NEXT:    [[TMP69:%.*]] = zext i32 [[TMP44]] to i64
+; CHECK-NEXT:    [[TMP70:%.*]] = mul i64 [[TMP68]], [[TMP69]]
+; CHECK-NEXT:    [[TMP71:%.*]] = trunc i64 [[TMP70]] to i32
+; CHECK-NEXT:    [[TMP72:%.*]] = lshr i64 [[TMP70]], 32
+; CHECK-NEXT:    [[TMP73:%.*]] = trunc i64 [[TMP72]] to i32
+; CHECK-NEXT:    [[TMP74:%.*]] = mul i32 [[TMP73]], [[TMP45]]
+; CHECK-NEXT:    [[TMP75:%.*]] = sub i32 [[TMP44]], [[TMP74]]
+; CHECK-NEXT:    [[TMP76:%.*]] = icmp uge i32 [[TMP75]], [[TMP45]]
+; CHECK-NEXT:    [[TMP77:%.*]] = select i1 [[TMP76]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP78:%.*]] = icmp uge i32 [[TMP44]], [[TMP74]]
+; CHECK-NEXT:    [[TMP79:%.*]] = select i1 [[TMP78]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP80:%.*]] = and i32 [[TMP77]], [[TMP79]]
+; CHECK-NEXT:    [[TMP81:%.*]] = icmp eq i32 [[TMP80]], 0
+; CHECK-NEXT:    [[TMP82:%.*]] = add i32 [[TMP73]], 1
+; CHECK-NEXT:    [[TMP83:%.*]] = sub i32 [[TMP73]], 1
+; CHECK-NEXT:    [[TMP84:%.*]] = select i1 [[TMP81]], i32 [[TMP73]], i32 [[TMP82]]
+; CHECK-NEXT:    [[TMP85:%.*]] = select i1 [[TMP78]], i32 [[TMP84]], i32 [[TMP83]]
+; CHECK-NEXT:    [[TMP86:%.*]] = insertelement <4 x i32> [[TMP43]], i32 [[TMP85]], i64 1
+; CHECK-NEXT:    [[TMP87:%.*]] = extractelement <4 x i32> [[X]], i64 2
+; CHECK-NEXT:    [[TMP88:%.*]] = extractelement <4 x i32> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP89:%.*]] = uitofp i32 [[TMP88]] to float
+; CHECK-NEXT:    [[TMP90:%.*]] = fdiv fast float 1.000000e+00, [[TMP89]]
+; CHECK-NEXT:    [[TMP91:%.*]] = fmul fast float [[TMP90]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP92:%.*]] = fptoui float [[TMP91]] to i32
+; CHECK-NEXT:    [[TMP93:%.*]] = zext i32 [[TMP92]] to i64
+; CHECK-NEXT:    [[TMP94:%.*]] = zext i32 [[TMP88]] to i64
+; CHECK-NEXT:    [[TMP95:%.*]] = mul i64 [[TMP93]], [[TMP94]]
+; CHECK-NEXT:    [[TMP96:%.*]] = trunc i64 [[TMP95]] to i32
+; CHECK-NEXT:    [[TMP97:%.*]] = lshr i64 [[TMP95]], 32
+; CHECK-NEXT:    [[TMP98:%.*]] = trunc i64 [[TMP97]] to i32
+; CHECK-NEXT:    [[TMP99:%.*]] = sub i32 0, [[TMP96]]
+; CHECK-NEXT:    [[TMP100:%.*]] = icmp eq i32 [[TMP98]], 0
+; CHECK-NEXT:    [[TMP101:%.*]] = select i1 [[TMP100]], i32 [[TMP99]], i32 [[TMP96]]
+; CHECK-NEXT:    [[TMP102:%.*]] = zext i32 [[TMP101]] to i64
+; CHECK-NEXT:    [[TMP103:%.*]] = zext i32 [[TMP92]] to i64
+; CHECK-NEXT:    [[TMP104:%.*]] = mul i64 [[TMP102]], [[TMP103]]
+; CHECK-NEXT:    [[TMP105:%.*]] = trunc i64 [[TMP104]] to i32
+; CHECK-NEXT:    [[TMP106:%.*]] = lshr i64 [[TMP104]], 32
+; CHECK-NEXT:    [[TMP107:%.*]] = trunc i64 [[TMP106]] to i32
+; CHECK-NEXT:    [[TMP108:%.*]] = add i32 [[TMP92]], [[TMP107]]
+; CHECK-NEXT:    [[TMP109:%.*]] = sub i32 [[TMP92]], [[TMP107]]
+; CHECK-NEXT:    [[TMP110:%.*]] = select i1 [[TMP100]], i32 [[TMP108]], i32 [[TMP109]]
+; CHECK-NEXT:    [[TMP111:%.*]] = zext i32 [[TMP110]] to i64
+; CHECK-NEXT:    [[TMP112:%.*]] = zext i32 [[TMP87]] to i64
+; CHECK-NEXT:    [[TMP113:%.*]] = mul i64 [[TMP111]], [[TMP112]]
+; CHECK-NEXT:    [[TMP114:%.*]] = trunc i64 [[TMP113]] to i32
+; CHECK-NEXT:    [[TMP115:%.*]] = lshr i64 [[TMP113]], 32
+; CHECK-NEXT:    [[TMP116:%.*]] = trunc i64 [[TMP115]] to i32
+; CHECK-NEXT:    [[TMP117:%.*]] = mul i32 [[TMP116]], [[TMP88]]
+; CHECK-NEXT:    [[TMP118:%.*]] = sub i32 [[TMP87]], [[TMP117]]
+; CHECK-NEXT:    [[TMP119:%.*]] = icmp uge i32 [[TMP118]], [[TMP88]]
+; CHECK-NEXT:    [[TMP120:%.*]] = select i1 [[TMP119]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP121:%.*]] = icmp uge i32 [[TMP87]], [[TMP117]]
+; CHECK-NEXT:    [[TMP122:%.*]] = select i1 [[TMP121]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP123:%.*]] = and i32 [[TMP120]], [[TMP122]]
+; CHECK-NEXT:    [[TMP124:%.*]] = icmp eq i32 [[TMP123]], 0
+; CHECK-NEXT:    [[TMP125:%.*]] = add i32 [[TMP116]], 1
+; CHECK-NEXT:    [[TMP126:%.*]] = sub i32 [[TMP116]], 1
+; CHECK-NEXT:    [[TMP127:%.*]] = select i1 [[TMP124]], i32 [[TMP116]], i32 [[TMP125]]
+; CHECK-NEXT:    [[TMP128:%.*]] = select i1 [[TMP121]], i32 [[TMP127]], i32 [[TMP126]]
+; CHECK-NEXT:    [[TMP129:%.*]] = insertelement <4 x i32> [[TMP86]], i32 [[TMP128]], i64 2
+; CHECK-NEXT:    [[TMP130:%.*]] = extractelement <4 x i32> [[X]], i64 3
+; CHECK-NEXT:    [[TMP131:%.*]] = extractelement <4 x i32> [[Y]], i64 3
+; CHECK-NEXT:    [[TMP132:%.*]] = uitofp i32 [[TMP131]] to float
+; CHECK-NEXT:    [[TMP133:%.*]] = fdiv fast float 1.000000e+00, [[TMP132]]
+; CHECK-NEXT:    [[TMP134:%.*]] = fmul fast float [[TMP133]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP135:%.*]] = fptoui float [[TMP134]] to i32
+; CHECK-NEXT:    [[TMP136:%.*]] = zext i32 [[TMP135]] to i64
+; CHECK-NEXT:    [[TMP137:%.*]] = zext i32 [[TMP131]] to i64
+; CHECK-NEXT:    [[TMP138:%.*]] = mul i64 [[TMP136]], [[TMP137]]
+; CHECK-NEXT:    [[TMP139:%.*]] = trunc i64 [[TMP138]] to i32
+; CHECK-NEXT:    [[TMP140:%.*]] = lshr i64 [[TMP138]], 32
+; CHECK-NEXT:    [[TMP141:%.*]] = trunc i64 [[TMP140]] to i32
+; CHECK-NEXT:    [[TMP142:%.*]] = sub i32 0, [[TMP139]]
+; CHECK-NEXT:    [[TMP143:%.*]] = icmp eq i32 [[TMP141]], 0
+; CHECK-NEXT:    [[TMP144:%.*]] = select i1 [[TMP143]], i32 [[TMP142]], i32 [[TMP139]]
+; CHECK-NEXT:    [[TMP145:%.*]] = zext i32 [[TMP144]] to i64
+; CHECK-NEXT:    [[TMP146:%.*]] = zext i32 [[TMP135]] to i64
+; CHECK-NEXT:    [[TMP147:%.*]] = mul i64 [[TMP145]], [[TMP146]]
+; CHECK-NEXT:    [[TMP148:%.*]] = trunc i64 [[TMP147]] to i32
+; CHECK-NEXT:    [[TMP149:%.*]] = lshr i64 [[TMP147]], 32
+; CHECK-NEXT:    [[TMP150:%.*]] = trunc i64 [[TMP149]] to i32
+; CHECK-NEXT:    [[TMP151:%.*]] = add i32 [[TMP135]], [[TMP150]]
+; CHECK-NEXT:    [[TMP152:%.*]] = sub i32 [[TMP135]], [[TMP150]]
+; CHECK-NEXT:    [[TMP153:%.*]] = select i1 [[TMP143]], i32 [[TMP151]], i32 [[TMP152]]
+; CHECK-NEXT:    [[TMP154:%.*]] = zext i32 [[TMP153]] to i64
+; CHECK-NEXT:    [[TMP155:%.*]] = zext i32 [[TMP130]] to i64
+; CHECK-NEXT:    [[TMP156:%.*]] = mul i64 [[TMP154]], [[TMP155]]
+; CHECK-NEXT:    [[TMP157:%.*]] = trunc i64 [[TMP156]] to i32
+; CHECK-NEXT:    [[TMP158:%.*]] = lshr i64 [[TMP156]], 32
+; CHECK-NEXT:    [[TMP159:%.*]] = trunc i64 [[TMP158]] to i32
+; CHECK-NEXT:    [[TMP160:%.*]] = mul i32 [[TMP159]], [[TMP131]]
+; CHECK-NEXT:    [[TMP161:%.*]] = sub i32 [[TMP130]], [[TMP160]]
+; CHECK-NEXT:    [[TMP162:%.*]] = icmp uge i32 [[TMP161]], [[TMP131]]
+; CHECK-NEXT:    [[TMP163:%.*]] = select i1 [[TMP162]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP164:%.*]] = icmp uge i32 [[TMP130]], [[TMP160]]
+; CHECK-NEXT:    [[TMP165:%.*]] = select i1 [[TMP164]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP166:%.*]] = and i32 [[TMP163]], [[TMP165]]
+; CHECK-NEXT:    [[TMP167:%.*]] = icmp eq i32 [[TMP166]], 0
+; CHECK-NEXT:    [[TMP168:%.*]] = add i32 [[TMP159]], 1
+; CHECK-NEXT:    [[TMP169:%.*]] = sub i32 [[TMP159]], 1
+; CHECK-NEXT:    [[TMP170:%.*]] = select i1 [[TMP167]], i32 [[TMP159]], i32 [[TMP168]]
+; CHECK-NEXT:    [[TMP171:%.*]] = select i1 [[TMP164]], i32 [[TMP170]], i32 [[TMP169]]
+; CHECK-NEXT:    [[TMP172:%.*]] = insertelement <4 x i32> [[TMP129]], i32 [[TMP171]], i64 3
+; CHECK-NEXT:    store <4 x i32> [[TMP172]], <4 x i32> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = udiv <4 x i32> %x, %y
+  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @urem_v4i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i32> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP4:%.*]] = fdiv fast float 1.000000e+00, [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast float [[TMP4]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP6:%.*]] = fptoui float [[TMP5]] to i32
+; CHECK-NEXT:    [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP7]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = lshr i64 [[TMP9]], 32
+; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = sub i32 0, [[TMP10]]
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i32 [[TMP12]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP13]], i32 [[TMP10]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; CHECK-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; CHECK-NEXT:    [[TMP22:%.*]] = add i32 [[TMP6]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP6]], [[TMP21]]
+; CHECK-NEXT:    [[TMP24:%.*]] = select i1 [[TMP14]], i32 [[TMP22]], i32 [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = zext i32 [[TMP24]] to i64
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = mul i64 [[TMP25]], [[TMP26]]
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = lshr i64 [[TMP27]], 32
+; CHECK-NEXT:    [[TMP30:%.*]] = trunc i64 [[TMP29]] to i32
+; CHECK-NEXT:    [[TMP31:%.*]] = mul i32 [[TMP30]], [[TMP2]]
+; CHECK-NEXT:    [[TMP32:%.*]] = sub i32 [[TMP1]], [[TMP31]]
+; CHECK-NEXT:    [[TMP33:%.*]] = icmp uge i32 [[TMP32]], [[TMP2]]
+; CHECK-NEXT:    [[TMP34:%.*]] = select i1 [[TMP33]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP35:%.*]] = icmp uge i32 [[TMP1]], [[TMP31]]
+; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP35]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP37:%.*]] = and i32 [[TMP34]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = icmp eq i32 [[TMP37]], 0
+; CHECK-NEXT:    [[TMP39:%.*]] = sub i32 [[TMP32]], [[TMP2]]
+; CHECK-NEXT:    [[TMP40:%.*]] = add i32 [[TMP32]], [[TMP2]]
+; CHECK-NEXT:    [[TMP41:%.*]] = select i1 [[TMP38]], i32 [[TMP32]], i32 [[TMP39]]
+; CHECK-NEXT:    [[TMP42:%.*]] = select i1 [[TMP35]], i32 [[TMP41]], i32 [[TMP40]]
+; CHECK-NEXT:    [[TMP43:%.*]] = insertelement <4 x i32> undef, i32 [[TMP42]], i64 0
+; CHECK-NEXT:    [[TMP44:%.*]] = extractelement <4 x i32> [[X]], i64 1
+; CHECK-NEXT:    [[TMP45:%.*]] = extractelement <4 x i32> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP46:%.*]] = uitofp i32 [[TMP45]] to float
+; CHECK-NEXT:    [[TMP47:%.*]] = fdiv fast float 1.000000e+00, [[TMP46]]
+; CHECK-NEXT:    [[TMP48:%.*]] = fmul fast float [[TMP47]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP49:%.*]] = fptoui float [[TMP48]] to i32
+; CHECK-NEXT:    [[TMP50:%.*]] = zext i32 [[TMP49]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = zext i32 [[TMP45]] to i64
+; CHECK-NEXT:    [[TMP52:%.*]] = mul i64 [[TMP50]], [[TMP51]]
+; CHECK-NEXT:    [[TMP53:%.*]] = trunc i64 [[TMP52]] to i32
+; CHECK-NEXT:    [[TMP54:%.*]] = lshr i64 [[TMP52]], 32
+; CHECK-NEXT:    [[TMP55:%.*]] = trunc i64 [[TMP54]] to i32
+; CHECK-NEXT:    [[TMP56:%.*]] = sub i32 0, [[TMP53]]
+; CHECK-NEXT:    [[TMP57:%.*]] = icmp eq i32 [[TMP55]], 0
+; CHECK-NEXT:    [[TMP58:%.*]] = select i1 [[TMP57]], i32 [[TMP56]], i32 [[TMP53]]
+; CHECK-NEXT:    [[TMP59:%.*]] = zext i32 [[TMP58]] to i64
+; CHECK-NEXT:    [[TMP60:%.*]] = zext i32 [[TMP49]] to i64
+; CHECK-NEXT:    [[TMP61:%.*]] = mul i64 [[TMP59]], [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = trunc i64 [[TMP61]] to i32
+; CHECK-NEXT:    [[TMP63:%.*]] = lshr i64 [[TMP61]], 32
+; CHECK-NEXT:    [[TMP64:%.*]] = trunc i64 [[TMP63]] to i32
+; CHECK-NEXT:    [[TMP65:%.*]] = add i32 [[TMP49]], [[TMP64]]
+; CHECK-NEXT:    [[TMP66:%.*]] = sub i32 [[TMP49]], [[TMP64]]
+; CHECK-NEXT:    [[TMP67:%.*]] = select i1 [[TMP57]], i32 [[TMP65]], i32 [[TMP66]]
+; CHECK-NEXT:    [[TMP68:%.*]] = zext i32 [[TMP67]] to i64
+; CHECK-NEXT:    [[TMP69:%.*]] = zext i32 [[TMP44]] to i64
+; CHECK-NEXT:    [[TMP70:%.*]] = mul i64 [[TMP68]], [[TMP69]]
+; CHECK-NEXT:    [[TMP71:%.*]] = trunc i64 [[TMP70]] to i32
+; CHECK-NEXT:    [[TMP72:%.*]] = lshr i64 [[TMP70]], 32
+; CHECK-NEXT:    [[TMP73:%.*]] = trunc i64 [[TMP72]] to i32
+; CHECK-NEXT:    [[TMP74:%.*]] = mul i32 [[TMP73]], [[TMP45]]
+; CHECK-NEXT:    [[TMP75:%.*]] = sub i32 [[TMP44]], [[TMP74]]
+; CHECK-NEXT:    [[TMP76:%.*]] = icmp uge i32 [[TMP75]], [[TMP45]]
+; CHECK-NEXT:    [[TMP77:%.*]] = select i1 [[TMP76]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP78:%.*]] = icmp uge i32 [[TMP44]], [[TMP74]]
+; CHECK-NEXT:    [[TMP79:%.*]] = select i1 [[TMP78]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP80:%.*]] = and i32 [[TMP77]], [[TMP79]]
+; CHECK-NEXT:    [[TMP81:%.*]] = icmp eq i32 [[TMP80]], 0
+; CHECK-NEXT:    [[TMP82:%.*]] = sub i32 [[TMP75]], [[TMP45]]
+; CHECK-NEXT:    [[TMP83:%.*]] = add i32 [[TMP75]], [[TMP45]]
+; CHECK-NEXT:    [[TMP84:%.*]] = select i1 [[TMP81]], i32 [[TMP75]], i32 [[TMP82]]
+; CHECK-NEXT:    [[TMP85:%.*]] = select i1 [[TMP78]], i32 [[TMP84]], i32 [[TMP83]]
+; CHECK-NEXT:    [[TMP86:%.*]] = insertelement <4 x i32> [[TMP43]], i32 [[TMP85]], i64 1
+; CHECK-NEXT:    [[TMP87:%.*]] = extractelement <4 x i32> [[X]], i64 2
+; CHECK-NEXT:    [[TMP88:%.*]] = extractelement <4 x i32> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP89:%.*]] = uitofp i32 [[TMP88]] to float
+; CHECK-NEXT:    [[TMP90:%.*]] = fdiv fast float 1.000000e+00, [[TMP89]]
+; CHECK-NEXT:    [[TMP91:%.*]] = fmul fast float [[TMP90]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP92:%.*]] = fptoui float [[TMP91]] to i32
+; CHECK-NEXT:    [[TMP93:%.*]] = zext i32 [[TMP92]] to i64
+; CHECK-NEXT:    [[TMP94:%.*]] = zext i32 [[TMP88]] to i64
+; CHECK-NEXT:    [[TMP95:%.*]] = mul i64 [[TMP93]], [[TMP94]]
+; CHECK-NEXT:    [[TMP96:%.*]] = trunc i64 [[TMP95]] to i32
+; CHECK-NEXT:    [[TMP97:%.*]] = lshr i64 [[TMP95]], 32
+; CHECK-NEXT:    [[TMP98:%.*]] = trunc i64 [[TMP97]] to i32
+; CHECK-NEXT:    [[TMP99:%.*]] = sub i32 0, [[TMP96]]
+; CHECK-NEXT:    [[TMP100:%.*]] = icmp eq i32 [[TMP98]], 0
+; CHECK-NEXT:    [[TMP101:%.*]] = select i1 [[TMP100]], i32 [[TMP99]], i32 [[TMP96]]
+; CHECK-NEXT:    [[TMP102:%.*]] = zext i32 [[TMP101]] to i64
+; CHECK-NEXT:    [[TMP103:%.*]] = zext i32 [[TMP92]] to i64
+; CHECK-NEXT:    [[TMP104:%.*]] = mul i64 [[TMP102]], [[TMP103]]
+; CHECK-NEXT:    [[TMP105:%.*]] = trunc i64 [[TMP104]] to i32
+; CHECK-NEXT:    [[TMP106:%.*]] = lshr i64 [[TMP104]], 32
+; CHECK-NEXT:    [[TMP107:%.*]] = trunc i64 [[TMP106]] to i32
+; CHECK-NEXT:    [[TMP108:%.*]] = add i32 [[TMP92]], [[TMP107]]
+; CHECK-NEXT:    [[TMP109:%.*]] = sub i32 [[TMP92]], [[TMP107]]
+; CHECK-NEXT:    [[TMP110:%.*]] = select i1 [[TMP100]], i32 [[TMP108]], i32 [[TMP109]]
+; CHECK-NEXT:    [[TMP111:%.*]] = zext i32 [[TMP110]] to i64
+; CHECK-NEXT:    [[TMP112:%.*]] = zext i32 [[TMP87]] to i64
+; CHECK-NEXT:    [[TMP113:%.*]] = mul i64 [[TMP111]], [[TMP112]]
+; CHECK-NEXT:    [[TMP114:%.*]] = trunc i64 [[TMP113]] to i32
+; CHECK-NEXT:    [[TMP115:%.*]] = lshr i64 [[TMP113]], 32
+; CHECK-NEXT:    [[TMP116:%.*]] = trunc i64 [[TMP115]] to i32
+; CHECK-NEXT:    [[TMP117:%.*]] = mul i32 [[TMP116]], [[TMP88]]
+; CHECK-NEXT:    [[TMP118:%.*]] = sub i32 [[TMP87]], [[TMP117]]
+; CHECK-NEXT:    [[TMP119:%.*]] = icmp uge i32 [[TMP118]], [[TMP88]]
+; CHECK-NEXT:    [[TMP120:%.*]] = select i1 [[TMP119]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP121:%.*]] = icmp uge i32 [[TMP87]], [[TMP117]]
+; CHECK-NEXT:    [[TMP122:%.*]] = select i1 [[TMP121]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP123:%.*]] = and i32 [[TMP120]], [[TMP122]]
+; CHECK-NEXT:    [[TMP124:%.*]] = icmp eq i32 [[TMP123]], 0
+; CHECK-NEXT:    [[TMP125:%.*]] = sub i32 [[TMP118]], [[TMP88]]
+; CHECK-NEXT:    [[TMP126:%.*]] = add i32 [[TMP118]], [[TMP88]]
+; CHECK-NEXT:    [[TMP127:%.*]] = select i1 [[TMP124]], i32 [[TMP118]], i32 [[TMP125]]
+; CHECK-NEXT:    [[TMP128:%.*]] = select i1 [[TMP121]], i32 [[TMP127]], i32 [[TMP126]]
+; CHECK-NEXT:    [[TMP129:%.*]] = insertelement <4 x i32> [[TMP86]], i32 [[TMP128]], i64 2
+; CHECK-NEXT:    [[TMP130:%.*]] = extractelement <4 x i32> [[X]], i64 3
+; CHECK-NEXT:    [[TMP131:%.*]] = extractelement <4 x i32> [[Y]], i64 3
+; CHECK-NEXT:    [[TMP132:%.*]] = uitofp i32 [[TMP131]] to float
+; CHECK-NEXT:    [[TMP133:%.*]] = fdiv fast float 1.000000e+00, [[TMP132]]
+; CHECK-NEXT:    [[TMP134:%.*]] = fmul fast float [[TMP133]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP135:%.*]] = fptoui float [[TMP134]] to i32
+; CHECK-NEXT:    [[TMP136:%.*]] = zext i32 [[TMP135]] to i64
+; CHECK-NEXT:    [[TMP137:%.*]] = zext i32 [[TMP131]] to i64
+; CHECK-NEXT:    [[TMP138:%.*]] = mul i64 [[TMP136]], [[TMP137]]
+; CHECK-NEXT:    [[TMP139:%.*]] = trunc i64 [[TMP138]] to i32
+; CHECK-NEXT:    [[TMP140:%.*]] = lshr i64 [[TMP138]], 32
+; CHECK-NEXT:    [[TMP141:%.*]] = trunc i64 [[TMP140]] to i32
+; CHECK-NEXT:    [[TMP142:%.*]] = sub i32 0, [[TMP139]]
+; CHECK-NEXT:    [[TMP143:%.*]] = icmp eq i32 [[TMP141]], 0
+; CHECK-NEXT:    [[TMP144:%.*]] = select i1 [[TMP143]], i32 [[TMP142]], i32 [[TMP139]]
+; CHECK-NEXT:    [[TMP145:%.*]] = zext i32 [[TMP144]] to i64
+; CHECK-NEXT:    [[TMP146:%.*]] = zext i32 [[TMP135]] to i64
+; CHECK-NEXT:    [[TMP147:%.*]] = mul i64 [[TMP145]], [[TMP146]]
+; CHECK-NEXT:    [[TMP148:%.*]] = trunc i64 [[TMP147]] to i32
+; CHECK-NEXT:    [[TMP149:%.*]] = lshr i64 [[TMP147]], 32
+; CHECK-NEXT:    [[TMP150:%.*]] = trunc i64 [[TMP149]] to i32
+; CHECK-NEXT:    [[TMP151:%.*]] = add i32 [[TMP135]], [[TMP150]]
+; CHECK-NEXT:    [[TMP152:%.*]] = sub i32 [[TMP135]], [[TMP150]]
+; CHECK-NEXT:    [[TMP153:%.*]] = select i1 [[TMP143]], i32 [[TMP151]], i32 [[TMP152]]
+; CHECK-NEXT:    [[TMP154:%.*]] = zext i32 [[TMP153]] to i64
+; CHECK-NEXT:    [[TMP155:%.*]] = zext i32 [[TMP130]] to i64
+; CHECK-NEXT:    [[TMP156:%.*]] = mul i64 [[TMP154]], [[TMP155]]
+; CHECK-NEXT:    [[TMP157:%.*]] = trunc i64 [[TMP156]] to i32
+; CHECK-NEXT:    [[TMP158:%.*]] = lshr i64 [[TMP156]], 32
+; CHECK-NEXT:    [[TMP159:%.*]] = trunc i64 [[TMP158]] to i32
+; CHECK-NEXT:    [[TMP160:%.*]] = mul i32 [[TMP159]], [[TMP131]]
+; CHECK-NEXT:    [[TMP161:%.*]] = sub i32 [[TMP130]], [[TMP160]]
+; CHECK-NEXT:    [[TMP162:%.*]] = icmp uge i32 [[TMP161]], [[TMP131]]
+; CHECK-NEXT:    [[TMP163:%.*]] = select i1 [[TMP162]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP164:%.*]] = icmp uge i32 [[TMP130]], [[TMP160]]
+; CHECK-NEXT:    [[TMP165:%.*]] = select i1 [[TMP164]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP166:%.*]] = and i32 [[TMP163]], [[TMP165]]
+; CHECK-NEXT:    [[TMP167:%.*]] = icmp eq i32 [[TMP166]], 0
+; CHECK-NEXT:    [[TMP168:%.*]] = sub i32 [[TMP161]], [[TMP131]]
+; CHECK-NEXT:    [[TMP169:%.*]] = add i32 [[TMP161]], [[TMP131]]
+; CHECK-NEXT:    [[TMP170:%.*]] = select i1 [[TMP167]], i32 [[TMP161]], i32 [[TMP168]]
+; CHECK-NEXT:    [[TMP171:%.*]] = select i1 [[TMP164]], i32 [[TMP170]], i32 [[TMP169]]
+; CHECK-NEXT:    [[TMP172:%.*]] = insertelement <4 x i32> [[TMP129]], i32 [[TMP171]], i64 3
+; CHECK-NEXT:    store <4 x i32> [[TMP172]], <4 x i32> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = urem <4 x i32> %x, %y
+  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+; Checks the element-wise IR expansion of a <4 x i32> sdiv performed by
+; AMDGPUCodeGenPrepare. For each lane the expected sequence below: extracts
+; the sign bits of numerator and denominator (ashr ..., 31), takes absolute
+; values via add+xor, divides the unsigned magnitudes using a 2^32-scaled
+; floating-point reciprocal estimate (fdiv fast 1.0 / den, fmul by
+; 0x41F0000000000000) refined with 64-bit multiply/high-half steps, applies
+; the final +1/-1 quotient correction selects, and then restores the result
+; sign with xor/sub against sign(x) ^ sign(y).
+define amdgpu_kernel void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @sdiv_v4i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i32> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = ashr i32 [[TMP1]], 31
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP2]], 31
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i32 [[TMP6]], [[TMP3]]
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i32 [[TMP7]], [[TMP4]]
+; CHECK-NEXT:    [[TMP10:%.*]] = uitofp i32 [[TMP9]] to float
+; CHECK-NEXT:    [[TMP11:%.*]] = fdiv fast float 1.000000e+00, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = fmul fast float [[TMP11]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP13:%.*]] = fptoui float [[TMP12]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; CHECK-NEXT:    [[TMP18:%.*]] = lshr i64 [[TMP16]], 32
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; CHECK-NEXT:    [[TMP20:%.*]] = sub i32 0, [[TMP17]]
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i32 [[TMP19]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP20]], i32 [[TMP17]]
+; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = mul i64 [[TMP23]], [[TMP24]]
+; CHECK-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = lshr i64 [[TMP25]], 32
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = add i32 [[TMP13]], [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = sub i32 [[TMP13]], [[TMP28]]
+; CHECK-NEXT:    [[TMP31:%.*]] = select i1 [[TMP21]], i32 [[TMP29]], i32 [[TMP30]]
+; CHECK-NEXT:    [[TMP32:%.*]] = zext i32 [[TMP31]] to i64
+; CHECK-NEXT:    [[TMP33:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP34:%.*]] = mul i64 [[TMP32]], [[TMP33]]
+; CHECK-NEXT:    [[TMP35:%.*]] = trunc i64 [[TMP34]] to i32
+; CHECK-NEXT:    [[TMP36:%.*]] = lshr i64 [[TMP34]], 32
+; CHECK-NEXT:    [[TMP37:%.*]] = trunc i64 [[TMP36]] to i32
+; CHECK-NEXT:    [[TMP38:%.*]] = mul i32 [[TMP37]], [[TMP9]]
+; CHECK-NEXT:    [[TMP39:%.*]] = sub i32 [[TMP8]], [[TMP38]]
+; CHECK-NEXT:    [[TMP40:%.*]] = icmp uge i32 [[TMP39]], [[TMP9]]
+; CHECK-NEXT:    [[TMP41:%.*]] = select i1 [[TMP40]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP42:%.*]] = icmp uge i32 [[TMP8]], [[TMP38]]
+; CHECK-NEXT:    [[TMP43:%.*]] = select i1 [[TMP42]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP44:%.*]] = and i32 [[TMP41]], [[TMP43]]
+; CHECK-NEXT:    [[TMP45:%.*]] = icmp eq i32 [[TMP44]], 0
+; CHECK-NEXT:    [[TMP46:%.*]] = add i32 [[TMP37]], 1
+; CHECK-NEXT:    [[TMP47:%.*]] = sub i32 [[TMP37]], 1
+; CHECK-NEXT:    [[TMP48:%.*]] = select i1 [[TMP45]], i32 [[TMP37]], i32 [[TMP46]]
+; CHECK-NEXT:    [[TMP49:%.*]] = select i1 [[TMP42]], i32 [[TMP48]], i32 [[TMP47]]
+; CHECK-NEXT:    [[TMP50:%.*]] = xor i32 [[TMP49]], [[TMP5]]
+; CHECK-NEXT:    [[TMP51:%.*]] = sub i32 [[TMP50]], [[TMP5]]
+; CHECK-NEXT:    [[TMP52:%.*]] = insertelement <4 x i32> undef, i32 [[TMP51]], i64 0
+; CHECK-NEXT:    [[TMP53:%.*]] = extractelement <4 x i32> [[X]], i64 1
+; CHECK-NEXT:    [[TMP54:%.*]] = extractelement <4 x i32> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP55:%.*]] = ashr i32 [[TMP53]], 31
+; CHECK-NEXT:    [[TMP56:%.*]] = ashr i32 [[TMP54]], 31
+; CHECK-NEXT:    [[TMP57:%.*]] = xor i32 [[TMP55]], [[TMP56]]
+; CHECK-NEXT:    [[TMP58:%.*]] = add i32 [[TMP53]], [[TMP55]]
+; CHECK-NEXT:    [[TMP59:%.*]] = add i32 [[TMP54]], [[TMP56]]
+; CHECK-NEXT:    [[TMP60:%.*]] = xor i32 [[TMP58]], [[TMP55]]
+; CHECK-NEXT:    [[TMP61:%.*]] = xor i32 [[TMP59]], [[TMP56]]
+; CHECK-NEXT:    [[TMP62:%.*]] = uitofp i32 [[TMP61]] to float
+; CHECK-NEXT:    [[TMP63:%.*]] = fdiv fast float 1.000000e+00, [[TMP62]]
+; CHECK-NEXT:    [[TMP64:%.*]] = fmul fast float [[TMP63]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP65:%.*]] = fptoui float [[TMP64]] to i32
+; CHECK-NEXT:    [[TMP66:%.*]] = zext i32 [[TMP65]] to i64
+; CHECK-NEXT:    [[TMP67:%.*]] = zext i32 [[TMP61]] to i64
+; CHECK-NEXT:    [[TMP68:%.*]] = mul i64 [[TMP66]], [[TMP67]]
+; CHECK-NEXT:    [[TMP69:%.*]] = trunc i64 [[TMP68]] to i32
+; CHECK-NEXT:    [[TMP70:%.*]] = lshr i64 [[TMP68]], 32
+; CHECK-NEXT:    [[TMP71:%.*]] = trunc i64 [[TMP70]] to i32
+; CHECK-NEXT:    [[TMP72:%.*]] = sub i32 0, [[TMP69]]
+; CHECK-NEXT:    [[TMP73:%.*]] = icmp eq i32 [[TMP71]], 0
+; CHECK-NEXT:    [[TMP74:%.*]] = select i1 [[TMP73]], i32 [[TMP72]], i32 [[TMP69]]
+; CHECK-NEXT:    [[TMP75:%.*]] = zext i32 [[TMP74]] to i64
+; CHECK-NEXT:    [[TMP76:%.*]] = zext i32 [[TMP65]] to i64
+; CHECK-NEXT:    [[TMP77:%.*]] = mul i64 [[TMP75]], [[TMP76]]
+; CHECK-NEXT:    [[TMP78:%.*]] = trunc i64 [[TMP77]] to i32
+; CHECK-NEXT:    [[TMP79:%.*]] = lshr i64 [[TMP77]], 32
+; CHECK-NEXT:    [[TMP80:%.*]] = trunc i64 [[TMP79]] to i32
+; CHECK-NEXT:    [[TMP81:%.*]] = add i32 [[TMP65]], [[TMP80]]
+; CHECK-NEXT:    [[TMP82:%.*]] = sub i32 [[TMP65]], [[TMP80]]
+; CHECK-NEXT:    [[TMP83:%.*]] = select i1 [[TMP73]], i32 [[TMP81]], i32 [[TMP82]]
+; CHECK-NEXT:    [[TMP84:%.*]] = zext i32 [[TMP83]] to i64
+; CHECK-NEXT:    [[TMP85:%.*]] = zext i32 [[TMP60]] to i64
+; CHECK-NEXT:    [[TMP86:%.*]] = mul i64 [[TMP84]], [[TMP85]]
+; CHECK-NEXT:    [[TMP87:%.*]] = trunc i64 [[TMP86]] to i32
+; CHECK-NEXT:    [[TMP88:%.*]] = lshr i64 [[TMP86]], 32
+; CHECK-NEXT:    [[TMP89:%.*]] = trunc i64 [[TMP88]] to i32
+; CHECK-NEXT:    [[TMP90:%.*]] = mul i32 [[TMP89]], [[TMP61]]
+; CHECK-NEXT:    [[TMP91:%.*]] = sub i32 [[TMP60]], [[TMP90]]
+; CHECK-NEXT:    [[TMP92:%.*]] = icmp uge i32 [[TMP91]], [[TMP61]]
+; CHECK-NEXT:    [[TMP93:%.*]] = select i1 [[TMP92]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP94:%.*]] = icmp uge i32 [[TMP60]], [[TMP90]]
+; CHECK-NEXT:    [[TMP95:%.*]] = select i1 [[TMP94]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP96:%.*]] = and i32 [[TMP93]], [[TMP95]]
+; CHECK-NEXT:    [[TMP97:%.*]] = icmp eq i32 [[TMP96]], 0
+; CHECK-NEXT:    [[TMP98:%.*]] = add i32 [[TMP89]], 1
+; CHECK-NEXT:    [[TMP99:%.*]] = sub i32 [[TMP89]], 1
+; CHECK-NEXT:    [[TMP100:%.*]] = select i1 [[TMP97]], i32 [[TMP89]], i32 [[TMP98]]
+; CHECK-NEXT:    [[TMP101:%.*]] = select i1 [[TMP94]], i32 [[TMP100]], i32 [[TMP99]]
+; CHECK-NEXT:    [[TMP102:%.*]] = xor i32 [[TMP101]], [[TMP57]]
+; CHECK-NEXT:    [[TMP103:%.*]] = sub i32 [[TMP102]], [[TMP57]]
+; CHECK-NEXT:    [[TMP104:%.*]] = insertelement <4 x i32> [[TMP52]], i32 [[TMP103]], i64 1
+; CHECK-NEXT:    [[TMP105:%.*]] = extractelement <4 x i32> [[X]], i64 2
+; CHECK-NEXT:    [[TMP106:%.*]] = extractelement <4 x i32> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP107:%.*]] = ashr i32 [[TMP105]], 31
+; CHECK-NEXT:    [[TMP108:%.*]] = ashr i32 [[TMP106]], 31
+; CHECK-NEXT:    [[TMP109:%.*]] = xor i32 [[TMP107]], [[TMP108]]
+; CHECK-NEXT:    [[TMP110:%.*]] = add i32 [[TMP105]], [[TMP107]]
+; CHECK-NEXT:    [[TMP111:%.*]] = add i32 [[TMP106]], [[TMP108]]
+; CHECK-NEXT:    [[TMP112:%.*]] = xor i32 [[TMP110]], [[TMP107]]
+; CHECK-NEXT:    [[TMP113:%.*]] = xor i32 [[TMP111]], [[TMP108]]
+; CHECK-NEXT:    [[TMP114:%.*]] = uitofp i32 [[TMP113]] to float
+; CHECK-NEXT:    [[TMP115:%.*]] = fdiv fast float 1.000000e+00, [[TMP114]]
+; CHECK-NEXT:    [[TMP116:%.*]] = fmul fast float [[TMP115]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP117:%.*]] = fptoui float [[TMP116]] to i32
+; CHECK-NEXT:    [[TMP118:%.*]] = zext i32 [[TMP117]] to i64
+; CHECK-NEXT:    [[TMP119:%.*]] = zext i32 [[TMP113]] to i64
+; CHECK-NEXT:    [[TMP120:%.*]] = mul i64 [[TMP118]], [[TMP119]]
+; CHECK-NEXT:    [[TMP121:%.*]] = trunc i64 [[TMP120]] to i32
+; CHECK-NEXT:    [[TMP122:%.*]] = lshr i64 [[TMP120]], 32
+; CHECK-NEXT:    [[TMP123:%.*]] = trunc i64 [[TMP122]] to i32
+; CHECK-NEXT:    [[TMP124:%.*]] = sub i32 0, [[TMP121]]
+; CHECK-NEXT:    [[TMP125:%.*]] = icmp eq i32 [[TMP123]], 0
+; CHECK-NEXT:    [[TMP126:%.*]] = select i1 [[TMP125]], i32 [[TMP124]], i32 [[TMP121]]
+; CHECK-NEXT:    [[TMP127:%.*]] = zext i32 [[TMP126]] to i64
+; CHECK-NEXT:    [[TMP128:%.*]] = zext i32 [[TMP117]] to i64
+; CHECK-NEXT:    [[TMP129:%.*]] = mul i64 [[TMP127]], [[TMP128]]
+; CHECK-NEXT:    [[TMP130:%.*]] = trunc i64 [[TMP129]] to i32
+; CHECK-NEXT:    [[TMP131:%.*]] = lshr i64 [[TMP129]], 32
+; CHECK-NEXT:    [[TMP132:%.*]] = trunc i64 [[TMP131]] to i32
+; CHECK-NEXT:    [[TMP133:%.*]] = add i32 [[TMP117]], [[TMP132]]
+; CHECK-NEXT:    [[TMP134:%.*]] = sub i32 [[TMP117]], [[TMP132]]
+; CHECK-NEXT:    [[TMP135:%.*]] = select i1 [[TMP125]], i32 [[TMP133]], i32 [[TMP134]]
+; CHECK-NEXT:    [[TMP136:%.*]] = zext i32 [[TMP135]] to i64
+; CHECK-NEXT:    [[TMP137:%.*]] = zext i32 [[TMP112]] to i64
+; CHECK-NEXT:    [[TMP138:%.*]] = mul i64 [[TMP136]], [[TMP137]]
+; CHECK-NEXT:    [[TMP139:%.*]] = trunc i64 [[TMP138]] to i32
+; CHECK-NEXT:    [[TMP140:%.*]] = lshr i64 [[TMP138]], 32
+; CHECK-NEXT:    [[TMP141:%.*]] = trunc i64 [[TMP140]] to i32
+; CHECK-NEXT:    [[TMP142:%.*]] = mul i32 [[TMP141]], [[TMP113]]
+; CHECK-NEXT:    [[TMP143:%.*]] = sub i32 [[TMP112]], [[TMP142]]
+; CHECK-NEXT:    [[TMP144:%.*]] = icmp uge i32 [[TMP143]], [[TMP113]]
+; CHECK-NEXT:    [[TMP145:%.*]] = select i1 [[TMP144]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP146:%.*]] = icmp uge i32 [[TMP112]], [[TMP142]]
+; CHECK-NEXT:    [[TMP147:%.*]] = select i1 [[TMP146]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP148:%.*]] = and i32 [[TMP145]], [[TMP147]]
+; CHECK-NEXT:    [[TMP149:%.*]] = icmp eq i32 [[TMP148]], 0
+; CHECK-NEXT:    [[TMP150:%.*]] = add i32 [[TMP141]], 1
+; CHECK-NEXT:    [[TMP151:%.*]] = sub i32 [[TMP141]], 1
+; CHECK-NEXT:    [[TMP152:%.*]] = select i1 [[TMP149]], i32 [[TMP141]], i32 [[TMP150]]
+; CHECK-NEXT:    [[TMP153:%.*]] = select i1 [[TMP146]], i32 [[TMP152]], i32 [[TMP151]]
+; CHECK-NEXT:    [[TMP154:%.*]] = xor i32 [[TMP153]], [[TMP109]]
+; CHECK-NEXT:    [[TMP155:%.*]] = sub i32 [[TMP154]], [[TMP109]]
+; CHECK-NEXT:    [[TMP156:%.*]] = insertelement <4 x i32> [[TMP104]], i32 [[TMP155]], i64 2
+; CHECK-NEXT:    [[TMP157:%.*]] = extractelement <4 x i32> [[X]], i64 3
+; CHECK-NEXT:    [[TMP158:%.*]] = extractelement <4 x i32> [[Y]], i64 3
+; CHECK-NEXT:    [[TMP159:%.*]] = ashr i32 [[TMP157]], 31
+; CHECK-NEXT:    [[TMP160:%.*]] = ashr i32 [[TMP158]], 31
+; CHECK-NEXT:    [[TMP161:%.*]] = xor i32 [[TMP159]], [[TMP160]]
+; CHECK-NEXT:    [[TMP162:%.*]] = add i32 [[TMP157]], [[TMP159]]
+; CHECK-NEXT:    [[TMP163:%.*]] = add i32 [[TMP158]], [[TMP160]]
+; CHECK-NEXT:    [[TMP164:%.*]] = xor i32 [[TMP162]], [[TMP159]]
+; CHECK-NEXT:    [[TMP165:%.*]] = xor i32 [[TMP163]], [[TMP160]]
+; CHECK-NEXT:    [[TMP166:%.*]] = uitofp i32 [[TMP165]] to float
+; CHECK-NEXT:    [[TMP167:%.*]] = fdiv fast float 1.000000e+00, [[TMP166]]
+; CHECK-NEXT:    [[TMP168:%.*]] = fmul fast float [[TMP167]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP169:%.*]] = fptoui float [[TMP168]] to i32
+; CHECK-NEXT:    [[TMP170:%.*]] = zext i32 [[TMP169]] to i64
+; CHECK-NEXT:    [[TMP171:%.*]] = zext i32 [[TMP165]] to i64
+; CHECK-NEXT:    [[TMP172:%.*]] = mul i64 [[TMP170]], [[TMP171]]
+; CHECK-NEXT:    [[TMP173:%.*]] = trunc i64 [[TMP172]] to i32
+; CHECK-NEXT:    [[TMP174:%.*]] = lshr i64 [[TMP172]], 32
+; CHECK-NEXT:    [[TMP175:%.*]] = trunc i64 [[TMP174]] to i32
+; CHECK-NEXT:    [[TMP176:%.*]] = sub i32 0, [[TMP173]]
+; CHECK-NEXT:    [[TMP177:%.*]] = icmp eq i32 [[TMP175]], 0
+; CHECK-NEXT:    [[TMP178:%.*]] = select i1 [[TMP177]], i32 [[TMP176]], i32 [[TMP173]]
+; CHECK-NEXT:    [[TMP179:%.*]] = zext i32 [[TMP178]] to i64
+; CHECK-NEXT:    [[TMP180:%.*]] = zext i32 [[TMP169]] to i64
+; CHECK-NEXT:    [[TMP181:%.*]] = mul i64 [[TMP179]], [[TMP180]]
+; CHECK-NEXT:    [[TMP182:%.*]] = trunc i64 [[TMP181]] to i32
+; CHECK-NEXT:    [[TMP183:%.*]] = lshr i64 [[TMP181]], 32
+; CHECK-NEXT:    [[TMP184:%.*]] = trunc i64 [[TMP183]] to i32
+; CHECK-NEXT:    [[TMP185:%.*]] = add i32 [[TMP169]], [[TMP184]]
+; CHECK-NEXT:    [[TMP186:%.*]] = sub i32 [[TMP169]], [[TMP184]]
+; CHECK-NEXT:    [[TMP187:%.*]] = select i1 [[TMP177]], i32 [[TMP185]], i32 [[TMP186]]
+; CHECK-NEXT:    [[TMP188:%.*]] = zext i32 [[TMP187]] to i64
+; CHECK-NEXT:    [[TMP189:%.*]] = zext i32 [[TMP164]] to i64
+; CHECK-NEXT:    [[TMP190:%.*]] = mul i64 [[TMP188]], [[TMP189]]
+; CHECK-NEXT:    [[TMP191:%.*]] = trunc i64 [[TMP190]] to i32
+; CHECK-NEXT:    [[TMP192:%.*]] = lshr i64 [[TMP190]], 32
+; CHECK-NEXT:    [[TMP193:%.*]] = trunc i64 [[TMP192]] to i32
+; CHECK-NEXT:    [[TMP194:%.*]] = mul i32 [[TMP193]], [[TMP165]]
+; CHECK-NEXT:    [[TMP195:%.*]] = sub i32 [[TMP164]], [[TMP194]]
+; CHECK-NEXT:    [[TMP196:%.*]] = icmp uge i32 [[TMP195]], [[TMP165]]
+; CHECK-NEXT:    [[TMP197:%.*]] = select i1 [[TMP196]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP198:%.*]] = icmp uge i32 [[TMP164]], [[TMP194]]
+; CHECK-NEXT:    [[TMP199:%.*]] = select i1 [[TMP198]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP200:%.*]] = and i32 [[TMP197]], [[TMP199]]
+; CHECK-NEXT:    [[TMP201:%.*]] = icmp eq i32 [[TMP200]], 0
+; CHECK-NEXT:    [[TMP202:%.*]] = add i32 [[TMP193]], 1
+; CHECK-NEXT:    [[TMP203:%.*]] = sub i32 [[TMP193]], 1
+; CHECK-NEXT:    [[TMP204:%.*]] = select i1 [[TMP201]], i32 [[TMP193]], i32 [[TMP202]]
+; CHECK-NEXT:    [[TMP205:%.*]] = select i1 [[TMP198]], i32 [[TMP204]], i32 [[TMP203]]
+; CHECK-NEXT:    [[TMP206:%.*]] = xor i32 [[TMP205]], [[TMP161]]
+; CHECK-NEXT:    [[TMP207:%.*]] = sub i32 [[TMP206]], [[TMP161]]
+; CHECK-NEXT:    [[TMP208:%.*]] = insertelement <4 x i32> [[TMP156]], i32 [[TMP207]], i64 3
+; CHECK-NEXT:    store <4 x i32> [[TMP208]], <4 x i32> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = sdiv <4 x i32> %x, %y
+  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+; Checks the element-wise IR expansion of a <4 x i32> srem. Each lane uses
+; the same reciprocal-based unsigned expansion as the div tests, but the
+; final selects adjust the partial remainder by +/- the denominator instead
+; of correcting a quotient, and the result sign is restored with xor/sub
+; against the sign of the dividend only (the ashr of x), matching srem
+; semantics where the remainder takes the sign of the dividend.
+define amdgpu_kernel void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @srem_v4i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i32> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = ashr i32 [[TMP1]], 31
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP2]], 31
+; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i32 [[TMP5]], [[TMP3]]
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i32 [[TMP6]], [[TMP4]]
+; CHECK-NEXT:    [[TMP9:%.*]] = uitofp i32 [[TMP8]] to float
+; CHECK-NEXT:    [[TMP10:%.*]] = fdiv fast float 1.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP10]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP11]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP17:%.*]] = lshr i64 [[TMP15]], 32
+; CHECK-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
+; CHECK-NEXT:    [[TMP19:%.*]] = sub i32 0, [[TMP16]]
+; CHECK-NEXT:    [[TMP20:%.*]] = icmp eq i32 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP19]], i32 [[TMP16]]
+; CHECK-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = mul i64 [[TMP22]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = trunc i64 [[TMP24]] to i32
+; CHECK-NEXT:    [[TMP26:%.*]] = lshr i64 [[TMP24]], 32
+; CHECK-NEXT:    [[TMP27:%.*]] = trunc i64 [[TMP26]] to i32
+; CHECK-NEXT:    [[TMP28:%.*]] = add i32 [[TMP12]], [[TMP27]]
+; CHECK-NEXT:    [[TMP29:%.*]] = sub i32 [[TMP12]], [[TMP27]]
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP20]], i32 [[TMP28]], i32 [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = zext i32 [[TMP30]] to i64
+; CHECK-NEXT:    [[TMP32:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP33:%.*]] = mul i64 [[TMP31]], [[TMP32]]
+; CHECK-NEXT:    [[TMP34:%.*]] = trunc i64 [[TMP33]] to i32
+; CHECK-NEXT:    [[TMP35:%.*]] = lshr i64 [[TMP33]], 32
+; CHECK-NEXT:    [[TMP36:%.*]] = trunc i64 [[TMP35]] to i32
+; CHECK-NEXT:    [[TMP37:%.*]] = mul i32 [[TMP36]], [[TMP8]]
+; CHECK-NEXT:    [[TMP38:%.*]] = sub i32 [[TMP7]], [[TMP37]]
+; CHECK-NEXT:    [[TMP39:%.*]] = icmp uge i32 [[TMP38]], [[TMP8]]
+; CHECK-NEXT:    [[TMP40:%.*]] = select i1 [[TMP39]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP41:%.*]] = icmp uge i32 [[TMP7]], [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = select i1 [[TMP41]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP43:%.*]] = and i32 [[TMP40]], [[TMP42]]
+; CHECK-NEXT:    [[TMP44:%.*]] = icmp eq i32 [[TMP43]], 0
+; CHECK-NEXT:    [[TMP45:%.*]] = sub i32 [[TMP38]], [[TMP8]]
+; CHECK-NEXT:    [[TMP46:%.*]] = add i32 [[TMP38]], [[TMP8]]
+; CHECK-NEXT:    [[TMP47:%.*]] = select i1 [[TMP44]], i32 [[TMP38]], i32 [[TMP45]]
+; CHECK-NEXT:    [[TMP48:%.*]] = select i1 [[TMP41]], i32 [[TMP47]], i32 [[TMP46]]
+; CHECK-NEXT:    [[TMP49:%.*]] = xor i32 [[TMP48]], [[TMP3]]
+; CHECK-NEXT:    [[TMP50:%.*]] = sub i32 [[TMP49]], [[TMP3]]
+; CHECK-NEXT:    [[TMP51:%.*]] = insertelement <4 x i32> undef, i32 [[TMP50]], i64 0
+; CHECK-NEXT:    [[TMP52:%.*]] = extractelement <4 x i32> [[X]], i64 1
+; CHECK-NEXT:    [[TMP53:%.*]] = extractelement <4 x i32> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP54:%.*]] = ashr i32 [[TMP52]], 31
+; CHECK-NEXT:    [[TMP55:%.*]] = ashr i32 [[TMP53]], 31
+; CHECK-NEXT:    [[TMP56:%.*]] = add i32 [[TMP52]], [[TMP54]]
+; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP53]], [[TMP55]]
+; CHECK-NEXT:    [[TMP58:%.*]] = xor i32 [[TMP56]], [[TMP54]]
+; CHECK-NEXT:    [[TMP59:%.*]] = xor i32 [[TMP57]], [[TMP55]]
+; CHECK-NEXT:    [[TMP60:%.*]] = uitofp i32 [[TMP59]] to float
+; CHECK-NEXT:    [[TMP61:%.*]] = fdiv fast float 1.000000e+00, [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = fmul fast float [[TMP61]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP63:%.*]] = fptoui float [[TMP62]] to i32
+; CHECK-NEXT:    [[TMP64:%.*]] = zext i32 [[TMP63]] to i64
+; CHECK-NEXT:    [[TMP65:%.*]] = zext i32 [[TMP59]] to i64
+; CHECK-NEXT:    [[TMP66:%.*]] = mul i64 [[TMP64]], [[TMP65]]
+; CHECK-NEXT:    [[TMP67:%.*]] = trunc i64 [[TMP66]] to i32
+; CHECK-NEXT:    [[TMP68:%.*]] = lshr i64 [[TMP66]], 32
+; CHECK-NEXT:    [[TMP69:%.*]] = trunc i64 [[TMP68]] to i32
+; CHECK-NEXT:    [[TMP70:%.*]] = sub i32 0, [[TMP67]]
+; CHECK-NEXT:    [[TMP71:%.*]] = icmp eq i32 [[TMP69]], 0
+; CHECK-NEXT:    [[TMP72:%.*]] = select i1 [[TMP71]], i32 [[TMP70]], i32 [[TMP67]]
+; CHECK-NEXT:    [[TMP73:%.*]] = zext i32 [[TMP72]] to i64
+; CHECK-NEXT:    [[TMP74:%.*]] = zext i32 [[TMP63]] to i64
+; CHECK-NEXT:    [[TMP75:%.*]] = mul i64 [[TMP73]], [[TMP74]]
+; CHECK-NEXT:    [[TMP76:%.*]] = trunc i64 [[TMP75]] to i32
+; CHECK-NEXT:    [[TMP77:%.*]] = lshr i64 [[TMP75]], 32
+; CHECK-NEXT:    [[TMP78:%.*]] = trunc i64 [[TMP77]] to i32
+; CHECK-NEXT:    [[TMP79:%.*]] = add i32 [[TMP63]], [[TMP78]]
+; CHECK-NEXT:    [[TMP80:%.*]] = sub i32 [[TMP63]], [[TMP78]]
+; CHECK-NEXT:    [[TMP81:%.*]] = select i1 [[TMP71]], i32 [[TMP79]], i32 [[TMP80]]
+; CHECK-NEXT:    [[TMP82:%.*]] = zext i32 [[TMP81]] to i64
+; CHECK-NEXT:    [[TMP83:%.*]] = zext i32 [[TMP58]] to i64
+; CHECK-NEXT:    [[TMP84:%.*]] = mul i64 [[TMP82]], [[TMP83]]
+; CHECK-NEXT:    [[TMP85:%.*]] = trunc i64 [[TMP84]] to i32
+; CHECK-NEXT:    [[TMP86:%.*]] = lshr i64 [[TMP84]], 32
+; CHECK-NEXT:    [[TMP87:%.*]] = trunc i64 [[TMP86]] to i32
+; CHECK-NEXT:    [[TMP88:%.*]] = mul i32 [[TMP87]], [[TMP59]]
+; CHECK-NEXT:    [[TMP89:%.*]] = sub i32 [[TMP58]], [[TMP88]]
+; CHECK-NEXT:    [[TMP90:%.*]] = icmp uge i32 [[TMP89]], [[TMP59]]
+; CHECK-NEXT:    [[TMP91:%.*]] = select i1 [[TMP90]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP92:%.*]] = icmp uge i32 [[TMP58]], [[TMP88]]
+; CHECK-NEXT:    [[TMP93:%.*]] = select i1 [[TMP92]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP94:%.*]] = and i32 [[TMP91]], [[TMP93]]
+; CHECK-NEXT:    [[TMP95:%.*]] = icmp eq i32 [[TMP94]], 0
+; CHECK-NEXT:    [[TMP96:%.*]] = sub i32 [[TMP89]], [[TMP59]]
+; CHECK-NEXT:    [[TMP97:%.*]] = add i32 [[TMP89]], [[TMP59]]
+; CHECK-NEXT:    [[TMP98:%.*]] = select i1 [[TMP95]], i32 [[TMP89]], i32 [[TMP96]]
+; CHECK-NEXT:    [[TMP99:%.*]] = select i1 [[TMP92]], i32 [[TMP98]], i32 [[TMP97]]
+; CHECK-NEXT:    [[TMP100:%.*]] = xor i32 [[TMP99]], [[TMP54]]
+; CHECK-NEXT:    [[TMP101:%.*]] = sub i32 [[TMP100]], [[TMP54]]
+; CHECK-NEXT:    [[TMP102:%.*]] = insertelement <4 x i32> [[TMP51]], i32 [[TMP101]], i64 1
+; CHECK-NEXT:    [[TMP103:%.*]] = extractelement <4 x i32> [[X]], i64 2
+; CHECK-NEXT:    [[TMP104:%.*]] = extractelement <4 x i32> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP105:%.*]] = ashr i32 [[TMP103]], 31
+; CHECK-NEXT:    [[TMP106:%.*]] = ashr i32 [[TMP104]], 31
+; CHECK-NEXT:    [[TMP107:%.*]] = add i32 [[TMP103]], [[TMP105]]
+; CHECK-NEXT:    [[TMP108:%.*]] = add i32 [[TMP104]], [[TMP106]]
+; CHECK-NEXT:    [[TMP109:%.*]] = xor i32 [[TMP107]], [[TMP105]]
+; CHECK-NEXT:    [[TMP110:%.*]] = xor i32 [[TMP108]], [[TMP106]]
+; CHECK-NEXT:    [[TMP111:%.*]] = uitofp i32 [[TMP110]] to float
+; CHECK-NEXT:    [[TMP112:%.*]] = fdiv fast float 1.000000e+00, [[TMP111]]
+; CHECK-NEXT:    [[TMP113:%.*]] = fmul fast float [[TMP112]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP114:%.*]] = fptoui float [[TMP113]] to i32
+; CHECK-NEXT:    [[TMP115:%.*]] = zext i32 [[TMP114]] to i64
+; CHECK-NEXT:    [[TMP116:%.*]] = zext i32 [[TMP110]] to i64
+; CHECK-NEXT:    [[TMP117:%.*]] = mul i64 [[TMP115]], [[TMP116]]
+; CHECK-NEXT:    [[TMP118:%.*]] = trunc i64 [[TMP117]] to i32
+; CHECK-NEXT:    [[TMP119:%.*]] = lshr i64 [[TMP117]], 32
+; CHECK-NEXT:    [[TMP120:%.*]] = trunc i64 [[TMP119]] to i32
+; CHECK-NEXT:    [[TMP121:%.*]] = sub i32 0, [[TMP118]]
+; CHECK-NEXT:    [[TMP122:%.*]] = icmp eq i32 [[TMP120]], 0
+; CHECK-NEXT:    [[TMP123:%.*]] = select i1 [[TMP122]], i32 [[TMP121]], i32 [[TMP118]]
+; CHECK-NEXT:    [[TMP124:%.*]] = zext i32 [[TMP123]] to i64
+; CHECK-NEXT:    [[TMP125:%.*]] = zext i32 [[TMP114]] to i64
+; CHECK-NEXT:    [[TMP126:%.*]] = mul i64 [[TMP124]], [[TMP125]]
+; CHECK-NEXT:    [[TMP127:%.*]] = trunc i64 [[TMP126]] to i32
+; CHECK-NEXT:    [[TMP128:%.*]] = lshr i64 [[TMP126]], 32
+; CHECK-NEXT:    [[TMP129:%.*]] = trunc i64 [[TMP128]] to i32
+; CHECK-NEXT:    [[TMP130:%.*]] = add i32 [[TMP114]], [[TMP129]]
+; CHECK-NEXT:    [[TMP131:%.*]] = sub i32 [[TMP114]], [[TMP129]]
+; CHECK-NEXT:    [[TMP132:%.*]] = select i1 [[TMP122]], i32 [[TMP130]], i32 [[TMP131]]
+; CHECK-NEXT:    [[TMP133:%.*]] = zext i32 [[TMP132]] to i64
+; CHECK-NEXT:    [[TMP134:%.*]] = zext i32 [[TMP109]] to i64
+; CHECK-NEXT:    [[TMP135:%.*]] = mul i64 [[TMP133]], [[TMP134]]
+; CHECK-NEXT:    [[TMP136:%.*]] = trunc i64 [[TMP135]] to i32
+; CHECK-NEXT:    [[TMP137:%.*]] = lshr i64 [[TMP135]], 32
+; CHECK-NEXT:    [[TMP138:%.*]] = trunc i64 [[TMP137]] to i32
+; CHECK-NEXT:    [[TMP139:%.*]] = mul i32 [[TMP138]], [[TMP110]]
+; CHECK-NEXT:    [[TMP140:%.*]] = sub i32 [[TMP109]], [[TMP139]]
+; CHECK-NEXT:    [[TMP141:%.*]] = icmp uge i32 [[TMP140]], [[TMP110]]
+; CHECK-NEXT:    [[TMP142:%.*]] = select i1 [[TMP141]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP143:%.*]] = icmp uge i32 [[TMP109]], [[TMP139]]
+; CHECK-NEXT:    [[TMP144:%.*]] = select i1 [[TMP143]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP145:%.*]] = and i32 [[TMP142]], [[TMP144]]
+; CHECK-NEXT:    [[TMP146:%.*]] = icmp eq i32 [[TMP145]], 0
+; CHECK-NEXT:    [[TMP147:%.*]] = sub i32 [[TMP140]], [[TMP110]]
+; CHECK-NEXT:    [[TMP148:%.*]] = add i32 [[TMP140]], [[TMP110]]
+; CHECK-NEXT:    [[TMP149:%.*]] = select i1 [[TMP146]], i32 [[TMP140]], i32 [[TMP147]]
+; CHECK-NEXT:    [[TMP150:%.*]] = select i1 [[TMP143]], i32 [[TMP149]], i32 [[TMP148]]
+; CHECK-NEXT:    [[TMP151:%.*]] = xor i32 [[TMP150]], [[TMP105]]
+; CHECK-NEXT:    [[TMP152:%.*]] = sub i32 [[TMP151]], [[TMP105]]
+; CHECK-NEXT:    [[TMP153:%.*]] = insertelement <4 x i32> [[TMP102]], i32 [[TMP152]], i64 2
+; CHECK-NEXT:    [[TMP154:%.*]] = extractelement <4 x i32> [[X]], i64 3
+; CHECK-NEXT:    [[TMP155:%.*]] = extractelement <4 x i32> [[Y]], i64 3
+; CHECK-NEXT:    [[TMP156:%.*]] = ashr i32 [[TMP154]], 31
+; CHECK-NEXT:    [[TMP157:%.*]] = ashr i32 [[TMP155]], 31
+; CHECK-NEXT:    [[TMP158:%.*]] = add i32 [[TMP154]], [[TMP156]]
+; CHECK-NEXT:    [[TMP159:%.*]] = add i32 [[TMP155]], [[TMP157]]
+; CHECK-NEXT:    [[TMP160:%.*]] = xor i32 [[TMP158]], [[TMP156]]
+; CHECK-NEXT:    [[TMP161:%.*]] = xor i32 [[TMP159]], [[TMP157]]
+; CHECK-NEXT:    [[TMP162:%.*]] = uitofp i32 [[TMP161]] to float
+; CHECK-NEXT:    [[TMP163:%.*]] = fdiv fast float 1.000000e+00, [[TMP162]]
+; CHECK-NEXT:    [[TMP164:%.*]] = fmul fast float [[TMP163]], 0x41F0000000000000
+; CHECK-NEXT:    [[TMP165:%.*]] = fptoui float [[TMP164]] to i32
+; CHECK-NEXT:    [[TMP166:%.*]] = zext i32 [[TMP165]] to i64
+; CHECK-NEXT:    [[TMP167:%.*]] = zext i32 [[TMP161]] to i64
+; CHECK-NEXT:    [[TMP168:%.*]] = mul i64 [[TMP166]], [[TMP167]]
+; CHECK-NEXT:    [[TMP169:%.*]] = trunc i64 [[TMP168]] to i32
+; CHECK-NEXT:    [[TMP170:%.*]] = lshr i64 [[TMP168]], 32
+; CHECK-NEXT:    [[TMP171:%.*]] = trunc i64 [[TMP170]] to i32
+; CHECK-NEXT:    [[TMP172:%.*]] = sub i32 0, [[TMP169]]
+; CHECK-NEXT:    [[TMP173:%.*]] = icmp eq i32 [[TMP171]], 0
+; CHECK-NEXT:    [[TMP174:%.*]] = select i1 [[TMP173]], i32 [[TMP172]], i32 [[TMP169]]
+; CHECK-NEXT:    [[TMP175:%.*]] = zext i32 [[TMP174]] to i64
+; CHECK-NEXT:    [[TMP176:%.*]] = zext i32 [[TMP165]] to i64
+; CHECK-NEXT:    [[TMP177:%.*]] = mul i64 [[TMP175]], [[TMP176]]
+; CHECK-NEXT:    [[TMP178:%.*]] = trunc i64 [[TMP177]] to i32
+; CHECK-NEXT:    [[TMP179:%.*]] = lshr i64 [[TMP177]], 32
+; CHECK-NEXT:    [[TMP180:%.*]] = trunc i64 [[TMP179]] to i32
+; CHECK-NEXT:    [[TMP181:%.*]] = add i32 [[TMP165]], [[TMP180]]
+; CHECK-NEXT:    [[TMP182:%.*]] = sub i32 [[TMP165]], [[TMP180]]
+; CHECK-NEXT:    [[TMP183:%.*]] = select i1 [[TMP173]], i32 [[TMP181]], i32 [[TMP182]]
+; CHECK-NEXT:    [[TMP184:%.*]] = zext i32 [[TMP183]] to i64
+; CHECK-NEXT:    [[TMP185:%.*]] = zext i32 [[TMP160]] to i64
+; CHECK-NEXT:    [[TMP186:%.*]] = mul i64 [[TMP184]], [[TMP185]]
+; CHECK-NEXT:    [[TMP187:%.*]] = trunc i64 [[TMP186]] to i32
+; CHECK-NEXT:    [[TMP188:%.*]] = lshr i64 [[TMP186]], 32
+; CHECK-NEXT:    [[TMP189:%.*]] = trunc i64 [[TMP188]] to i32
+; CHECK-NEXT:    [[TMP190:%.*]] = mul i32 [[TMP189]], [[TMP161]]
+; CHECK-NEXT:    [[TMP191:%.*]] = sub i32 [[TMP160]], [[TMP190]]
+; CHECK-NEXT:    [[TMP192:%.*]] = icmp uge i32 [[TMP191]], [[TMP161]]
+; CHECK-NEXT:    [[TMP193:%.*]] = select i1 [[TMP192]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP194:%.*]] = icmp uge i32 [[TMP160]], [[TMP190]]
+; CHECK-NEXT:    [[TMP195:%.*]] = select i1 [[TMP194]], i32 -1, i32 0
+; CHECK-NEXT:    [[TMP196:%.*]] = and i32 [[TMP193]], [[TMP195]]
+; CHECK-NEXT:    [[TMP197:%.*]] = icmp eq i32 [[TMP196]], 0
+; CHECK-NEXT:    [[TMP198:%.*]] = sub i32 [[TMP191]], [[TMP161]]
+; CHECK-NEXT:    [[TMP199:%.*]] = add i32 [[TMP191]], [[TMP161]]
+; CHECK-NEXT:    [[TMP200:%.*]] = select i1 [[TMP197]], i32 [[TMP191]], i32 [[TMP198]]
+; CHECK-NEXT:    [[TMP201:%.*]] = select i1 [[TMP194]], i32 [[TMP200]], i32 [[TMP199]]
+; CHECK-NEXT:    [[TMP202:%.*]] = xor i32 [[TMP201]], [[TMP156]]
+; CHECK-NEXT:    [[TMP203:%.*]] = sub i32 [[TMP202]], [[TMP156]]
+; CHECK-NEXT:    [[TMP204:%.*]] = insertelement <4 x i32> [[TMP153]], i32 [[TMP203]], i64 3
+; CHECK-NEXT:    store <4 x i32> [[TMP204]], <4 x i32> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = srem <4 x i32> %x, %y
+  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+; <4 x i16> udiv is scalarized: each lane is zero-extended to i32 and divided
+; via the fast-float expansion (rcp, trunc, fmad.ftz quotient fix-up), then the
+; result is masked to 16 bits (`and 65535`) before being truncated and
+; re-inserted into the result vector.
+define amdgpu_kernel void @udiv_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x, <4 x i16> %y) {
+; CHECK-LABEL: @udiv_v4i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i16> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i16> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = fdiv fast float 1.000000e+00, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fsub fast float -0.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
+; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 65535
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
+; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i16> undef, i16 [[TMP19]], i64 0
+; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <4 x i16> [[X]], i64 1
+; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <4 x i16> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP23:%.*]] = zext i16 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP24:%.*]] = zext i16 [[TMP22]] to i32
+; CHECK-NEXT:    [[TMP25:%.*]] = uitofp i32 [[TMP23]] to float
+; CHECK-NEXT:    [[TMP26:%.*]] = uitofp i32 [[TMP24]] to float
+; CHECK-NEXT:    [[TMP27:%.*]] = fdiv fast float 1.000000e+00, [[TMP26]]
+; CHECK-NEXT:    [[TMP28:%.*]] = fmul fast float [[TMP25]], [[TMP27]]
+; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.trunc.f32(float [[TMP28]])
+; CHECK-NEXT:    [[TMP30:%.*]] = fsub fast float -0.000000e+00, [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP30]], float [[TMP26]], float [[TMP25]])
+; CHECK-NEXT:    [[TMP32:%.*]] = fptoui float [[TMP29]] to i32
+; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.fabs.f32(float [[TMP31]])
+; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.fabs.f32(float [[TMP26]])
+; CHECK-NEXT:    [[TMP35:%.*]] = fcmp fast oge float [[TMP33]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP35]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP32]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = and i32 [[TMP37]], 65535
+; CHECK-NEXT:    [[TMP39:%.*]] = trunc i32 [[TMP38]] to i16
+; CHECK-NEXT:    [[TMP40:%.*]] = insertelement <4 x i16> [[TMP20]], i16 [[TMP39]], i64 1
+; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <4 x i16> [[X]], i64 2
+; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <4 x i16> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP43:%.*]] = zext i16 [[TMP41]] to i32
+; CHECK-NEXT:    [[TMP44:%.*]] = zext i16 [[TMP42]] to i32
+; CHECK-NEXT:    [[TMP45:%.*]] = uitofp i32 [[TMP43]] to float
+; CHECK-NEXT:    [[TMP46:%.*]] = uitofp i32 [[TMP44]] to float
+; CHECK-NEXT:    [[TMP47:%.*]] = fdiv fast float 1.000000e+00, [[TMP46]]
+; CHECK-NEXT:    [[TMP48:%.*]] = fmul fast float [[TMP45]], [[TMP47]]
+; CHECK-NEXT:    [[TMP49:%.*]] = call fast float @llvm.trunc.f32(float [[TMP48]])
+; CHECK-NEXT:    [[TMP50:%.*]] = fsub fast float -0.000000e+00, [[TMP49]]
+; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP50]], float [[TMP46]], float [[TMP45]])
+; CHECK-NEXT:    [[TMP52:%.*]] = fptoui float [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.fabs.f32(float [[TMP51]])
+; CHECK-NEXT:    [[TMP54:%.*]] = call fast float @llvm.fabs.f32(float [[TMP46]])
+; CHECK-NEXT:    [[TMP55:%.*]] = fcmp fast oge float [[TMP53]], [[TMP54]]
+; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP55]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP52]], [[TMP56]]
+; CHECK-NEXT:    [[TMP58:%.*]] = and i32 [[TMP57]], 65535
+; CHECK-NEXT:    [[TMP59:%.*]] = trunc i32 [[TMP58]] to i16
+; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <4 x i16> [[TMP40]], i16 [[TMP59]], i64 2
+; CHECK-NEXT:    [[TMP61:%.*]] = extractelement <4 x i16> [[X]], i64 3
+; CHECK-NEXT:    [[TMP62:%.*]] = extractelement <4 x i16> [[Y]], i64 3
+; CHECK-NEXT:    [[TMP63:%.*]] = zext i16 [[TMP61]] to i32
+; CHECK-NEXT:    [[TMP64:%.*]] = zext i16 [[TMP62]] to i32
+; CHECK-NEXT:    [[TMP65:%.*]] = uitofp i32 [[TMP63]] to float
+; CHECK-NEXT:    [[TMP66:%.*]] = uitofp i32 [[TMP64]] to float
+; CHECK-NEXT:    [[TMP67:%.*]] = fdiv fast float 1.000000e+00, [[TMP66]]
+; CHECK-NEXT:    [[TMP68:%.*]] = fmul fast float [[TMP65]], [[TMP67]]
+; CHECK-NEXT:    [[TMP69:%.*]] = call fast float @llvm.trunc.f32(float [[TMP68]])
+; CHECK-NEXT:    [[TMP70:%.*]] = fsub fast float -0.000000e+00, [[TMP69]]
+; CHECK-NEXT:    [[TMP71:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP70]], float [[TMP66]], float [[TMP65]])
+; CHECK-NEXT:    [[TMP72:%.*]] = fptoui float [[TMP69]] to i32
+; CHECK-NEXT:    [[TMP73:%.*]] = call fast float @llvm.fabs.f32(float [[TMP71]])
+; CHECK-NEXT:    [[TMP74:%.*]] = call fast float @llvm.fabs.f32(float [[TMP66]])
+; CHECK-NEXT:    [[TMP75:%.*]] = fcmp fast oge float [[TMP73]], [[TMP74]]
+; CHECK-NEXT:    [[TMP76:%.*]] = select i1 [[TMP75]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP77:%.*]] = add i32 [[TMP72]], [[TMP76]]
+; CHECK-NEXT:    [[TMP78:%.*]] = and i32 [[TMP77]], 65535
+; CHECK-NEXT:    [[TMP79:%.*]] = trunc i32 [[TMP78]] to i16
+; CHECK-NEXT:    [[TMP80:%.*]] = insertelement <4 x i16> [[TMP60]], i16 [[TMP79]], i64 3
+; CHECK-NEXT:    store <4 x i16> [[TMP80]], <4 x i16> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = udiv <4 x i16> %x, %y
+  store <4 x i16> %r, <4 x i16> addrspace(1)* %out
+  ret void
+}
+
+; <4 x i16> urem: like udiv_v4i16, but after the per-lane fast-float quotient
+; is computed, the remainder is recovered as num - quotient*den and masked to
+; 16 bits before re-insertion.
+define amdgpu_kernel void @urem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x, <4 x i16> %y) {
+; CHECK-LABEL: @urem_v4i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i16> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i16> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = fdiv fast float 1.000000e+00, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fsub fast float -0.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
+; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP17]], [[TMP4]]
+; CHECK-NEXT:    [[TMP19:%.*]] = sub i32 [[TMP3]], [[TMP18]]
+; CHECK-NEXT:    [[TMP20:%.*]] = and i32 [[TMP19]], 65535
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
+; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i16> undef, i16 [[TMP21]], i64 0
+; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <4 x i16> [[X]], i64 1
+; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <4 x i16> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP25:%.*]] = zext i16 [[TMP23]] to i32
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i16 [[TMP24]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = uitofp i32 [[TMP25]] to float
+; CHECK-NEXT:    [[TMP28:%.*]] = uitofp i32 [[TMP26]] to float
+; CHECK-NEXT:    [[TMP29:%.*]] = fdiv fast float 1.000000e+00, [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = fmul fast float [[TMP27]], [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.trunc.f32(float [[TMP30]])
+; CHECK-NEXT:    [[TMP32:%.*]] = fsub fast float -0.000000e+00, [[TMP31]]
+; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP32]], float [[TMP28]], float [[TMP27]])
+; CHECK-NEXT:    [[TMP34:%.*]] = fptoui float [[TMP31]] to i32
+; CHECK-NEXT:    [[TMP35:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
+; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.fabs.f32(float [[TMP28]])
+; CHECK-NEXT:    [[TMP37:%.*]] = fcmp fast oge float [[TMP35]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP37]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP39:%.*]] = add i32 [[TMP34]], [[TMP38]]
+; CHECK-NEXT:    [[TMP40:%.*]] = mul i32 [[TMP39]], [[TMP26]]
+; CHECK-NEXT:    [[TMP41:%.*]] = sub i32 [[TMP25]], [[TMP40]]
+; CHECK-NEXT:    [[TMP42:%.*]] = and i32 [[TMP41]], 65535
+; CHECK-NEXT:    [[TMP43:%.*]] = trunc i32 [[TMP42]] to i16
+; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <4 x i16> [[TMP22]], i16 [[TMP43]], i64 1
+; CHECK-NEXT:    [[TMP45:%.*]] = extractelement <4 x i16> [[X]], i64 2
+; CHECK-NEXT:    [[TMP46:%.*]] = extractelement <4 x i16> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP47:%.*]] = zext i16 [[TMP45]] to i32
+; CHECK-NEXT:    [[TMP48:%.*]] = zext i16 [[TMP46]] to i32
+; CHECK-NEXT:    [[TMP49:%.*]] = uitofp i32 [[TMP47]] to float
+; CHECK-NEXT:    [[TMP50:%.*]] = uitofp i32 [[TMP48]] to float
+; CHECK-NEXT:    [[TMP51:%.*]] = fdiv fast float 1.000000e+00, [[TMP50]]
+; CHECK-NEXT:    [[TMP52:%.*]] = fmul fast float [[TMP49]], [[TMP51]]
+; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.trunc.f32(float [[TMP52]])
+; CHECK-NEXT:    [[TMP54:%.*]] = fsub fast float -0.000000e+00, [[TMP53]]
+; CHECK-NEXT:    [[TMP55:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP54]], float [[TMP50]], float [[TMP49]])
+; CHECK-NEXT:    [[TMP56:%.*]] = fptoui float [[TMP53]] to i32
+; CHECK-NEXT:    [[TMP57:%.*]] = call fast float @llvm.fabs.f32(float [[TMP55]])
+; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.fabs.f32(float [[TMP50]])
+; CHECK-NEXT:    [[TMP59:%.*]] = fcmp fast oge float [[TMP57]], [[TMP58]]
+; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP59]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP61:%.*]] = add i32 [[TMP56]], [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = mul i32 [[TMP61]], [[TMP48]]
+; CHECK-NEXT:    [[TMP63:%.*]] = sub i32 [[TMP47]], [[TMP62]]
+; CHECK-NEXT:    [[TMP64:%.*]] = and i32 [[TMP63]], 65535
+; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[TMP64]] to i16
+; CHECK-NEXT:    [[TMP66:%.*]] = insertelement <4 x i16> [[TMP44]], i16 [[TMP65]], i64 2
+; CHECK-NEXT:    [[TMP67:%.*]] = extractelement <4 x i16> [[X]], i64 3
+; CHECK-NEXT:    [[TMP68:%.*]] = extractelement <4 x i16> [[Y]], i64 3
+; CHECK-NEXT:    [[TMP69:%.*]] = zext i16 [[TMP67]] to i32
+; CHECK-NEXT:    [[TMP70:%.*]] = zext i16 [[TMP68]] to i32
+; CHECK-NEXT:    [[TMP71:%.*]] = uitofp i32 [[TMP69]] to float
+; CHECK-NEXT:    [[TMP72:%.*]] = uitofp i32 [[TMP70]] to float
+; CHECK-NEXT:    [[TMP73:%.*]] = fdiv fast float 1.000000e+00, [[TMP72]]
+; CHECK-NEXT:    [[TMP74:%.*]] = fmul fast float [[TMP71]], [[TMP73]]
+; CHECK-NEXT:    [[TMP75:%.*]] = call fast float @llvm.trunc.f32(float [[TMP74]])
+; CHECK-NEXT:    [[TMP76:%.*]] = fsub fast float -0.000000e+00, [[TMP75]]
+; CHECK-NEXT:    [[TMP77:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP76]], float [[TMP72]], float [[TMP71]])
+; CHECK-NEXT:    [[TMP78:%.*]] = fptoui float [[TMP75]] to i32
+; CHECK-NEXT:    [[TMP79:%.*]] = call fast float @llvm.fabs.f32(float [[TMP77]])
+; CHECK-NEXT:    [[TMP80:%.*]] = call fast float @llvm.fabs.f32(float [[TMP72]])
+; CHECK-NEXT:    [[TMP81:%.*]] = fcmp fast oge float [[TMP79]], [[TMP80]]
+; CHECK-NEXT:    [[TMP82:%.*]] = select i1 [[TMP81]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP83:%.*]] = add i32 [[TMP78]], [[TMP82]]
+; CHECK-NEXT:    [[TMP84:%.*]] = mul i32 [[TMP83]], [[TMP70]]
+; CHECK-NEXT:    [[TMP85:%.*]] = sub i32 [[TMP69]], [[TMP84]]
+; CHECK-NEXT:    [[TMP86:%.*]] = and i32 [[TMP85]], 65535
+; CHECK-NEXT:    [[TMP87:%.*]] = trunc i32 [[TMP86]] to i16
+; CHECK-NEXT:    [[TMP88:%.*]] = insertelement <4 x i16> [[TMP66]], i16 [[TMP87]], i64 3
+; CHECK-NEXT:    store <4 x i16> [[TMP88]], <4 x i16> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = urem <4 x i16> %x, %y
+  store <4 x i16> %r, <4 x i16> addrspace(1)* %out
+  ret void
+}
+
+; <4 x i16> sdiv: each lane is sign-extended to i32 and divided via the
+; signed fast-float expansion — the quotient's sign correction (+1/-1) is
+; derived from `xor`/`ashr 30`/`or 1` on the operands, and the result is
+; sign-extended back through i16 (trunc/sext/trunc) before re-insertion.
+define amdgpu_kernel void @sdiv_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x, <4 x i16> %y) {
+; CHECK-LABEL: @sdiv_v4i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i16> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i16> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
+; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP10:%.*]] = fdiv fast float 1.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fsub fast float -0.000000e+00, [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
+; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
+; CHECK-NEXT:    [[TMP22:%.*]] = sext i16 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
+; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <4 x i16> undef, i16 [[TMP23]], i64 0
+; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <4 x i16> [[X]], i64 1
+; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <4 x i16> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP27:%.*]] = sext i16 [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP28:%.*]] = sext i16 [[TMP26]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = xor i32 [[TMP27]], [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = ashr i32 [[TMP29]], 30
+; CHECK-NEXT:    [[TMP31:%.*]] = or i32 [[TMP30]], 1
+; CHECK-NEXT:    [[TMP32:%.*]] = sitofp i32 [[TMP27]] to float
+; CHECK-NEXT:    [[TMP33:%.*]] = sitofp i32 [[TMP28]] to float
+; CHECK-NEXT:    [[TMP34:%.*]] = fdiv fast float 1.000000e+00, [[TMP33]]
+; CHECK-NEXT:    [[TMP35:%.*]] = fmul fast float [[TMP32]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.trunc.f32(float [[TMP35]])
+; CHECK-NEXT:    [[TMP37:%.*]] = fsub fast float -0.000000e+00, [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP37]], float [[TMP33]], float [[TMP32]])
+; CHECK-NEXT:    [[TMP39:%.*]] = fptosi float [[TMP36]] to i32
+; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.fabs.f32(float [[TMP38]])
+; CHECK-NEXT:    [[TMP41:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
+; CHECK-NEXT:    [[TMP42:%.*]] = fcmp fast oge float [[TMP40]], [[TMP41]]
+; CHECK-NEXT:    [[TMP43:%.*]] = select i1 [[TMP42]], i32 [[TMP31]], i32 0
+; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP39]], [[TMP43]]
+; CHECK-NEXT:    [[TMP45:%.*]] = trunc i32 [[TMP44]] to i16
+; CHECK-NEXT:    [[TMP46:%.*]] = sext i16 [[TMP45]] to i32
+; CHECK-NEXT:    [[TMP47:%.*]] = trunc i32 [[TMP46]] to i16
+; CHECK-NEXT:    [[TMP48:%.*]] = insertelement <4 x i16> [[TMP24]], i16 [[TMP47]], i64 1
+; CHECK-NEXT:    [[TMP49:%.*]] = extractelement <4 x i16> [[X]], i64 2
+; CHECK-NEXT:    [[TMP50:%.*]] = extractelement <4 x i16> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP51:%.*]] = sext i16 [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP52:%.*]] = sext i16 [[TMP50]] to i32
+; CHECK-NEXT:    [[TMP53:%.*]] = xor i32 [[TMP51]], [[TMP52]]
+; CHECK-NEXT:    [[TMP54:%.*]] = ashr i32 [[TMP53]], 30
+; CHECK-NEXT:    [[TMP55:%.*]] = or i32 [[TMP54]], 1
+; CHECK-NEXT:    [[TMP56:%.*]] = sitofp i32 [[TMP51]] to float
+; CHECK-NEXT:    [[TMP57:%.*]] = sitofp i32 [[TMP52]] to float
+; CHECK-NEXT:    [[TMP58:%.*]] = fdiv fast float 1.000000e+00, [[TMP57]]
+; CHECK-NEXT:    [[TMP59:%.*]] = fmul fast float [[TMP56]], [[TMP58]]
+; CHECK-NEXT:    [[TMP60:%.*]] = call fast float @llvm.trunc.f32(float [[TMP59]])
+; CHECK-NEXT:    [[TMP61:%.*]] = fsub fast float -0.000000e+00, [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP61]], float [[TMP57]], float [[TMP56]])
+; CHECK-NEXT:    [[TMP63:%.*]] = fptosi float [[TMP60]] to i32
+; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.fabs.f32(float [[TMP62]])
+; CHECK-NEXT:    [[TMP65:%.*]] = call fast float @llvm.fabs.f32(float [[TMP57]])
+; CHECK-NEXT:    [[TMP66:%.*]] = fcmp fast oge float [[TMP64]], [[TMP65]]
+; CHECK-NEXT:    [[TMP67:%.*]] = select i1 [[TMP66]], i32 [[TMP55]], i32 0
+; CHECK-NEXT:    [[TMP68:%.*]] = add i32 [[TMP63]], [[TMP67]]
+; CHECK-NEXT:    [[TMP69:%.*]] = trunc i32 [[TMP68]] to i16
+; CHECK-NEXT:    [[TMP70:%.*]] = sext i16 [[TMP69]] to i32
+; CHECK-NEXT:    [[TMP71:%.*]] = trunc i32 [[TMP70]] to i16
+; CHECK-NEXT:    [[TMP72:%.*]] = insertelement <4 x i16> [[TMP48]], i16 [[TMP71]], i64 2
+; CHECK-NEXT:    [[TMP73:%.*]] = extractelement <4 x i16> [[X]], i64 3
+; CHECK-NEXT:    [[TMP74:%.*]] = extractelement <4 x i16> [[Y]], i64 3
+; CHECK-NEXT:    [[TMP75:%.*]] = sext i16 [[TMP73]] to i32
+; CHECK-NEXT:    [[TMP76:%.*]] = sext i16 [[TMP74]] to i32
+; CHECK-NEXT:    [[TMP77:%.*]] = xor i32 [[TMP75]], [[TMP76]]
+; CHECK-NEXT:    [[TMP78:%.*]] = ashr i32 [[TMP77]], 30
+; CHECK-NEXT:    [[TMP79:%.*]] = or i32 [[TMP78]], 1
+; CHECK-NEXT:    [[TMP80:%.*]] = sitofp i32 [[TMP75]] to float
+; CHECK-NEXT:    [[TMP81:%.*]] = sitofp i32 [[TMP76]] to float
+; CHECK-NEXT:    [[TMP82:%.*]] = fdiv fast float 1.000000e+00, [[TMP81]]
+; CHECK-NEXT:    [[TMP83:%.*]] = fmul fast float [[TMP80]], [[TMP82]]
+; CHECK-NEXT:    [[TMP84:%.*]] = call fast float @llvm.trunc.f32(float [[TMP83]])
+; CHECK-NEXT:    [[TMP85:%.*]] = fsub fast float -0.000000e+00, [[TMP84]]
+; CHECK-NEXT:    [[TMP86:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP85]], float [[TMP81]], float [[TMP80]])
+; CHECK-NEXT:    [[TMP87:%.*]] = fptosi float [[TMP84]] to i32
+; CHECK-NEXT:    [[TMP88:%.*]] = call fast float @llvm.fabs.f32(float [[TMP86]])
+; CHECK-NEXT:    [[TMP89:%.*]] = call fast float @llvm.fabs.f32(float [[TMP81]])
+; CHECK-NEXT:    [[TMP90:%.*]] = fcmp fast oge float [[TMP88]], [[TMP89]]
+; CHECK-NEXT:    [[TMP91:%.*]] = select i1 [[TMP90]], i32 [[TMP79]], i32 0
+; CHECK-NEXT:    [[TMP92:%.*]] = add i32 [[TMP87]], [[TMP91]]
+; CHECK-NEXT:    [[TMP93:%.*]] = trunc i32 [[TMP92]] to i16
+; CHECK-NEXT:    [[TMP94:%.*]] = sext i16 [[TMP93]] to i32
+; CHECK-NEXT:    [[TMP95:%.*]] = trunc i32 [[TMP94]] to i16
+; CHECK-NEXT:    [[TMP96:%.*]] = insertelement <4 x i16> [[TMP72]], i16 [[TMP95]], i64 3
+; CHECK-NEXT:    store <4 x i16> [[TMP96]], <4 x i16> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = sdiv <4 x i16> %x, %y
+  store <4 x i16> %r, <4 x i16> addrspace(1)* %out
+  ret void
+}
+
+; <4 x i16> srem: same per-lane signed fast-float expansion as sdiv_v4i16,
+; followed by num - quotient*den to recover the remainder, then the
+; trunc/sext/trunc round-trip back to i16.
+define amdgpu_kernel void @srem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x, <4 x i16> %y) {
+; CHECK-LABEL: @srem_v4i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i16> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i16> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
+; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP10:%.*]] = fdiv fast float 1.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fsub fast float -0.000000e+00, [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
+; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = mul i32 [[TMP20]], [[TMP4]]
+; CHECK-NEXT:    [[TMP22:%.*]] = sub i32 [[TMP3]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
+; CHECK-NEXT:    [[TMP24:%.*]] = sext i16 [[TMP23]] to i32
+; CHECK-NEXT:    [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
+; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <4 x i16> undef, i16 [[TMP25]], i64 0
+; CHECK-NEXT:    [[TMP27:%.*]] = extractelement <4 x i16> [[X]], i64 1
+; CHECK-NEXT:    [[TMP28:%.*]] = extractelement <4 x i16> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP29:%.*]] = sext i16 [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP30:%.*]] = sext i16 [[TMP28]] to i32
+; CHECK-NEXT:    [[TMP31:%.*]] = xor i32 [[TMP29]], [[TMP30]]
+; CHECK-NEXT:    [[TMP32:%.*]] = ashr i32 [[TMP31]], 30
+; CHECK-NEXT:    [[TMP33:%.*]] = or i32 [[TMP32]], 1
+; CHECK-NEXT:    [[TMP34:%.*]] = sitofp i32 [[TMP29]] to float
+; CHECK-NEXT:    [[TMP35:%.*]] = sitofp i32 [[TMP30]] to float
+; CHECK-NEXT:    [[TMP36:%.*]] = fdiv fast float 1.000000e+00, [[TMP35]]
+; CHECK-NEXT:    [[TMP37:%.*]] = fmul fast float [[TMP34]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.trunc.f32(float [[TMP37]])
+; CHECK-NEXT:    [[TMP39:%.*]] = fsub fast float -0.000000e+00, [[TMP38]]
+; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP39]], float [[TMP35]], float [[TMP34]])
+; CHECK-NEXT:    [[TMP41:%.*]] = fptosi float [[TMP38]] to i32
+; CHECK-NEXT:    [[TMP42:%.*]] = call fast float @llvm.fabs.f32(float [[TMP40]])
+; CHECK-NEXT:    [[TMP43:%.*]] = call fast float @llvm.fabs.f32(float [[TMP35]])
+; CHECK-NEXT:    [[TMP44:%.*]] = fcmp fast oge float [[TMP42]], [[TMP43]]
+; CHECK-NEXT:    [[TMP45:%.*]] = select i1 [[TMP44]], i32 [[TMP33]], i32 0
+; CHECK-NEXT:    [[TMP46:%.*]] = add i32 [[TMP41]], [[TMP45]]
+; CHECK-NEXT:    [[TMP47:%.*]] = mul i32 [[TMP46]], [[TMP30]]
+; CHECK-NEXT:    [[TMP48:%.*]] = sub i32 [[TMP29]], [[TMP47]]
+; CHECK-NEXT:    [[TMP49:%.*]] = trunc i32 [[TMP48]] to i16
+; CHECK-NEXT:    [[TMP50:%.*]] = sext i16 [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP51:%.*]] = trunc i32 [[TMP50]] to i16
+; CHECK-NEXT:    [[TMP52:%.*]] = insertelement <4 x i16> [[TMP26]], i16 [[TMP51]], i64 1
+; CHECK-NEXT:    [[TMP53:%.*]] = extractelement <4 x i16> [[X]], i64 2
+; CHECK-NEXT:    [[TMP54:%.*]] = extractelement <4 x i16> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP55:%.*]] = sext i16 [[TMP53]] to i32
+; CHECK-NEXT:    [[TMP56:%.*]] = sext i16 [[TMP54]] to i32
+; CHECK-NEXT:    [[TMP57:%.*]] = xor i32 [[TMP55]], [[TMP56]]
+; CHECK-NEXT:    [[TMP58:%.*]] = ashr i32 [[TMP57]], 30
+; CHECK-NEXT:    [[TMP59:%.*]] = or i32 [[TMP58]], 1
+; CHECK-NEXT:    [[TMP60:%.*]] = sitofp i32 [[TMP55]] to float
+; CHECK-NEXT:    [[TMP61:%.*]] = sitofp i32 [[TMP56]] to float
+; CHECK-NEXT:    [[TMP62:%.*]] = fdiv fast float 1.000000e+00, [[TMP61]]
+; CHECK-NEXT:    [[TMP63:%.*]] = fmul fast float [[TMP60]], [[TMP62]]
+; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.trunc.f32(float [[TMP63]])
+; CHECK-NEXT:    [[TMP65:%.*]] = fsub fast float -0.000000e+00, [[TMP64]]
+; CHECK-NEXT:    [[TMP66:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP65]], float [[TMP61]], float [[TMP60]])
+; CHECK-NEXT:    [[TMP67:%.*]] = fptosi float [[TMP64]] to i32
+; CHECK-NEXT:    [[TMP68:%.*]] = call fast float @llvm.fabs.f32(float [[TMP66]])
+; CHECK-NEXT:    [[TMP69:%.*]] = call fast float @llvm.fabs.f32(float [[TMP61]])
+; CHECK-NEXT:    [[TMP70:%.*]] = fcmp fast oge float [[TMP68]], [[TMP69]]
+; CHECK-NEXT:    [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP59]], i32 0
+; CHECK-NEXT:    [[TMP72:%.*]] = add i32 [[TMP67]], [[TMP71]]
+; CHECK-NEXT:    [[TMP73:%.*]] = mul i32 [[TMP72]], [[TMP56]]
+; CHECK-NEXT:    [[TMP74:%.*]] = sub i32 [[TMP55]], [[TMP73]]
+; CHECK-NEXT:    [[TMP75:%.*]] = trunc i32 [[TMP74]] to i16
+; CHECK-NEXT:    [[TMP76:%.*]] = sext i16 [[TMP75]] to i32
+; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[TMP76]] to i16
+; CHECK-NEXT:    [[TMP78:%.*]] = insertelement <4 x i16> [[TMP52]], i16 [[TMP77]], i64 2
+; CHECK-NEXT:    [[TMP79:%.*]] = extractelement <4 x i16> [[X]], i64 3
+; CHECK-NEXT:    [[TMP80:%.*]] = extractelement <4 x i16> [[Y]], i64 3
+; CHECK-NEXT:    [[TMP81:%.*]] = sext i16 [[TMP79]] to i32
+; CHECK-NEXT:    [[TMP82:%.*]] = sext i16 [[TMP80]] to i32
+; CHECK-NEXT:    [[TMP83:%.*]] = xor i32 [[TMP81]], [[TMP82]]
+; CHECK-NEXT:    [[TMP84:%.*]] = ashr i32 [[TMP83]], 30
+; CHECK-NEXT:    [[TMP85:%.*]] = or i32 [[TMP84]], 1
+; CHECK-NEXT:    [[TMP86:%.*]] = sitofp i32 [[TMP81]] to float
+; CHECK-NEXT:    [[TMP87:%.*]] = sitofp i32 [[TMP82]] to float
+; CHECK-NEXT:    [[TMP88:%.*]] = fdiv fast float 1.000000e+00, [[TMP87]]
+; CHECK-NEXT:    [[TMP89:%.*]] = fmul fast float [[TMP86]], [[TMP88]]
+; CHECK-NEXT:    [[TMP90:%.*]] = call fast float @llvm.trunc.f32(float [[TMP89]])
+; CHECK-NEXT:    [[TMP91:%.*]] = fsub fast float -0.000000e+00, [[TMP90]]
+; CHECK-NEXT:    [[TMP92:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP91]], float [[TMP87]], float [[TMP86]])
+; CHECK-NEXT:    [[TMP93:%.*]] = fptosi float [[TMP90]] to i32
+; CHECK-NEXT:    [[TMP94:%.*]] = call fast float @llvm.fabs.f32(float [[TMP92]])
+; CHECK-NEXT:    [[TMP95:%.*]] = call fast float @llvm.fabs.f32(float [[TMP87]])
+; CHECK-NEXT:    [[TMP96:%.*]] = fcmp fast oge float [[TMP94]], [[TMP95]]
+; CHECK-NEXT:    [[TMP97:%.*]] = select i1 [[TMP96]], i32 [[TMP85]], i32 0
+; CHECK-NEXT:    [[TMP98:%.*]] = add i32 [[TMP93]], [[TMP97]]
+; CHECK-NEXT:    [[TMP99:%.*]] = mul i32 [[TMP98]], [[TMP82]]
+; CHECK-NEXT:    [[TMP100:%.*]] = sub i32 [[TMP81]], [[TMP99]]
+; CHECK-NEXT:    [[TMP101:%.*]] = trunc i32 [[TMP100]] to i16
+; CHECK-NEXT:    [[TMP102:%.*]] = sext i16 [[TMP101]] to i32
+; CHECK-NEXT:    [[TMP103:%.*]] = trunc i32 [[TMP102]] to i16
+; CHECK-NEXT:    [[TMP104:%.*]] = insertelement <4 x i16> [[TMP78]], i16 [[TMP103]], i64 3
+; CHECK-NEXT:    store <4 x i16> [[TMP104]], <4 x i16> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = srem <4 x i16> %x, %y
+  store <4 x i16> %r, <4 x i16> addrspace(1)* %out
+  ret void
+}
+
+; Non-power-of-two scalar width: i3 udiv is zero-extended to i32, divided via
+; the fast-float expansion, and masked with 7 (the i3 value mask) before the
+; final trunc back to i3.
+define amdgpu_kernel void @udiv_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
+; CHECK-LABEL: @udiv_i3(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i3 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i3 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP5:%.*]] = fdiv fast float 1.000000e+00, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP8:%.*]] = fsub fast float -0.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 7
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i3
+; CHECK-NEXT:    store i3 [[TMP17]], i3 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = udiv i3 %x, %y
+  store i3 %r, i3 addrspace(1)* %out
+  ret void
+}
+
+; i3 urem: same expansion as udiv_i3, with the remainder recovered as
+; num - quotient*den and masked with 7 before the trunc back to i3.
+define amdgpu_kernel void @urem_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
+; CHECK-LABEL: @urem_i3(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i3 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i3 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP5:%.*]] = fdiv fast float 1.000000e+00, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP8:%.*]] = fsub fast float -0.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 7
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i3
+; CHECK-NEXT:    store i3 [[TMP19]], i3 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = urem i3 %x, %y
+  store i3 %r, i3 addrspace(1)* %out
+  ret void
+}
+
+; i3 sdiv: sign-extended to i32 and divided via the signed fast-float
+; expansion (sign fix-up from xor/ashr 30/or 1); the result is sign-extended
+; back through i3 (trunc/sext/trunc) before the store.
+define amdgpu_kernel void @sdiv_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
+; CHECK-LABEL: @sdiv_i3(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i3 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i3 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP8:%.*]] = fdiv fast float 1.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP11:%.*]] = fsub fast float -0.000000e+00, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
+; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
+; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i3
+; CHECK-NEXT:    [[TMP20:%.*]] = sext i3 [[TMP19]] to i32
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i3
+; CHECK-NEXT:    store i3 [[TMP21]], i3 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = sdiv i3 %x, %y
+  store i3 %r, i3 addrspace(1)* %out
+  ret void
+}
+
+; Signed i3 remainder: same sign-extended i32 float-reciprocal expansion as
+; @sdiv_i3, then the remainder is recovered as x - q*y (mul/sub) before being
+; truncated back to i3.
+define amdgpu_kernel void @srem_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
+; CHECK-LABEL: @srem_i3(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i3 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i3 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
+; CHECK-NEXT:    [[TMP8:%.*]] = fdiv fast float 1.000000e+00, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP11:%.*]] = fsub fast float -0.000000e+00, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
+; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
+; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], [[TMP2]]
+; CHECK-NEXT:    [[TMP20:%.*]] = sub i32 [[TMP1]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i3
+; CHECK-NEXT:    [[TMP22:%.*]] = sext i3 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i3
+; CHECK-NEXT:    store i3 [[TMP23]], i3 addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = srem i3 %x, %y
+  store i3 %r, i3 addrspace(1)* %out
+  ret void
+}
+
+; Unsigned <3 x i16> division: the vector op is fully scalarized. Each lane is
+; extracted, zero-extended to i32, divided via the fast float reciprocal
+; sequence (fdiv/fmul/trunc + llvm.amdgcn.fmad.ftz refinement + oge-based
+; quotient correction), masked with 65535 (2^16-1), truncated to i16, and
+; reinserted into the result vector.
+define amdgpu_kernel void @udiv_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x, <3 x i16> %y) {
+; CHECK-LABEL: @udiv_v3i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i16> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i16> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = fdiv fast float 1.000000e+00, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fsub fast float -0.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
+; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 65535
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
+; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <3 x i16> undef, i16 [[TMP19]], i64 0
+; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <3 x i16> [[X]], i64 1
+; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <3 x i16> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP23:%.*]] = zext i16 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP24:%.*]] = zext i16 [[TMP22]] to i32
+; CHECK-NEXT:    [[TMP25:%.*]] = uitofp i32 [[TMP23]] to float
+; CHECK-NEXT:    [[TMP26:%.*]] = uitofp i32 [[TMP24]] to float
+; CHECK-NEXT:    [[TMP27:%.*]] = fdiv fast float 1.000000e+00, [[TMP26]]
+; CHECK-NEXT:    [[TMP28:%.*]] = fmul fast float [[TMP25]], [[TMP27]]
+; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.trunc.f32(float [[TMP28]])
+; CHECK-NEXT:    [[TMP30:%.*]] = fsub fast float -0.000000e+00, [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP30]], float [[TMP26]], float [[TMP25]])
+; CHECK-NEXT:    [[TMP32:%.*]] = fptoui float [[TMP29]] to i32
+; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.fabs.f32(float [[TMP31]])
+; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.fabs.f32(float [[TMP26]])
+; CHECK-NEXT:    [[TMP35:%.*]] = fcmp fast oge float [[TMP33]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP35]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP32]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = and i32 [[TMP37]], 65535
+; CHECK-NEXT:    [[TMP39:%.*]] = trunc i32 [[TMP38]] to i16
+; CHECK-NEXT:    [[TMP40:%.*]] = insertelement <3 x i16> [[TMP20]], i16 [[TMP39]], i64 1
+; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <3 x i16> [[X]], i64 2
+; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <3 x i16> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP43:%.*]] = zext i16 [[TMP41]] to i32
+; CHECK-NEXT:    [[TMP44:%.*]] = zext i16 [[TMP42]] to i32
+; CHECK-NEXT:    [[TMP45:%.*]] = uitofp i32 [[TMP43]] to float
+; CHECK-NEXT:    [[TMP46:%.*]] = uitofp i32 [[TMP44]] to float
+; CHECK-NEXT:    [[TMP47:%.*]] = fdiv fast float 1.000000e+00, [[TMP46]]
+; CHECK-NEXT:    [[TMP48:%.*]] = fmul fast float [[TMP45]], [[TMP47]]
+; CHECK-NEXT:    [[TMP49:%.*]] = call fast float @llvm.trunc.f32(float [[TMP48]])
+; CHECK-NEXT:    [[TMP50:%.*]] = fsub fast float -0.000000e+00, [[TMP49]]
+; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP50]], float [[TMP46]], float [[TMP45]])
+; CHECK-NEXT:    [[TMP52:%.*]] = fptoui float [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.fabs.f32(float [[TMP51]])
+; CHECK-NEXT:    [[TMP54:%.*]] = call fast float @llvm.fabs.f32(float [[TMP46]])
+; CHECK-NEXT:    [[TMP55:%.*]] = fcmp fast oge float [[TMP53]], [[TMP54]]
+; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP55]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP52]], [[TMP56]]
+; CHECK-NEXT:    [[TMP58:%.*]] = and i32 [[TMP57]], 65535
+; CHECK-NEXT:    [[TMP59:%.*]] = trunc i32 [[TMP58]] to i16
+; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <3 x i16> [[TMP40]], i16 [[TMP59]], i64 2
+; CHECK-NEXT:    store <3 x i16> [[TMP60]], <3 x i16> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = udiv <3 x i16> %x, %y
+  store <3 x i16> %r, <3 x i16> addrspace(1)* %out
+  ret void
+}
+
+; Unsigned <3 x i16> remainder: scalarized like @udiv_v3i16; per lane the
+; quotient is computed via the float reciprocal expansion, then the remainder
+; is recovered as x - q*y (mul/sub) and masked with 65535 before reinsertion.
+define amdgpu_kernel void @urem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x, <3 x i16> %y) {
+; CHECK-LABEL: @urem_v3i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i16> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i16> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = fdiv fast float 1.000000e+00, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fsub fast float -0.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
+; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP17]], [[TMP4]]
+; CHECK-NEXT:    [[TMP19:%.*]] = sub i32 [[TMP3]], [[TMP18]]
+; CHECK-NEXT:    [[TMP20:%.*]] = and i32 [[TMP19]], 65535
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
+; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <3 x i16> undef, i16 [[TMP21]], i64 0
+; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <3 x i16> [[X]], i64 1
+; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <3 x i16> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP25:%.*]] = zext i16 [[TMP23]] to i32
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i16 [[TMP24]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = uitofp i32 [[TMP25]] to float
+; CHECK-NEXT:    [[TMP28:%.*]] = uitofp i32 [[TMP26]] to float
+; CHECK-NEXT:    [[TMP29:%.*]] = fdiv fast float 1.000000e+00, [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = fmul fast float [[TMP27]], [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.trunc.f32(float [[TMP30]])
+; CHECK-NEXT:    [[TMP32:%.*]] = fsub fast float -0.000000e+00, [[TMP31]]
+; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP32]], float [[TMP28]], float [[TMP27]])
+; CHECK-NEXT:    [[TMP34:%.*]] = fptoui float [[TMP31]] to i32
+; CHECK-NEXT:    [[TMP35:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
+; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.fabs.f32(float [[TMP28]])
+; CHECK-NEXT:    [[TMP37:%.*]] = fcmp fast oge float [[TMP35]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP37]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP39:%.*]] = add i32 [[TMP34]], [[TMP38]]
+; CHECK-NEXT:    [[TMP40:%.*]] = mul i32 [[TMP39]], [[TMP26]]
+; CHECK-NEXT:    [[TMP41:%.*]] = sub i32 [[TMP25]], [[TMP40]]
+; CHECK-NEXT:    [[TMP42:%.*]] = and i32 [[TMP41]], 65535
+; CHECK-NEXT:    [[TMP43:%.*]] = trunc i32 [[TMP42]] to i16
+; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <3 x i16> [[TMP22]], i16 [[TMP43]], i64 1
+; CHECK-NEXT:    [[TMP45:%.*]] = extractelement <3 x i16> [[X]], i64 2
+; CHECK-NEXT:    [[TMP46:%.*]] = extractelement <3 x i16> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP47:%.*]] = zext i16 [[TMP45]] to i32
+; CHECK-NEXT:    [[TMP48:%.*]] = zext i16 [[TMP46]] to i32
+; CHECK-NEXT:    [[TMP49:%.*]] = uitofp i32 [[TMP47]] to float
+; CHECK-NEXT:    [[TMP50:%.*]] = uitofp i32 [[TMP48]] to float
+; CHECK-NEXT:    [[TMP51:%.*]] = fdiv fast float 1.000000e+00, [[TMP50]]
+; CHECK-NEXT:    [[TMP52:%.*]] = fmul fast float [[TMP49]], [[TMP51]]
+; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.trunc.f32(float [[TMP52]])
+; CHECK-NEXT:    [[TMP54:%.*]] = fsub fast float -0.000000e+00, [[TMP53]]
+; CHECK-NEXT:    [[TMP55:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP54]], float [[TMP50]], float [[TMP49]])
+; CHECK-NEXT:    [[TMP56:%.*]] = fptoui float [[TMP53]] to i32
+; CHECK-NEXT:    [[TMP57:%.*]] = call fast float @llvm.fabs.f32(float [[TMP55]])
+; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.fabs.f32(float [[TMP50]])
+; CHECK-NEXT:    [[TMP59:%.*]] = fcmp fast oge float [[TMP57]], [[TMP58]]
+; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP59]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP61:%.*]] = add i32 [[TMP56]], [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = mul i32 [[TMP61]], [[TMP48]]
+; CHECK-NEXT:    [[TMP63:%.*]] = sub i32 [[TMP47]], [[TMP62]]
+; CHECK-NEXT:    [[TMP64:%.*]] = and i32 [[TMP63]], 65535
+; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[TMP64]] to i16
+; CHECK-NEXT:    [[TMP66:%.*]] = insertelement <3 x i16> [[TMP44]], i16 [[TMP65]], i64 2
+; CHECK-NEXT:    store <3 x i16> [[TMP66]], <3 x i16> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = urem <3 x i16> %x, %y
+  store <3 x i16> %r, <3 x i16> addrspace(1)* %out
+  ret void
+}
+
+; Signed <3 x i16> division: scalarized per lane; each lane is sign-extended
+; to i32, the quotient sign (xor/ashr 30/or 1) is computed, the lane is
+; divided via the fast float reciprocal expansion, and the i32 quotient is
+; narrowed back to i16 via the trailing trunc/sext/trunc sequence before
+; being reinserted.
+define amdgpu_kernel void @sdiv_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x, <3 x i16> %y) {
+; CHECK-LABEL: @sdiv_v3i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i16> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i16> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
+; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP10:%.*]] = fdiv fast float 1.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fsub fast float -0.000000e+00, [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
+; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
+; CHECK-NEXT:    [[TMP22:%.*]] = sext i16 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
+; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <3 x i16> undef, i16 [[TMP23]], i64 0
+; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <3 x i16> [[X]], i64 1
+; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <3 x i16> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP27:%.*]] = sext i16 [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP28:%.*]] = sext i16 [[TMP26]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = xor i32 [[TMP27]], [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = ashr i32 [[TMP29]], 30
+; CHECK-NEXT:    [[TMP31:%.*]] = or i32 [[TMP30]], 1
+; CHECK-NEXT:    [[TMP32:%.*]] = sitofp i32 [[TMP27]] to float
+; CHECK-NEXT:    [[TMP33:%.*]] = sitofp i32 [[TMP28]] to float
+; CHECK-NEXT:    [[TMP34:%.*]] = fdiv fast float 1.000000e+00, [[TMP33]]
+; CHECK-NEXT:    [[TMP35:%.*]] = fmul fast float [[TMP32]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.trunc.f32(float [[TMP35]])
+; CHECK-NEXT:    [[TMP37:%.*]] = fsub fast float -0.000000e+00, [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP37]], float [[TMP33]], float [[TMP32]])
+; CHECK-NEXT:    [[TMP39:%.*]] = fptosi float [[TMP36]] to i32
+; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.fabs.f32(float [[TMP38]])
+; CHECK-NEXT:    [[TMP41:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
+; CHECK-NEXT:    [[TMP42:%.*]] = fcmp fast oge float [[TMP40]], [[TMP41]]
+; CHECK-NEXT:    [[TMP43:%.*]] = select i1 [[TMP42]], i32 [[TMP31]], i32 0
+; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP39]], [[TMP43]]
+; CHECK-NEXT:    [[TMP45:%.*]] = trunc i32 [[TMP44]] to i16
+; CHECK-NEXT:    [[TMP46:%.*]] = sext i16 [[TMP45]] to i32
+; CHECK-NEXT:    [[TMP47:%.*]] = trunc i32 [[TMP46]] to i16
+; CHECK-NEXT:    [[TMP48:%.*]] = insertelement <3 x i16> [[TMP24]], i16 [[TMP47]], i64 1
+; CHECK-NEXT:    [[TMP49:%.*]] = extractelement <3 x i16> [[X]], i64 2
+; CHECK-NEXT:    [[TMP50:%.*]] = extractelement <3 x i16> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP51:%.*]] = sext i16 [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP52:%.*]] = sext i16 [[TMP50]] to i32
+; CHECK-NEXT:    [[TMP53:%.*]] = xor i32 [[TMP51]], [[TMP52]]
+; CHECK-NEXT:    [[TMP54:%.*]] = ashr i32 [[TMP53]], 30
+; CHECK-NEXT:    [[TMP55:%.*]] = or i32 [[TMP54]], 1
+; CHECK-NEXT:    [[TMP56:%.*]] = sitofp i32 [[TMP51]] to float
+; CHECK-NEXT:    [[TMP57:%.*]] = sitofp i32 [[TMP52]] to float
+; CHECK-NEXT:    [[TMP58:%.*]] = fdiv fast float 1.000000e+00, [[TMP57]]
+; CHECK-NEXT:    [[TMP59:%.*]] = fmul fast float [[TMP56]], [[TMP58]]
+; CHECK-NEXT:    [[TMP60:%.*]] = call fast float @llvm.trunc.f32(float [[TMP59]])
+; CHECK-NEXT:    [[TMP61:%.*]] = fsub fast float -0.000000e+00, [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP61]], float [[TMP57]], float [[TMP56]])
+; CHECK-NEXT:    [[TMP63:%.*]] = fptosi float [[TMP60]] to i32
+; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.fabs.f32(float [[TMP62]])
+; CHECK-NEXT:    [[TMP65:%.*]] = call fast float @llvm.fabs.f32(float [[TMP57]])
+; CHECK-NEXT:    [[TMP66:%.*]] = fcmp fast oge float [[TMP64]], [[TMP65]]
+; CHECK-NEXT:    [[TMP67:%.*]] = select i1 [[TMP66]], i32 [[TMP55]], i32 0
+; CHECK-NEXT:    [[TMP68:%.*]] = add i32 [[TMP63]], [[TMP67]]
+; CHECK-NEXT:    [[TMP69:%.*]] = trunc i32 [[TMP68]] to i16
+; CHECK-NEXT:    [[TMP70:%.*]] = sext i16 [[TMP69]] to i32
+; CHECK-NEXT:    [[TMP71:%.*]] = trunc i32 [[TMP70]] to i16
+; CHECK-NEXT:    [[TMP72:%.*]] = insertelement <3 x i16> [[TMP48]], i16 [[TMP71]], i64 2
+; CHECK-NEXT:    store <3 x i16> [[TMP72]], <3 x i16> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = sdiv <3 x i16> %x, %y
+  store <3 x i16> %r, <3 x i16> addrspace(1)* %out
+  ret void
+}
+
+; Signed <3 x i16> remainder: scalarized per lane like @sdiv_v3i16; after the
+; float-reciprocal quotient is formed, the remainder is recovered as x - q*y
+; (mul/sub) and narrowed back to i16 (trunc/sext/trunc) before reinsertion.
+define amdgpu_kernel void @srem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x, <3 x i16> %y) {
+; CHECK-LABEL: @srem_v3i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i16> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i16> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
+; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP10:%.*]] = fdiv fast float 1.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fsub fast float -0.000000e+00, [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
+; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = mul i32 [[TMP20]], [[TMP4]]
+; CHECK-NEXT:    [[TMP22:%.*]] = sub i32 [[TMP3]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
+; CHECK-NEXT:    [[TMP24:%.*]] = sext i16 [[TMP23]] to i32
+; CHECK-NEXT:    [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
+; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <3 x i16> undef, i16 [[TMP25]], i64 0
+; CHECK-NEXT:    [[TMP27:%.*]] = extractelement <3 x i16> [[X]], i64 1
+; CHECK-NEXT:    [[TMP28:%.*]] = extractelement <3 x i16> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP29:%.*]] = sext i16 [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP30:%.*]] = sext i16 [[TMP28]] to i32
+; CHECK-NEXT:    [[TMP31:%.*]] = xor i32 [[TMP29]], [[TMP30]]
+; CHECK-NEXT:    [[TMP32:%.*]] = ashr i32 [[TMP31]], 30
+; CHECK-NEXT:    [[TMP33:%.*]] = or i32 [[TMP32]], 1
+; CHECK-NEXT:    [[TMP34:%.*]] = sitofp i32 [[TMP29]] to float
+; CHECK-NEXT:    [[TMP35:%.*]] = sitofp i32 [[TMP30]] to float
+; CHECK-NEXT:    [[TMP36:%.*]] = fdiv fast float 1.000000e+00, [[TMP35]]
+; CHECK-NEXT:    [[TMP37:%.*]] = fmul fast float [[TMP34]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.trunc.f32(float [[TMP37]])
+; CHECK-NEXT:    [[TMP39:%.*]] = fsub fast float -0.000000e+00, [[TMP38]]
+; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP39]], float [[TMP35]], float [[TMP34]])
+; CHECK-NEXT:    [[TMP41:%.*]] = fptosi float [[TMP38]] to i32
+; CHECK-NEXT:    [[TMP42:%.*]] = call fast float @llvm.fabs.f32(float [[TMP40]])
+; CHECK-NEXT:    [[TMP43:%.*]] = call fast float @llvm.fabs.f32(float [[TMP35]])
+; CHECK-NEXT:    [[TMP44:%.*]] = fcmp fast oge float [[TMP42]], [[TMP43]]
+; CHECK-NEXT:    [[TMP45:%.*]] = select i1 [[TMP44]], i32 [[TMP33]], i32 0
+; CHECK-NEXT:    [[TMP46:%.*]] = add i32 [[TMP41]], [[TMP45]]
+; CHECK-NEXT:    [[TMP47:%.*]] = mul i32 [[TMP46]], [[TMP30]]
+; CHECK-NEXT:    [[TMP48:%.*]] = sub i32 [[TMP29]], [[TMP47]]
+; CHECK-NEXT:    [[TMP49:%.*]] = trunc i32 [[TMP48]] to i16
+; CHECK-NEXT:    [[TMP50:%.*]] = sext i16 [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP51:%.*]] = trunc i32 [[TMP50]] to i16
+; CHECK-NEXT:    [[TMP52:%.*]] = insertelement <3 x i16> [[TMP26]], i16 [[TMP51]], i64 1
+; CHECK-NEXT:    [[TMP53:%.*]] = extractelement <3 x i16> [[X]], i64 2
+; CHECK-NEXT:    [[TMP54:%.*]] = extractelement <3 x i16> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP55:%.*]] = sext i16 [[TMP53]] to i32
+; CHECK-NEXT:    [[TMP56:%.*]] = sext i16 [[TMP54]] to i32
+; CHECK-NEXT:    [[TMP57:%.*]] = xor i32 [[TMP55]], [[TMP56]]
+; CHECK-NEXT:    [[TMP58:%.*]] = ashr i32 [[TMP57]], 30
+; CHECK-NEXT:    [[TMP59:%.*]] = or i32 [[TMP58]], 1
+; CHECK-NEXT:    [[TMP60:%.*]] = sitofp i32 [[TMP55]] to float
+; CHECK-NEXT:    [[TMP61:%.*]] = sitofp i32 [[TMP56]] to float
+; CHECK-NEXT:    [[TMP62:%.*]] = fdiv fast float 1.000000e+00, [[TMP61]]
+; CHECK-NEXT:    [[TMP63:%.*]] = fmul fast float [[TMP60]], [[TMP62]]
+; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.trunc.f32(float [[TMP63]])
+; CHECK-NEXT:    [[TMP65:%.*]] = fsub fast float -0.000000e+00, [[TMP64]]
+; CHECK-NEXT:    [[TMP66:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP65]], float [[TMP61]], float [[TMP60]])
+; CHECK-NEXT:    [[TMP67:%.*]] = fptosi float [[TMP64]] to i32
+; CHECK-NEXT:    [[TMP68:%.*]] = call fast float @llvm.fabs.f32(float [[TMP66]])
+; CHECK-NEXT:    [[TMP69:%.*]] = call fast float @llvm.fabs.f32(float [[TMP61]])
+; CHECK-NEXT:    [[TMP70:%.*]] = fcmp fast oge float [[TMP68]], [[TMP69]]
+; CHECK-NEXT:    [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP59]], i32 0
+; CHECK-NEXT:    [[TMP72:%.*]] = add i32 [[TMP67]], [[TMP71]]
+; CHECK-NEXT:    [[TMP73:%.*]] = mul i32 [[TMP72]], [[TMP56]]
+; CHECK-NEXT:    [[TMP74:%.*]] = sub i32 [[TMP55]], [[TMP73]]
+; CHECK-NEXT:    [[TMP75:%.*]] = trunc i32 [[TMP74]] to i16
+; CHECK-NEXT:    [[TMP76:%.*]] = sext i16 [[TMP75]] to i32
+; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[TMP76]] to i16
+; CHECK-NEXT:    [[TMP78:%.*]] = insertelement <3 x i16> [[TMP52]], i16 [[TMP77]], i64 2
+; CHECK-NEXT:    store <3 x i16> [[TMP78]], <3 x i16> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = srem <3 x i16> %x, %y
+  store <3 x i16> %r, <3 x i16> addrspace(1)* %out
+  ret void
+}
+
+; Unsigned <3 x i15> division: non-power-of-two-width variant of @udiv_v3i16.
+; Each lane is zero-extended to i32, divided via the float reciprocal
+; expansion, then masked with 32767 (2^15-1) and truncated back to i15.
+define amdgpu_kernel void @udiv_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x, <3 x i15> %y) {
+; CHECK-LABEL: @udiv_v3i15(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i15> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i15> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i15 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i15 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = fdiv fast float 1.000000e+00, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fsub fast float -0.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
+; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 32767
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i15
+; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <3 x i15> undef, i15 [[TMP19]], i64 0
+; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <3 x i15> [[X]], i64 1
+; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <3 x i15> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP23:%.*]] = zext i15 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP24:%.*]] = zext i15 [[TMP22]] to i32
+; CHECK-NEXT:    [[TMP25:%.*]] = uitofp i32 [[TMP23]] to float
+; CHECK-NEXT:    [[TMP26:%.*]] = uitofp i32 [[TMP24]] to float
+; CHECK-NEXT:    [[TMP27:%.*]] = fdiv fast float 1.000000e+00, [[TMP26]]
+; CHECK-NEXT:    [[TMP28:%.*]] = fmul fast float [[TMP25]], [[TMP27]]
+; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.trunc.f32(float [[TMP28]])
+; CHECK-NEXT:    [[TMP30:%.*]] = fsub fast float -0.000000e+00, [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP30]], float [[TMP26]], float [[TMP25]])
+; CHECK-NEXT:    [[TMP32:%.*]] = fptoui float [[TMP29]] to i32
+; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.fabs.f32(float [[TMP31]])
+; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.fabs.f32(float [[TMP26]])
+; CHECK-NEXT:    [[TMP35:%.*]] = fcmp fast oge float [[TMP33]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP35]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP32]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = and i32 [[TMP37]], 32767
+; CHECK-NEXT:    [[TMP39:%.*]] = trunc i32 [[TMP38]] to i15
+; CHECK-NEXT:    [[TMP40:%.*]] = insertelement <3 x i15> [[TMP20]], i15 [[TMP39]], i64 1
+; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <3 x i15> [[X]], i64 2
+; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <3 x i15> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP43:%.*]] = zext i15 [[TMP41]] to i32
+; CHECK-NEXT:    [[TMP44:%.*]] = zext i15 [[TMP42]] to i32
+; CHECK-NEXT:    [[TMP45:%.*]] = uitofp i32 [[TMP43]] to float
+; CHECK-NEXT:    [[TMP46:%.*]] = uitofp i32 [[TMP44]] to float
+; CHECK-NEXT:    [[TMP47:%.*]] = fdiv fast float 1.000000e+00, [[TMP46]]
+; CHECK-NEXT:    [[TMP48:%.*]] = fmul fast float [[TMP45]], [[TMP47]]
+; CHECK-NEXT:    [[TMP49:%.*]] = call fast float @llvm.trunc.f32(float [[TMP48]])
+; CHECK-NEXT:    [[TMP50:%.*]] = fsub fast float -0.000000e+00, [[TMP49]]
+; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP50]], float [[TMP46]], float [[TMP45]])
+; CHECK-NEXT:    [[TMP52:%.*]] = fptoui float [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.fabs.f32(float [[TMP51]])
+; CHECK-NEXT:    [[TMP54:%.*]] = call fast float @llvm.fabs.f32(float [[TMP46]])
+; CHECK-NEXT:    [[TMP55:%.*]] = fcmp fast oge float [[TMP53]], [[TMP54]]
+; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP55]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP52]], [[TMP56]]
+; CHECK-NEXT:    [[TMP58:%.*]] = and i32 [[TMP57]], 32767
+; CHECK-NEXT:    [[TMP59:%.*]] = trunc i32 [[TMP58]] to i15
+; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <3 x i15> [[TMP40]], i15 [[TMP59]], i64 2
+; CHECK-NEXT:    store <3 x i15> [[TMP60]], <3 x i15> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = udiv <3 x i15> %x, %y
+  store <3 x i15> %r, <3 x i15> addrspace(1)* %out
+  ret void
+}
+
+; Unsigned <3 x i15> remainder: scalarized like @udiv_v3i15; per lane the
+; remainder is recovered as x - q*y (mul/sub) and masked with 32767 (2^15-1)
+; before being truncated to i15 and reinserted.
+define amdgpu_kernel void @urem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x, <3 x i15> %y) {
+; CHECK-LABEL: @urem_v3i15(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i15> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i15> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i15 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i15 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP7:%.*]] = fdiv fast float 1.000000e+00, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fsub fast float -0.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
+; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP17]], [[TMP4]]
+; CHECK-NEXT:    [[TMP19:%.*]] = sub i32 [[TMP3]], [[TMP18]]
+; CHECK-NEXT:    [[TMP20:%.*]] = and i32 [[TMP19]], 32767
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i15
+; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <3 x i15> undef, i15 [[TMP21]], i64 0
+; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <3 x i15> [[X]], i64 1
+; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <3 x i15> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP25:%.*]] = zext i15 [[TMP23]] to i32
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i15 [[TMP24]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = uitofp i32 [[TMP25]] to float
+; CHECK-NEXT:    [[TMP28:%.*]] = uitofp i32 [[TMP26]] to float
+; CHECK-NEXT:    [[TMP29:%.*]] = fdiv fast float 1.000000e+00, [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = fmul fast float [[TMP27]], [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.trunc.f32(float [[TMP30]])
+; CHECK-NEXT:    [[TMP32:%.*]] = fsub fast float -0.000000e+00, [[TMP31]]
+; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP32]], float [[TMP28]], float [[TMP27]])
+; CHECK-NEXT:    [[TMP34:%.*]] = fptoui float [[TMP31]] to i32
+; CHECK-NEXT:    [[TMP35:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
+; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.fabs.f32(float [[TMP28]])
+; CHECK-NEXT:    [[TMP37:%.*]] = fcmp fast oge float [[TMP35]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP37]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP39:%.*]] = add i32 [[TMP34]], [[TMP38]]
+; CHECK-NEXT:    [[TMP40:%.*]] = mul i32 [[TMP39]], [[TMP26]]
+; CHECK-NEXT:    [[TMP41:%.*]] = sub i32 [[TMP25]], [[TMP40]]
+; CHECK-NEXT:    [[TMP42:%.*]] = and i32 [[TMP41]], 32767
+; CHECK-NEXT:    [[TMP43:%.*]] = trunc i32 [[TMP42]] to i15
+; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <3 x i15> [[TMP22]], i15 [[TMP43]], i64 1
+; CHECK-NEXT:    [[TMP45:%.*]] = extractelement <3 x i15> [[X]], i64 2
+; CHECK-NEXT:    [[TMP46:%.*]] = extractelement <3 x i15> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP47:%.*]] = zext i15 [[TMP45]] to i32
+; CHECK-NEXT:    [[TMP48:%.*]] = zext i15 [[TMP46]] to i32
+; CHECK-NEXT:    [[TMP49:%.*]] = uitofp i32 [[TMP47]] to float
+; CHECK-NEXT:    [[TMP50:%.*]] = uitofp i32 [[TMP48]] to float
+; CHECK-NEXT:    [[TMP51:%.*]] = fdiv fast float 1.000000e+00, [[TMP50]]
+; CHECK-NEXT:    [[TMP52:%.*]] = fmul fast float [[TMP49]], [[TMP51]]
+; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.trunc.f32(float [[TMP52]])
+; CHECK-NEXT:    [[TMP54:%.*]] = fsub fast float -0.000000e+00, [[TMP53]]
+; CHECK-NEXT:    [[TMP55:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP54]], float [[TMP50]], float [[TMP49]])
+; CHECK-NEXT:    [[TMP56:%.*]] = fptoui float [[TMP53]] to i32
+; CHECK-NEXT:    [[TMP57:%.*]] = call fast float @llvm.fabs.f32(float [[TMP55]])
+; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.fabs.f32(float [[TMP50]])
+; CHECK-NEXT:    [[TMP59:%.*]] = fcmp fast oge float [[TMP57]], [[TMP58]]
+; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP59]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP61:%.*]] = add i32 [[TMP56]], [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = mul i32 [[TMP61]], [[TMP48]]
+; CHECK-NEXT:    [[TMP63:%.*]] = sub i32 [[TMP47]], [[TMP62]]
+; CHECK-NEXT:    [[TMP64:%.*]] = and i32 [[TMP63]], 32767
+; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[TMP64]] to i15
+; CHECK-NEXT:    [[TMP66:%.*]] = insertelement <3 x i15> [[TMP44]], i15 [[TMP65]], i64 2
+; CHECK-NEXT:    store <3 x i15> [[TMP66]], <3 x i15> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = urem <3 x i15> %x, %y
+  store <3 x i15> %r, <3 x i15> addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @sdiv_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x, <3 x i15> %y) {
+; CHECK-LABEL: @sdiv_v3i15(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i15> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i15> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i15 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = sext i15 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
+; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP10:%.*]] = fdiv fast float 1.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fsub fast float -0.000000e+00, [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
+; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i15
+; CHECK-NEXT:    [[TMP22:%.*]] = sext i15 [[TMP21]] to i32
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i15
+; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <3 x i15> undef, i15 [[TMP23]], i64 0
+; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <3 x i15> [[X]], i64 1
+; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <3 x i15> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP27:%.*]] = sext i15 [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP28:%.*]] = sext i15 [[TMP26]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = xor i32 [[TMP27]], [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = ashr i32 [[TMP29]], 30
+; CHECK-NEXT:    [[TMP31:%.*]] = or i32 [[TMP30]], 1
+; CHECK-NEXT:    [[TMP32:%.*]] = sitofp i32 [[TMP27]] to float
+; CHECK-NEXT:    [[TMP33:%.*]] = sitofp i32 [[TMP28]] to float
+; CHECK-NEXT:    [[TMP34:%.*]] = fdiv fast float 1.000000e+00, [[TMP33]]
+; CHECK-NEXT:    [[TMP35:%.*]] = fmul fast float [[TMP32]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.trunc.f32(float [[TMP35]])
+; CHECK-NEXT:    [[TMP37:%.*]] = fsub fast float -0.000000e+00, [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP37]], float [[TMP33]], float [[TMP32]])
+; CHECK-NEXT:    [[TMP39:%.*]] = fptosi float [[TMP36]] to i32
+; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.fabs.f32(float [[TMP38]])
+; CHECK-NEXT:    [[TMP41:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
+; CHECK-NEXT:    [[TMP42:%.*]] = fcmp fast oge float [[TMP40]], [[TMP41]]
+; CHECK-NEXT:    [[TMP43:%.*]] = select i1 [[TMP42]], i32 [[TMP31]], i32 0
+; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP39]], [[TMP43]]
+; CHECK-NEXT:    [[TMP45:%.*]] = trunc i32 [[TMP44]] to i15
+; CHECK-NEXT:    [[TMP46:%.*]] = sext i15 [[TMP45]] to i32
+; CHECK-NEXT:    [[TMP47:%.*]] = trunc i32 [[TMP46]] to i15
+; CHECK-NEXT:    [[TMP48:%.*]] = insertelement <3 x i15> [[TMP24]], i15 [[TMP47]], i64 1
+; CHECK-NEXT:    [[TMP49:%.*]] = extractelement <3 x i15> [[X]], i64 2
+; CHECK-NEXT:    [[TMP50:%.*]] = extractelement <3 x i15> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP51:%.*]] = sext i15 [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP52:%.*]] = sext i15 [[TMP50]] to i32
+; CHECK-NEXT:    [[TMP53:%.*]] = xor i32 [[TMP51]], [[TMP52]]
+; CHECK-NEXT:    [[TMP54:%.*]] = ashr i32 [[TMP53]], 30
+; CHECK-NEXT:    [[TMP55:%.*]] = or i32 [[TMP54]], 1
+; CHECK-NEXT:    [[TMP56:%.*]] = sitofp i32 [[TMP51]] to float
+; CHECK-NEXT:    [[TMP57:%.*]] = sitofp i32 [[TMP52]] to float
+; CHECK-NEXT:    [[TMP58:%.*]] = fdiv fast float 1.000000e+00, [[TMP57]]
+; CHECK-NEXT:    [[TMP59:%.*]] = fmul fast float [[TMP56]], [[TMP58]]
+; CHECK-NEXT:    [[TMP60:%.*]] = call fast float @llvm.trunc.f32(float [[TMP59]])
+; CHECK-NEXT:    [[TMP61:%.*]] = fsub fast float -0.000000e+00, [[TMP60]]
+; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP61]], float [[TMP57]], float [[TMP56]])
+; CHECK-NEXT:    [[TMP63:%.*]] = fptosi float [[TMP60]] to i32
+; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.fabs.f32(float [[TMP62]])
+; CHECK-NEXT:    [[TMP65:%.*]] = call fast float @llvm.fabs.f32(float [[TMP57]])
+; CHECK-NEXT:    [[TMP66:%.*]] = fcmp fast oge float [[TMP64]], [[TMP65]]
+; CHECK-NEXT:    [[TMP67:%.*]] = select i1 [[TMP66]], i32 [[TMP55]], i32 0
+; CHECK-NEXT:    [[TMP68:%.*]] = add i32 [[TMP63]], [[TMP67]]
+; CHECK-NEXT:    [[TMP69:%.*]] = trunc i32 [[TMP68]] to i15
+; CHECK-NEXT:    [[TMP70:%.*]] = sext i15 [[TMP69]] to i32
+; CHECK-NEXT:    [[TMP71:%.*]] = trunc i32 [[TMP70]] to i15
+; CHECK-NEXT:    [[TMP72:%.*]] = insertelement <3 x i15> [[TMP48]], i15 [[TMP71]], i64 2
+; CHECK-NEXT:    store <3 x i15> [[TMP72]], <3 x i15> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = sdiv <3 x i15> %x, %y
+  store <3 x i15> %r, <3 x i15> addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @srem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x, <3 x i15> %y) {
+; CHECK-LABEL: @srem_v3i15(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i15> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i15> [[Y:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i15 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = sext i15 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
+; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
+; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP10:%.*]] = fdiv fast float 1.000000e+00, [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
+; CHECK-NEXT:    [[TMP13:%.*]] = fsub fast float -0.000000e+00, [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
+; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
+; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
+; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = mul i32 [[TMP20]], [[TMP4]]
+; CHECK-NEXT:    [[TMP22:%.*]] = sub i32 [[TMP3]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i15
+; CHECK-NEXT:    [[TMP24:%.*]] = sext i15 [[TMP23]] to i32
+; CHECK-NEXT:    [[TMP25:%.*]] = trunc i32 [[TMP24]] to i15
+; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <3 x i15> undef, i15 [[TMP25]], i64 0
+; CHECK-NEXT:    [[TMP27:%.*]] = extractelement <3 x i15> [[X]], i64 1
+; CHECK-NEXT:    [[TMP28:%.*]] = extractelement <3 x i15> [[Y]], i64 1
+; CHECK-NEXT:    [[TMP29:%.*]] = sext i15 [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP30:%.*]] = sext i15 [[TMP28]] to i32
+; CHECK-NEXT:    [[TMP31:%.*]] = xor i32 [[TMP29]], [[TMP30]]
+; CHECK-NEXT:    [[TMP32:%.*]] = ashr i32 [[TMP31]], 30
+; CHECK-NEXT:    [[TMP33:%.*]] = or i32 [[TMP32]], 1
+; CHECK-NEXT:    [[TMP34:%.*]] = sitofp i32 [[TMP29]] to float
+; CHECK-NEXT:    [[TMP35:%.*]] = sitofp i32 [[TMP30]] to float
+; CHECK-NEXT:    [[TMP36:%.*]] = fdiv fast float 1.000000e+00, [[TMP35]]
+; CHECK-NEXT:    [[TMP37:%.*]] = fmul fast float [[TMP34]], [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.trunc.f32(float [[TMP37]])
+; CHECK-NEXT:    [[TMP39:%.*]] = fsub fast float -0.000000e+00, [[TMP38]]
+; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP39]], float [[TMP35]], float [[TMP34]])
+; CHECK-NEXT:    [[TMP41:%.*]] = fptosi float [[TMP38]] to i32
+; CHECK-NEXT:    [[TMP42:%.*]] = call fast float @llvm.fabs.f32(float [[TMP40]])
+; CHECK-NEXT:    [[TMP43:%.*]] = call fast float @llvm.fabs.f32(float [[TMP35]])
+; CHECK-NEXT:    [[TMP44:%.*]] = fcmp fast oge float [[TMP42]], [[TMP43]]
+; CHECK-NEXT:    [[TMP45:%.*]] = select i1 [[TMP44]], i32 [[TMP33]], i32 0
+; CHECK-NEXT:    [[TMP46:%.*]] = add i32 [[TMP41]], [[TMP45]]
+; CHECK-NEXT:    [[TMP47:%.*]] = mul i32 [[TMP46]], [[TMP30]]
+; CHECK-NEXT:    [[TMP48:%.*]] = sub i32 [[TMP29]], [[TMP47]]
+; CHECK-NEXT:    [[TMP49:%.*]] = trunc i32 [[TMP48]] to i15
+; CHECK-NEXT:    [[TMP50:%.*]] = sext i15 [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP51:%.*]] = trunc i32 [[TMP50]] to i15
+; CHECK-NEXT:    [[TMP52:%.*]] = insertelement <3 x i15> [[TMP26]], i15 [[TMP51]], i64 1
+; CHECK-NEXT:    [[TMP53:%.*]] = extractelement <3 x i15> [[X]], i64 2
+; CHECK-NEXT:    [[TMP54:%.*]] = extractelement <3 x i15> [[Y]], i64 2
+; CHECK-NEXT:    [[TMP55:%.*]] = sext i15 [[TMP53]] to i32
+; CHECK-NEXT:    [[TMP56:%.*]] = sext i15 [[TMP54]] to i32
+; CHECK-NEXT:    [[TMP57:%.*]] = xor i32 [[TMP55]], [[TMP56]]
+; CHECK-NEXT:    [[TMP58:%.*]] = ashr i32 [[TMP57]], 30
+; CHECK-NEXT:    [[TMP59:%.*]] = or i32 [[TMP58]], 1
+; CHECK-NEXT:    [[TMP60:%.*]] = sitofp i32 [[TMP55]] to float
+; CHECK-NEXT:    [[TMP61:%.*]] = sitofp i32 [[TMP56]] to float
+; CHECK-NEXT:    [[TMP62:%.*]] = fdiv fast float 1.000000e+00, [[TMP61]]
+; CHECK-NEXT:    [[TMP63:%.*]] = fmul fast float [[TMP60]], [[TMP62]]
+; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.trunc.f32(float [[TMP63]])
+; CHECK-NEXT:    [[TMP65:%.*]] = fsub fast float -0.000000e+00, [[TMP64]]
+; CHECK-NEXT:    [[TMP66:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP65]], float [[TMP61]], float [[TMP60]])
+; CHECK-NEXT:    [[TMP67:%.*]] = fptosi float [[TMP64]] to i32
+; CHECK-NEXT:    [[TMP68:%.*]] = call fast float @llvm.fabs.f32(float [[TMP66]])
+; CHECK-NEXT:    [[TMP69:%.*]] = call fast float @llvm.fabs.f32(float [[TMP61]])
+; CHECK-NEXT:    [[TMP70:%.*]] = fcmp fast oge float [[TMP68]], [[TMP69]]
+; CHECK-NEXT:    [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP59]], i32 0
+; CHECK-NEXT:    [[TMP72:%.*]] = add i32 [[TMP67]], [[TMP71]]
+; CHECK-NEXT:    [[TMP73:%.*]] = mul i32 [[TMP72]], [[TMP56]]
+; CHECK-NEXT:    [[TMP74:%.*]] = sub i32 [[TMP55]], [[TMP73]]
+; CHECK-NEXT:    [[TMP75:%.*]] = trunc i32 [[TMP74]] to i15
+; CHECK-NEXT:    [[TMP76:%.*]] = sext i15 [[TMP75]] to i32
+; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[TMP76]] to i15
+; CHECK-NEXT:    [[TMP78:%.*]] = insertelement <3 x i15> [[TMP52]], i15 [[TMP77]], i64 2
+; CHECK-NEXT:    store <3 x i15> [[TMP78]], <3 x i15> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    ret void
+;
+  %r = srem <3 x i15> %x, %y
+  store <3 x i15> %r, <3 x i15> addrspace(1)* %out
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/AMDGPU/dagcombine-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/dagcombine-select.ll?rev=335868&r1=335867&r2=335868&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/dagcombine-select.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/dagcombine-select.ll Thu Jun 28 08:59:18 2018
@@ -157,37 +157,37 @@ define amdgpu_kernel void @sel_constants
 
 ; GCN-LABEL: {{^}}sdiv_constant_sel_constants:
 ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 5, 0,
-define amdgpu_kernel void @sdiv_constant_sel_constants(i32 addrspace(1)* %p, i1 %cond) {
-  %sel = select i1 %cond, i32 121, i32 23
-  %bo = sdiv i32 120, %sel
-  store i32 %bo, i32 addrspace(1)* %p, align 4
+define amdgpu_kernel void @sdiv_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
+  %sel = select i1 %cond, i64 121, i64 23
+  %bo = sdiv i64 120, %sel
+  store i64 %bo, i64 addrspace(1)* %p, align 8
   ret void
 }
 
 ; GCN-LABEL: {{^}}udiv_constant_sel_constants:
 ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 5, 0,
-define amdgpu_kernel void @udiv_constant_sel_constants(i32 addrspace(1)* %p, i1 %cond) {
-  %sel = select i1 %cond, i32 -4, i32 23
-  %bo = udiv i32 120, %sel
-  store i32 %bo, i32 addrspace(1)* %p, align 4
+define amdgpu_kernel void @udiv_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
+  %sel = select i1 %cond, i64 -4, i64 23
+  %bo = udiv i64 120, %sel
+  store i64 %bo, i64 addrspace(1)* %p, align 8
   ret void
 }
 
 ; GCN-LABEL: {{^}}srem_constant_sel_constants:
 ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 3, 33,
-define amdgpu_kernel void @srem_constant_sel_constants(i32 addrspace(1)* %p, i1 %cond) {
-  %sel = select i1 %cond, i32 34, i32 15
-  %bo = srem i32 33, %sel
-  store i32 %bo, i32 addrspace(1)* %p, align 4
+define amdgpu_kernel void @srem_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
+  %sel = select i1 %cond, i64 34, i64 15
+  %bo = srem i64 33, %sel
+  store i64 %bo, i64 addrspace(1)* %p, align 8
   ret void
 }
 
 ; GCN-LABEL: {{^}}urem_constant_sel_constants:
 ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 3, 33,
-define amdgpu_kernel void @urem_constant_sel_constants(i32 addrspace(1)* %p, i1 %cond) {
-  %sel = select i1 %cond, i32 34, i32 15
-  %bo = urem i32 33, %sel
-  store i32 %bo, i32 addrspace(1)* %p, align 4
+define amdgpu_kernel void @urem_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
+  %sel = select i1 %cond, i64 34, i64 15
+  %bo = urem i64 33, %sel
+  store i64 %bo, i64 addrspace(1)* %p, align 8
   ret void
 }
 




More information about the llvm-commits mailing list