[llvm] r283555 - [AMDGPU] Promote uniform (i1, i16] operations to i32

Konstantin Zhuravlyov via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 7 07:22:58 PDT 2016


Author: kzhuravl
Date: Fri Oct  7 09:22:58 2016
New Revision: 283555

URL: http://llvm.org/viewvc/llvm-project?rev=283555&view=rev
Log:
[AMDGPU] Promote uniform (i1, i16] operations to i32

Differential Revision: https://reviews.llvm.org/D25302

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
    llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/ctlz.ll
    llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
    llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
    llvm/trunk/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
    llvm/trunk/test/CodeGen/AMDGPU/trunc-cmp-constant.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp?rev=283555&r1=283554&r2=283555&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp Fri Oct  7 09:22:58 2016
@@ -39,28 +39,20 @@ class AMDGPUCodeGenPrepare : public Func
   Module *Mod;
   bool HasUnsafeFPMath;
 
-  /// \brief Copies exact/nsw/nuw flags (if any) from binary operator \p I to
-  /// binary operator \p V.
+  /// \brief Copies exact/nsw/nuw flags (if any) from binary operation \p I to
+  /// binary operation \p V.
   ///
-  /// \returns Binary operator \p V.
+  /// \returns Binary operation \p V.
   Value *copyFlags(const BinaryOperator &I, Value *V) const;
 
-  /// \returns Equivalent 16 bit integer type for given 32 bit integer type
-  /// \p T.
-  Type *getI16Ty(IRBuilder<> &B, const Type *T) const;
+  /// \returns \p T's base element bit width.
+  unsigned getBaseElementBitWidth(const Type *T) const;
 
-  /// \returns Equivalent 32 bit integer type for given 16 bit integer type
-  /// \p T.
+  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
+  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
+  /// is returned.
   Type *getI32Ty(IRBuilder<> &B, const Type *T) const;
 
-  /// \returns True if the base element of type \p T is 16 bit integer, false
-  /// otherwise.
-  bool isI16Ty(const Type *T) const;
-
-  /// \returns True if the base element of type \p T is 32 bit integer, false
-  /// otherwise.
-  bool isI32Ty(const Type *T) const;
-
   /// \returns True if binary operation \p I is a signed binary operation, false
   /// otherwise.
   bool isSigned(const BinaryOperator &I) const;
@@ -69,39 +61,55 @@ class AMDGPUCodeGenPrepare : public Func
   /// signed 'icmp' operation, false otherwise.
   bool isSigned(const SelectInst &I) const;
 
-  /// \brief Promotes uniform 16 bit binary operation \p I to equivalent 32 bit
-  /// binary operation by sign or zero extending operands to 32 bits, replacing
-  /// 16 bit operation with equivalent 32 bit operation, and truncating the
-  /// result of 32 bit operation back to 16 bits. 16 bit division operation is
-  /// not promoted.
+  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
+  /// false otherwise.
+  bool needsPromotionToI32(const Type *T) const;
+
+  /// \brief Promotes uniform binary operation \p I to equivalent 32 bit binary
+  /// operation.
+  ///
+  /// \details \p I's base element bit width must be greater than 1 and less
+  /// than or equal to 16. Promotion is done by sign or zero extending operands
+  /// 32 bits, replacing \p I with equivalent 32 bit binary operation, and
+  /// truncating the result of 32 bit binary operation back to \p I's original
+  /// type. Division operation is not promoted.
+  ///
+  /// \returns True if \p I is promoted to equivalent 32 bit binary operation,
+  /// false otherwise.
+  bool promoteUniformOpToI32(BinaryOperator &I) const;
+
+  /// \brief Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
   ///
-  /// \returns True if 16 bit binary operation is promoted to equivalent 32 bit
-  /// binary operation, false otherwise.
-  bool promoteUniformI16OpToI32(BinaryOperator &I) const;
-
-  /// \brief Promotes uniform 16 bit 'icmp' operation \p I to 32 bit 'icmp'
-  /// operation by sign or zero extending operands to 32 bits, and replacing 16
-  /// bit operation with 32 bit operation.
+  /// \details \p I's base element bit width must be greater than 1 and less
+  /// than or equal to 16. Promotion is done by sign or zero extending operands
+  /// 32 bits, and replacing \p I with 32 bit 'icmp' operation.
   ///
   /// \returns True.
-  bool promoteUniformI16OpToI32(ICmpInst &I) const;
+  bool promoteUniformOpToI32(ICmpInst &I) const;
 
-  /// \brief Promotes uniform 16 bit 'select' operation \p I to 32 bit 'select'
-  /// operation by sign or zero extending operands to 32 bits, replacing 16 bit
-  /// operation with 32 bit operation, and truncating the result of 32 bit
-  /// operation back to 16 bits.
+  /// \brief Promotes uniform 'select' operation \p I to 32 bit 'select'
+  /// operation.
+  ///
+  /// \details \p I's base element bit width must be greater than 1 and less
+  /// than or equal to 16. Promotion is done by sign or zero extending operands
+  /// 32 bits, replacing \p I with 32 bit 'select' operation, and truncating the
+  /// result of 32 bit 'select' operation back to \p I's original type.
   ///
   /// \returns True.
-  bool promoteUniformI16OpToI32(SelectInst &I) const;
+  bool promoteUniformOpToI32(SelectInst &I) const;
 
-  /// \brief Promotes uniform 16 bit 'bitreverse' intrinsic \p I to 32 bit
-  /// 'bitreverse' intrinsic by zero extending operand to 32 bits, replacing 16
-  /// bit intrinsic with 32 bit intrinsic, shifting the result of 32 bit
-  /// intrinsic 16 bits to the right with zero fill, and truncating the result
-  /// of shift operation back to 16 bits.
+  /// \brief Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
+  /// intrinsic.
+  ///
+  /// \details \p I's base element bit width must be greater than 1 and less
+  /// than or equal to 16. Promotion is done by zero extending the operand to 32
+  /// bits, replacing \p I with 32 bit 'bitreverse' intrinsic, shifting the
+  /// result of 32 bit 'bitreverse' intrinsic to the right with zero fill (the
+  /// shift amount is 32 minus \p I's base element bit width), and truncating
+  /// the result of the shift operation back to \p I's original type.
   ///
   /// \returns True.
-  bool promoteUniformI16BitreverseIntrinsicToI32(IntrinsicInst &I) const;
+  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;
 
 public:
   static char ID;
@@ -138,51 +146,34 @@ public:
 
 Value *AMDGPUCodeGenPrepare::copyFlags(
     const BinaryOperator &I, Value *V) const {
-  assert(isa<BinaryOperator>(V) && "V must be binary operator");
+  assert(isa<BinaryOperator>(V) && "V must be binary operation");
 
   BinaryOperator *BinOp = cast<BinaryOperator>(V);
   if (isa<OverflowingBinaryOperator>(BinOp)) {
     BinOp->setHasNoSignedWrap(I.hasNoSignedWrap());
     BinOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
-  } else if (isa<PossiblyExactOperator>(BinOp)) {
+  } else if (isa<PossiblyExactOperator>(BinOp))
     BinOp->setIsExact(I.isExact());
-  }
 
   return V;
 }
 
-Type *AMDGPUCodeGenPrepare::getI16Ty(IRBuilder<> &B, const Type *T) const {
-  assert(isI32Ty(T) && "T must be 32 bits");
+unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
+  assert(needsPromotionToI32(T) && "T does not need promotion to i32");
 
   if (T->isIntegerTy())
-    return B.getInt16Ty();
-  return VectorType::get(B.getInt16Ty(), cast<VectorType>(T)->getNumElements());
+    return T->getIntegerBitWidth();
+  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
 }
 
 Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
-  assert(isI16Ty(T) && "T must be 16 bits");
+  assert(needsPromotionToI32(T) && "T does not need promotion to i32");
 
   if (T->isIntegerTy())
     return B.getInt32Ty();
   return VectorType::get(B.getInt32Ty(), cast<VectorType>(T)->getNumElements());
 }
 
-bool AMDGPUCodeGenPrepare::isI16Ty(const Type *T) const {
-  if (T->isIntegerTy(16))
-    return true;
-  if (!T->isVectorTy())
-    return false;
-  return cast<VectorType>(T)->getElementType()->isIntegerTy(16);
-}
-
-bool AMDGPUCodeGenPrepare::isI32Ty(const Type *T) const {
-  if (T->isIntegerTy(32))
-    return true;
-  if (!T->isVectorTy())
-    return false;
-  return cast<VectorType>(T)->getElementType()->isIntegerTy(32);
-}
-
 bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
   return I.getOpcode() == Instruction::AShr ||
       I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
@@ -193,10 +184,21 @@ bool AMDGPUCodeGenPrepare::isSigned(cons
       cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
 }
 
-bool AMDGPUCodeGenPrepare::promoteUniformI16OpToI32(BinaryOperator &I) const {
-  assert(isI16Ty(I.getType()) && "I must be 16 bits");
+bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
+  if (T->isIntegerTy() && T->getIntegerBitWidth() > 1 &&
+      T->getIntegerBitWidth() <= 16)
+    return true;
+  if (!T->isVectorTy())
+    return false;
+  return needsPromotionToI32(cast<VectorType>(T)->getElementType());
+}
 
-  if (I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::UDiv)
+bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
+  assert(needsPromotionToI32(I.getType()) &&
+         "I does not need promotion to i32");
+
+  if (I.getOpcode() == Instruction::SDiv ||
+      I.getOpcode() == Instruction::UDiv)
     return false;
 
   IRBuilder<> Builder(&I);
@@ -216,7 +218,7 @@ bool AMDGPUCodeGenPrepare::promoteUnifor
     ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
   }
   ExtRes = copyFlags(I, Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1));
-  TruncRes = Builder.CreateTrunc(ExtRes, getI16Ty(Builder, ExtRes->getType()));
+  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());
 
   I.replaceAllUsesWith(TruncRes);
   I.eraseFromParent();
@@ -224,25 +226,24 @@ bool AMDGPUCodeGenPrepare::promoteUnifor
   return true;
 }
 
-bool AMDGPUCodeGenPrepare::promoteUniformI16OpToI32(ICmpInst &I) const {
-  assert(isI16Ty(I.getOperand(0)->getType()) && "Op0 must be 16 bits");
-  assert(isI16Ty(I.getOperand(1)->getType()) && "Op1 must be 16 bits");
+bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
+  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
+         "I does not need promotion to i32");
 
   IRBuilder<> Builder(&I);
   Builder.SetCurrentDebugLocation(I.getDebugLoc());
 
-  Type *I32TyOp0 = getI32Ty(Builder, I.getOperand(0)->getType());
-  Type *I32TyOp1 = getI32Ty(Builder, I.getOperand(1)->getType());
+  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
   Value *ExtOp0 = nullptr;
   Value *ExtOp1 = nullptr;
   Value *NewICmp  = nullptr;
 
   if (I.isSigned()) {
-    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32TyOp0);
-    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32TyOp1);
+    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
+    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
   } else {
-    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32TyOp0);
-    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32TyOp1);
+    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
+    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
   }
   NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);
 
@@ -252,8 +253,9 @@ bool AMDGPUCodeGenPrepare::promoteUnifor
   return true;
 }
 
-bool AMDGPUCodeGenPrepare::promoteUniformI16OpToI32(SelectInst &I) const {
-  assert(isI16Ty(I.getType()) && "I must be 16 bits");
+bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
+  assert(needsPromotionToI32(I.getType()) &&
+         "I does not need promotion to i32");
 
   IRBuilder<> Builder(&I);
   Builder.SetCurrentDebugLocation(I.getDebugLoc());
@@ -272,7 +274,7 @@ bool AMDGPUCodeGenPrepare::promoteUnifor
     ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
   }
   ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
-  TruncRes = Builder.CreateTrunc(ExtRes, getI16Ty(Builder, ExtRes->getType()));
+  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());
 
   I.replaceAllUsesWith(TruncRes);
   I.eraseFromParent();
@@ -280,10 +282,12 @@ bool AMDGPUCodeGenPrepare::promoteUnifor
   return true;
 }
 
-bool AMDGPUCodeGenPrepare::promoteUniformI16BitreverseIntrinsicToI32(
+bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
     IntrinsicInst &I) const {
-  assert(I.getIntrinsicID() == Intrinsic::bitreverse && "I must be bitreverse");
-  assert(isI16Ty(I.getType()) && "I must be 16 bits");
+  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
+         "I must be bitreverse intrinsic");
+  assert(needsPromotionToI32(I.getType()) &&
+         "I does not need promotion to i32");
 
   IRBuilder<> Builder(&I);
   Builder.SetCurrentDebugLocation(I.getDebugLoc());
@@ -293,9 +297,10 @@ bool AMDGPUCodeGenPrepare::promoteUnifor
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
   Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
   Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
-  Value *LShrOp = Builder.CreateLShr(ExtRes, 16);
+  Value *LShrOp =
+      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
   Value *TruncRes =
-      Builder.CreateTrunc(LShrOp, getI16Ty(Builder, ExtRes->getType()));
+      Builder.CreateTrunc(LShrOp, I.getType());
 
   I.replaceAllUsesWith(TruncRes);
   I.eraseFromParent();
@@ -390,9 +395,9 @@ static bool hasUnsafeFPMath(const Functi
 bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
   bool Changed = false;
 
-  // TODO: Should we promote smaller types that will be legalized to i16?
-  if (ST->has16BitInsts() && isI16Ty(I.getType()) && DA->isUniform(&I))
-    Changed |= promoteUniformI16OpToI32(I);
+  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
+      DA->isUniform(&I))
+    Changed |= promoteUniformOpToI32(I);
 
   return Changed;
 }
@@ -400,10 +405,9 @@ bool AMDGPUCodeGenPrepare::visitBinaryOp
 bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
   bool Changed = false;
 
-  // TODO: Should we promote smaller types that will be legalized to i16?
-  if (ST->has16BitInsts() && isI16Ty(I.getOperand(0)->getType()) &&
-          isI16Ty(I.getOperand(1)->getType()) && DA->isUniform(&I))
-    Changed |= promoteUniformI16OpToI32(I);
+  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
+      DA->isUniform(&I))
+    Changed |= promoteUniformOpToI32(I);
 
   return Changed;
 }
@@ -411,9 +415,9 @@ bool AMDGPUCodeGenPrepare::visitICmpInst
 bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
   bool Changed = false;
 
-  // TODO: Should we promote smaller types that will be legalized to i16?
-  if (ST->has16BitInsts() && isI16Ty(I.getType()) && DA->isUniform(&I))
-    Changed |= promoteUniformI16OpToI32(I);
+  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
+      DA->isUniform(&I))
+    Changed |= promoteUniformOpToI32(I);
 
   return Changed;
 }
@@ -430,9 +434,9 @@ bool AMDGPUCodeGenPrepare::visitIntrinsi
 bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
   bool Changed = false;
 
-  // TODO: Should we promote smaller types that will be legalized to i16?
-  if (ST->has16BitInsts() && isI16Ty(I.getType()) && DA->isUniform(&I))
-    Changed |= promoteUniformI16BitreverseIntrinsicToI32(I);
+  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
+      DA->isUniform(&I))
+    Changed |= promoteUniformBitreverseToI32(I);
 
   return Changed;
 }

Modified: llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll?rev=283555&r1=283554&r2=283555&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll Fri Oct  7 09:22:58 2016
@@ -1,6 +1,525 @@
 ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-codegenprepare %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
 ; RUN: opt -S -mtriple=amdgcn-- -mcpu=tonga -amdgpu-codegenprepare %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
 
+; GCN-LABEL: @add_i3(
+; SI: %r = add i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = add i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @add_i3(i3 %a, i3 %b) {
+  %r = add i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @add_nsw_i3(
+; SI: %r = add nsw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = add nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @add_nsw_i3(i3 %a, i3 %b) {
+  %r = add nsw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @add_nuw_i3(
+; SI: %r = add nuw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @add_nuw_i3(i3 %a, i3 %b) {
+  %r = add nuw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @add_nuw_nsw_i3(
+; SI: %r = add nuw nsw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @add_nuw_nsw_i3(i3 %a, i3 %b) {
+  %r = add nuw nsw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @sub_i3(
+; SI: %r = sub i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = sub i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @sub_i3(i3 %a, i3 %b) {
+  %r = sub i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @sub_nsw_i3(
+; SI: %r = sub nsw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @sub_nsw_i3(i3 %a, i3 %b) {
+  %r = sub nsw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @sub_nuw_i3(
+; SI: %r = sub nuw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @sub_nuw_i3(i3 %a, i3 %b) {
+  %r = sub nuw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @sub_nuw_nsw_i3(
+; SI: %r = sub nuw nsw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @sub_nuw_nsw_i3(i3 %a, i3 %b) {
+  %r = sub nuw nsw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @mul_i3(
+; SI: %r = mul i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = mul i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @mul_i3(i3 %a, i3 %b) {
+  %r = mul i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @mul_nsw_i3(
+; SI: %r = mul nsw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @mul_nsw_i3(i3 %a, i3 %b) {
+  %r = mul nsw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @mul_nuw_i3(
+; SI: %r = mul nuw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @mul_nuw_i3(i3 %a, i3 %b) {
+  %r = mul nuw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @mul_nuw_nsw_i3(
+; SI: %r = mul nuw nsw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @mul_nuw_nsw_i3(i3 %a, i3 %b) {
+  %r = mul nuw nsw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @urem_i3(
+; SI: %r = urem i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = urem i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @urem_i3(i3 %a, i3 %b) {
+  %r = urem i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @srem_i3(
+; SI: %r = srem i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = srem i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @srem_i3(i3 %a, i3 %b) {
+  %r = srem i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @shl_i3(
+; SI: %r = shl i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = shl i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @shl_i3(i3 %a, i3 %b) {
+  %r = shl i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @shl_nsw_i3(
+; SI: %r = shl nsw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @shl_nsw_i3(i3 %a, i3 %b) {
+  %r = shl nsw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @shl_nuw_i3(
+; SI: %r = shl nuw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @shl_nuw_i3(i3 %a, i3 %b) {
+  %r = shl nuw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @shl_nuw_nsw_i3(
+; SI: %r = shl nuw nsw i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @shl_nuw_nsw_i3(i3 %a, i3 %b) {
+  %r = shl nuw nsw i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @lshr_i3(
+; SI: %r = lshr i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = lshr i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @lshr_i3(i3 %a, i3 %b) {
+  %r = lshr i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @lshr_exact_i3(
+; SI: %r = lshr exact i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = lshr exact i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @lshr_exact_i3(i3 %a, i3 %b) {
+  %r = lshr exact i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @ashr_i3(
+; SI: %r = ashr i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = ashr i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @ashr_i3(i3 %a, i3 %b) {
+  %r = ashr i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @ashr_exact_i3(
+; SI: %r = ashr exact i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = ashr exact i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @ashr_exact_i3(i3 %a, i3 %b) {
+  %r = ashr exact i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @and_i3(
+; SI: %r = and i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = and i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @and_i3(i3 %a, i3 %b) {
+  %r = and i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @or_i3(
+; SI: %r = or i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = or i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @or_i3(i3 %a, i3 %b) {
+  %r = or i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @xor_i3(
+; SI: %r = xor i3 %a, %b
+; SI-NEXT: ret i3 %r
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = xor i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @xor_i3(i3 %a, i3 %b) {
+  %r = xor i3 %a, %b
+  ret i3 %r
+}
+
+; GCN-LABEL: @select_eq_i3(
+; SI: %cmp = icmp eq i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp eq i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_eq_i3(i3 %a, i3 %b) {
+  %cmp = icmp eq i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_ne_i3(
+; SI: %cmp = icmp ne i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp ne i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_ne_i3(i3 %a, i3 %b) {
+  %cmp = icmp ne i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_ugt_i3(
+; SI: %cmp = icmp ugt i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp ugt i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_ugt_i3(i3 %a, i3 %b) {
+  %cmp = icmp ugt i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_uge_i3(
+; SI: %cmp = icmp uge i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp uge i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_uge_i3(i3 %a, i3 %b) {
+  %cmp = icmp uge i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_ult_i3(
+; SI: %cmp = icmp ult i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp ult i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_ult_i3(i3 %a, i3 %b) {
+  %cmp = icmp ult i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_ule_i3(
+; SI: %cmp = icmp ule i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp ule i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_ule_i3(i3 %a, i3 %b) {
+  %cmp = icmp ule i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_sgt_i3(
+; SI: %cmp = icmp sgt i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp sgt i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_sgt_i3(i3 %a, i3 %b) {
+  %cmp = icmp sgt i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_sge_i3(
+; SI: %cmp = icmp sge i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp sge i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_sge_i3(i3 %a, i3 %b) {
+  %cmp = icmp sge i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_slt_i3(
+; SI: %cmp = icmp slt i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp slt i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_slt_i3(i3 %a, i3 %b) {
+  %cmp = icmp slt i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+; GCN-LABEL: @select_sle_i3(
+; SI: %cmp = icmp sle i3 %a, %b
+; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b
+; SI-NEXT: ret i3 %sel
+; VI: %[[A_32_0:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp sle i32 %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = sext i3 %a to i32
+; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i3 %b to i32
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]]
+; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3
+; VI-NEXT: ret i3 %[[SEL_3]]
+define i3 @select_sle_i3(i3 %a, i3 %b) {
+  %cmp = icmp sle i3 %a, %b
+  %sel = select i1 %cmp, i3 %a, i3 %b
+  ret i3 %sel
+}
+
+declare i3 @llvm.bitreverse.i3(i3)
+; GCN-LABEL: @bitreverse_i3(
+; SI: %brev = call i3 @llvm.bitreverse.i3(i3 %a)
+; SI-NEXT: ret i3 %brev
+; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
+; VI-NEXT: %[[R_32:[0-9]+]] = call i32 @llvm.bitreverse.i32(i32 %[[A_32]])
+; VI-NEXT: %[[S_32:[0-9]+]] = lshr i32 %[[R_32]], 29
+; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[S_32]] to i3
+; VI-NEXT: ret i3 %[[R_3]]
+define i3 @bitreverse_i3(i3 %a) {
+  %brev = call i3 @llvm.bitreverse.i3(i3 %a)
+  ret i3 %brev
+}
+
 ; GCN-LABEL: @add_i16(
 ; SI: %r = add i16 %a, %b
 ; SI-NEXT: ret i16 %r
@@ -520,6 +1039,525 @@ define i16 @bitreverse_i16(i16 %a) {
   ret i16 %brev
 }
 
+; GCN-LABEL: @add_3xi15(
+; SI: %r = add <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = add <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @add_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = add <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @add_nsw_3xi15(
+; SI: %r = add nsw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = add nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @add_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = add nsw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @add_nuw_3xi15(
+; SI: %r = add nuw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @add_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = add nuw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @add_nuw_nsw_3xi15(
+; SI: %r = add nuw nsw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @add_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = add nuw nsw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @sub_3xi15(
+; SI: %r = sub <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = sub <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @sub_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = sub <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @sub_nsw_3xi15(
+; SI: %r = sub nsw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @sub_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = sub nsw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @sub_nuw_3xi15(
+; SI: %r = sub nuw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @sub_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = sub nuw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @sub_nuw_nsw_3xi15(
+; SI: %r = sub nuw nsw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @sub_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = sub nuw nsw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @mul_3xi15(
+; SI: %r = mul <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = mul <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @mul_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = mul <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @mul_nsw_3xi15(
+; SI: %r = mul nsw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @mul_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = mul nsw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @mul_nuw_3xi15(
+; SI: %r = mul nuw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @mul_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = mul nuw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @mul_nuw_nsw_3xi15(
+; SI: %r = mul nuw nsw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @mul_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = mul nuw nsw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @urem_3xi15(
+; SI: %r = urem <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = urem <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @urem_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = urem <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @srem_3xi15(
+; SI: %r = srem <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = srem <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @srem_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = srem <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @shl_3xi15(
+; SI: %r = shl <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = shl <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @shl_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = shl <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @shl_nsw_3xi15(
+; SI: %r = shl nsw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @shl_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = shl nsw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @shl_nuw_3xi15(
+; SI: %r = shl nuw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @shl_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = shl nuw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @shl_nuw_nsw_3xi15(
+; SI: %r = shl nuw nsw <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @shl_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = shl nuw nsw <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @lshr_3xi15(
+; SI: %r = lshr <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = lshr <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @lshr_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = lshr <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @lshr_exact_3xi15(
+; SI: %r = lshr exact <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = lshr exact <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @lshr_exact_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = lshr exact <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @ashr_3xi15(
+; SI: %r = ashr <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = ashr <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @ashr_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = ashr <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @ashr_exact_3xi15(
+; SI: %r = ashr exact <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = ashr exact <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @ashr_exact_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = ashr exact <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @and_3xi15(
+; SI: %r = and <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = and <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @and_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = and <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @or_3xi15(
+; SI: %r = or <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = or <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @or_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = or <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @xor_3xi15(
+; SI: %r = xor <3 x i15> %a, %b
+; SI-NEXT: ret <3 x i15> %r
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = xor <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @xor_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %r = xor <3 x i15> %a, %b
+  ret <3 x i15> %r
+}
+
+; GCN-LABEL: @select_eq_3xi15(
+; SI: %cmp = icmp eq <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp eq <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_eq_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp eq <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_ne_3xi15(
+; SI: %cmp = icmp ne <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp ne <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_ne_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp ne <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_ugt_3xi15(
+; SI: %cmp = icmp ugt <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp ugt <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_ugt_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp ugt <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_uge_3xi15(
+; SI: %cmp = icmp uge <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp uge <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_uge_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp uge <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_ult_3xi15(
+; SI: %cmp = icmp ult <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp ult <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_ult_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp ult <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_ule_3xi15(
+; SI: %cmp = icmp ule <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp ule <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_ule_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp ule <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_sgt_3xi15(
+; SI: %cmp = icmp sgt <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp sgt <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_sgt_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp sgt <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_sge_3xi15(
+; SI: %cmp = icmp sge <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp sge <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_sge_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp sge <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_slt_3xi15(
+; SI: %cmp = icmp slt <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp slt <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_slt_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp slt <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+; GCN-LABEL: @select_sle_3xi15(
+; SI: %cmp = icmp sle <3 x i15> %a, %b
+; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+; SI-NEXT: ret <3 x i15> %sel
+; VI: %[[A_32_0:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[CMP:[0-9]+]] = icmp sle <3 x i32> %[[A_32_0]], %[[B_32_0]]
+; VI-NEXT: %[[A_32_1:[0-9]+]] = sext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i15> %b to <3 x i32>
+; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]]
+; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[SEL_15]]
+define <3 x i15> @select_sle_3xi15(<3 x i15> %a, <3 x i15> %b) {
+  %cmp = icmp sle <3 x i15> %a, %b
+  %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b
+  ret <3 x i15> %sel
+}
+
+declare <3 x i15> @llvm.bitreverse.v3i15(<3 x i15>)
+; GCN-LABEL: @bitreverse_3xi15(
+; SI: %brev = call <3 x i15> @llvm.bitreverse.v3i15(<3 x i15> %a)
+; SI-NEXT: ret <3 x i15> %brev
+; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
+; VI-NEXT: %[[R_32:[0-9]+]] = call <3 x i32> @llvm.bitreverse.v3i32(<3 x i32> %[[A_32]])
+; VI-NEXT: %[[S_32:[0-9]+]] = lshr <3 x i32> %[[R_32]], <i32 17, i32 17, i32 17>
+; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[S_32]] to <3 x i15>
+; VI-NEXT: ret <3 x i15> %[[R_15]]
+define <3 x i15> @bitreverse_3xi15(<3 x i15> %a) {
+  %brev = call <3 x i15> @llvm.bitreverse.v3i15(<3 x i15> %a)
+  ret <3 x i15> %brev
+}
+
 ; GCN-LABEL: @add_3xi16(
 ; SI: %r = add <3 x i16> %a, %b
 ; SI-NEXT: ret <3 x i16> %r

Modified: llvm/trunk/test/CodeGen/AMDGPU/ctlz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ctlz.ll?rev=283555&r1=283554&r2=283555&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/ctlz.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/ctlz.ll Fri Oct  7 09:22:58 2016
@@ -228,12 +228,15 @@ define void @v_ctlz_i32_sel_ne_bitwidth(
   ret void
 }
 
+; FIXME: Need to handle non-uniform case for function below (load without gep).
 ; FUNC-LABEL: {{^}}v_ctlz_i8_sel_eq_neg1:
-; GCN: buffer_load_ubyte [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ubyte [[VAL:v[0-9]+]],
 ; GCN: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
-; GCN: buffer_store_byte [[FFBH]],
+; GCN: {{buffer|flat}}_store_byte [[FFBH]],
  define void @v_ctlz_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
-  %val = load i8, i8 addrspace(1)* %valptr
+  %tid = call i32 @llvm.r600.read.tidig.x()
+  %valptr.gep = getelementptr i8, i8 addrspace(1)* %valptr, i32 %tid
+  %val = load i8, i8 addrspace(1)* %valptr.gep
   %ctlz = call i8 @llvm.ctlz.i8(i8 %val, i1 false) nounwind readnone
   %cmp = icmp eq i8 %val, 0
   %sel = select i1 %cmp, i8 -1, i8 %ctlz
@@ -254,13 +257,16 @@ define void @v_ctlz_i32_sel_ne_bitwidth(
   ret void
 }
 
+; FIXME: Need to handle non-uniform case for function below (load without gep).
 ; FUNC-LABEL: {{^}}v_ctlz_i7_sel_eq_neg1:
-; GCN: buffer_load_ubyte [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ubyte [[VAL:v[0-9]+]],
 ; GCN: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
 ; GCN: v_and_b32_e32 [[TRUNC:v[0-9]+]], 0x7f, [[FFBH]]
-; GCN: buffer_store_byte [[TRUNC]],
- define void @v_ctlz_i7_sel_eq_neg1(i7 addrspace(1)* noalias %out, i7 addrspace(1)* noalias %valptr) nounwind {
-  %val = load i7, i7 addrspace(1)* %valptr
+; GCN: {{buffer|flat}}_store_byte [[TRUNC]],
+define void @v_ctlz_i7_sel_eq_neg1(i7 addrspace(1)* noalias %out, i7 addrspace(1)* noalias %valptr) nounwind {
+  %tid = call i32 @llvm.r600.read.tidig.x()
+  %valptr.gep = getelementptr i7, i7 addrspace(1)* %valptr, i32 %tid
+  %val = load i7, i7 addrspace(1)* %valptr.gep
   %ctlz = call i7 @llvm.ctlz.i7(i7 %val, i1 false) nounwind readnone
   %cmp = icmp eq i7 %val, 0
   %sel = select i1 %cmp, i7 -1, i7 %ctlz

Modified: llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll?rev=283555&r1=283554&r2=283555&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll Fri Oct  7 09:22:58 2016
@@ -172,12 +172,15 @@ define void @v_ctlz_zero_undef_i32_sel_n
   ret void
 }
 
+; FIXME: Need to handle non-uniform case for function below (load without gep).
 ; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i8_sel_eq_neg1:
-; SI: buffer_load_ubyte [[VAL:v[0-9]+]],
+; SI: {{buffer|flat}}_load_ubyte [[VAL:v[0-9]+]],
 ; SI: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
-; SI: buffer_store_byte [[FFBH]],
- define void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
-  %val = load i8, i8 addrspace(1)* %valptr
+; SI: {{buffer|flat}}_store_byte [[FFBH]],
+define void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
+  %tid = call i32 @llvm.r600.read.tidig.x()
+  %valptr.gep = getelementptr i8, i8 addrspace(1)* %valptr, i32 %tid
+  %val = load i8, i8 addrspace(1)* %valptr.gep
   %ctlz = call i8 @llvm.ctlz.i8(i8 %val, i1 true) nounwind readnone
   %cmp = icmp eq i8 %val, 0
   %sel = select i1 %cmp, i8 -1, i8 %ctlz

Modified: llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll?rev=283555&r1=283554&r2=283555&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll Fri Oct  7 09:22:58 2016
@@ -1,6 +1,9 @@
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
 
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
+
 ; SI-LABEL: {{^}}load_i8_to_f32:
 ; SI: buffer_load_ubyte [[LOADREG:v[0-9]+]],
 ; SI-NOT: bfe
@@ -80,9 +83,10 @@ define void @load_v4i8_to_v4f32_unaligne
   ret void
 }
 
+; FIXME: Need to handle non-uniform case for function below (load without gep).
 ; Instructions still emitted to repack bytes for add use.
 ; SI-LABEL: {{^}}load_v4i8_to_v4f32_2_uses:
-; SI: buffer_load_dword
+; SI: {{buffer|flat}}_load_dword
 ; SI-DAG: v_cvt_f32_ubyte0_e32
 ; SI-DAG: v_cvt_f32_ubyte1_e32
 ; SI-DAG: v_cvt_f32_ubyte2_e32
@@ -96,12 +100,14 @@ define void @load_v4i8_to_v4f32_unaligne
 ; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xff00,
 ; SI-DAG: v_add_i32
 
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dword
+; SI: {{buffer|flat}}_store_dwordx4
+; SI: {{buffer|flat}}_store_dword
 
 ; SI: s_endpgm
 define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
+  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %in.ptr = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in.ptr, align 4
   %cvt = uitofp <4 x i8> %load to <4 x float>
   store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
   %add = add <4 x i8> %load, <i8 9, i8 9, i8 9, i8 9> ; Second use of %load

Modified: llvm/trunk/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll?rev=283555&r1=283554&r2=283555&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll Fri Oct  7 09:22:58 2016
@@ -77,12 +77,19 @@ define void @test_umul24_i16_vgpr(i32 ad
   ret void
 }
 
-; FUNC-LABEL: {{^}}test_umul24_i8:
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; FUNC-LABEL: {{^}}test_umul24_i8_vgpr:
 ; GCN: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
 ; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
-define void @test_umul24_i8(i32 addrspace(1)* %out, i8 %a, i8 %b) {
+define void @test_umul24_i8_vgpr(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b) {
 entry:
-  %mul = mul i8 %a, %b
+  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
+  %a.ptr = getelementptr i8, i8 addrspace(1)* %a, i32 %tid.x
+  %b.ptr = getelementptr i8, i8 addrspace(1)* %b, i32 %tid.y
+  %a.l = load i8, i8 addrspace(1)* %a.ptr
+  %b.l = load i8, i8 addrspace(1)* %b.ptr
+  %mul = mul i8 %a.l, %b.l
   %ext = sext i8 %mul to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/AMDGPU/trunc-cmp-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/trunc-cmp-constant.ll?rev=283555&r1=283554&r2=283555&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/trunc-cmp-constant.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/trunc-cmp-constant.ll Fri Oct  7 09:22:58 2016
@@ -1,6 +1,8 @@
 ; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
 
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+
 ; FUNC-LABEL {{^}}sextload_i1_to_i32_trunc_cmp_eq_0:
 ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
 ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
@@ -154,13 +156,16 @@ define void @zextload_i1_to_i32_trunc_cm
   ret void
 }
 
+; FIXME: Need to handle non-uniform case for function below (load without gep).
 ; FUNC-LABEL: {{^}}masked_load_i1_to_i32_trunc_cmp_ne_neg1:
-; SI: buffer_load_sbyte [[LOAD:v[0-9]+]]
+; SI: {{buffer|flat}}_load_sbyte [[LOAD:v[0-9]+]]
 ; SI: v_cmp_ne_u32_e32 vcc, -1, [[LOAD]]{{$}}
 ; SI-NEXT: v_cndmask_b32_e64
-; SI: buffer_store_byte
+; SI: {{buffer|flat}}_store_byte
 define void @masked_load_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
-  %load = load i8, i8 addrspace(1)* %in
+  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %in.ptr = getelementptr i8, i8 addrspace(1)* %in, i32 %tid.x
+  %load = load i8, i8 addrspace(1)* %in.ptr
   %masked = and i8 %load, 255
   %ext = sext i8 %masked to i32
   %cmp = icmp ne i32 %ext, -1




More information about the llvm-commits mailing list