[llvm] r349630 - [X86][SSE] Auto upgrade PADDUS/PSUBUS intrinsics to UADD_SAT/USUB_SAT generic intrinsics (llvm)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 19 06:43:36 PST 2018
Author: rksimon
Date: Wed Dec 19 06:43:36 2018
New Revision: 349630
URL: http://llvm.org/viewvc/llvm-project?rev=349630&view=rev
Log:
[X86][SSE] Auto upgrade PADDUS/PSUBUS intrinsics to UADD_SAT/USUB_SAT generic intrinsics (llvm)
Now that we use the generic ISD opcodes, we can use the generic intrinsics directly as well. This fixes the poor fast-isel codegen, since we no longer expand to an easily broken IR code sequence.
I intend to deal with the signed saturation equivalents as well.
Clang counterpart: https://reviews.llvm.org/D55879
Differential Revision: https://reviews.llvm.org/D55855
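As an illustrative sketch (not taken verbatim from the patch), bitcode that still contains one of the old x86-specific intrinsics, e.g.

  %r = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a, <16 x i8> %b)

is now auto-upgraded directly to the generic saturating intrinsic:

  %r = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)

Previously the upgrade expanded to an add + icmp ugt + select sequence (and, for PSUBUS, an icmp/select/sub sequence), which fast-isel handled poorly.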
Modified:
llvm/trunk/lib/IR/AutoUpgrade.cpp
llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll
llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll
llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
Modified: llvm/trunk/lib/IR/AutoUpgrade.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/AutoUpgrade.cpp?rev=349630&r1=349629&r2=349630&view=diff
==============================================================================
--- llvm/trunk/lib/IR/AutoUpgrade.cpp (original)
+++ llvm/trunk/lib/IR/AutoUpgrade.cpp Wed Dec 19 06:43:36 2018
@@ -926,26 +926,13 @@ static Value *UpgradeX86ALIGNIntrinsics(
static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
bool IsAddition) {
+ Type *Ty = CI.getType();
Value *Op0 = CI.getOperand(0);
Value *Op1 = CI.getOperand(1);
- // Collect vector elements and type data.
- Type *ResultType = CI.getType();
-
- Value *Res;
- if (IsAddition) {
- // ADDUS: a > (a+b) ? ~0 : (a+b)
- // If Op0 > Add, overflow occured.
- Value *Add = Builder.CreateAdd(Op0, Op1);
- Value *ICmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, Op0, Add);
- Value *Max = llvm::Constant::getAllOnesValue(ResultType);
- Res = Builder.CreateSelect(ICmp, Max, Add);
- } else {
- // SUBUS: max(a, b) - b
- Value *ICmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, Op0, Op1);
- Value *Select = Builder.CreateSelect(ICmp, Op0, Op1);
- Res = Builder.CreateSub(Select, Op1);
- }
+ Intrinsic::ID IID = IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat;
+ Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
Value *VecSrc = CI.getOperand(2);
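For the masked AVX512 variants (the four-operand path above), the upgrade emits the same generic call followed by a select on the mask; roughly, for an old masked intrinsic along the lines of @llvm.x86.avx512.mask.paddus.w.512 (name shown for illustration), the upgraded IR is:

  %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
  %2 = bitcast i32 %mask to <32 x i1>
  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru

which matches the canonical AVX512BW test updates below.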
Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll?rev=349630&r1=349629&r2=349630&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll Wed Dec 19 06:43:36 2018
@@ -124,13 +124,11 @@ define <4 x i64> @test_mm256_adds_epu8(<
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
- %1 = add <32 x i8> %arg0, %arg1
- %2 = icmp ugt <32 x i8> %arg0, %1
- %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
- %bc = bitcast <32 x i8> %3 to <4 x i64>
+ %res = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %arg0, <32 x i8> %arg1)
+ %bc = bitcast <32 x i8> %res to <4 x i64>
ret <4 x i64> %bc
}
-
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
define <4 x i64> @test_mm256_adds_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epu16:
@@ -139,12 +137,11 @@ define <4 x i64> @test_mm256_adds_epu16(
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
- %1 = add <16 x i16> %arg0, %arg1
- %2 = icmp ugt <16 x i16> %arg0, %1
- %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
- %bc = bitcast <16 x i16> %3 to <4 x i64>
+ %res = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %arg0, <16 x i16> %arg1)
+ %bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
define <4 x i64> @test_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_alignr_epi8:
@@ -2552,33 +2549,28 @@ declare <16 x i16> @llvm.x86.avx2.psubs.
define <4 x i64> @test_mm256_subs_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epu8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
- %cmp = icmp ugt <32 x i8> %arg0, %arg1
- %sel = select <32 x i1> %cmp, <32 x i8> %arg0, <32 x i8> %arg1
- %sub = sub <32 x i8> %sel, %arg1
- %bc = bitcast <32 x i8> %sub to <4 x i64>
+ %res = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %arg0, <32 x i8> %arg1)
+ %bc = bitcast <32 x i8> %res to <4 x i64>
ret <4 x i64> %bc
}
-
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
define <4 x i64> @test_mm256_subs_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epu16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
- %cmp = icmp ugt <16 x i16> %arg0, %arg1
- %sel = select <16 x i1> %cmp, <16 x i16> %arg0, <16 x i16> %arg1
- %sub = sub <16 x i16> %sel, %arg1
- %bc = bitcast <16 x i16> %sub to <4 x i64>
+ %res = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %arg0, <16 x i16> %arg1)
+ %bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
define <4 x i64> @test_mm256_unpackhi_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi8:
Modified: llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll?rev=349630&r1=349629&r2=349630&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll Wed Dec 19 06:43:36 2018
@@ -15,11 +15,10 @@ define <32 x i16> @test_mask_adds_epu16_
; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpaddusw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
- %1 = add <32 x i16> %a, %b
- %2 = icmp ugt <32 x i16> %a, %1
- %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
- ret <32 x i16> %3
+ %res = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+ ret <32 x i16> %res
}
+declare <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_adds_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epu16_rrk_512:
@@ -35,12 +34,10 @@ define <32 x i16> @test_mask_adds_epu16_
; AVX512F-32-NEXT: vpaddusw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-32-NEXT: retl
- %1 = add <32 x i16> %a, %b
- %2 = icmp ugt <32 x i16> %a, %1
- %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
- %4 = bitcast i32 %mask to <32 x i1>
- %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> %passThru
- ret <32 x i16> %5
+ %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_adds_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
@@ -55,12 +52,10 @@ define <32 x i16> @test_mask_adds_epu16_
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddusw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
- %1 = add <32 x i16> %a, %b
- %2 = icmp ugt <32 x i16> %a, %1
- %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
- %4 = bitcast i32 %mask to <32 x i1>
- %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> zeroinitializer
- ret <32 x i16> %5
+ %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_adds_epu16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
@@ -75,10 +70,8 @@ define <32 x i16> @test_mask_adds_epu16_
; AVX512F-32-NEXT: vpaddusw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %1 = add <32 x i16> %a, %b
- %2 = icmp ugt <32 x i16> %a, %1
- %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
- ret <32 x i16> %3
+ %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+ ret <32 x i16> %1
}
define <32 x i16> @test_mask_adds_epu16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
@@ -97,12 +90,10 @@ define <32 x i16> @test_mask_adds_epu16_
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %1 = add <32 x i16> %a, %b
- %2 = icmp ugt <32 x i16> %a, %1
- %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
- %4 = bitcast i32 %mask to <32 x i1>
- %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> %passThru
- ret <32 x i16> %5
+ %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_adds_epu16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
@@ -119,12 +110,10 @@ define <32 x i16> @test_mask_adds_epu16_
; AVX512F-32-NEXT: vpaddusw (%eax), %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %1 = add <32 x i16> %a, %b
- %2 = icmp ugt <32 x i16> %a, %1
- %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
- %4 = bitcast i32 %mask to <32 x i1>
- %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> zeroinitializer
- ret <32 x i16> %5
+ %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_subs_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) {
@@ -137,11 +126,10 @@ define <32 x i16> @test_mask_subs_epu16_
; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsubusw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
- %cmp = icmp ugt <32 x i16> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
- %sub = sub <32 x i16> %sel, %b
+ %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
ret <32 x i16> %sub
}
+declare <32 x i16> @llvm.usub.sat.v32i16(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_subs_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epu16_rrk_512:
@@ -157,9 +145,7 @@ define <32 x i16> @test_mask_subs_epu16_
; AVX512F-32-NEXT: vpsubusw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-32-NEXT: retl
- %cmp = icmp ugt <32 x i16> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
- %sub = sub <32 x i16> %sel, %b
+ %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
%bc = bitcast i32 %mask to <32 x i1>
%res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> %passThru
ret <32 x i16> %res
@@ -177,9 +163,7 @@ define <32 x i16> @test_mask_subs_epu16_
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubusw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
- %cmp = icmp ugt <32 x i16> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
- %sub = sub <32 x i16> %sel, %b
+ %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
%bc = bitcast i32 %mask to <32 x i1>
%res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> zeroinitializer
ret <32 x i16> %res
@@ -197,9 +181,7 @@ define <32 x i16> @test_mask_subs_epu16_
; AVX512F-32-NEXT: vpsubusw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %cmp = icmp ugt <32 x i16> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
- %sub = sub <32 x i16> %sel, %b
+ %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
ret <32 x i16> %sub
}
@@ -219,9 +201,7 @@ define <32 x i16> @test_mask_subs_epu16_
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %cmp = icmp ugt <32 x i16> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
- %sub = sub <32 x i16> %sel, %b
+ %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
%bc = bitcast i32 %mask to <32 x i1>
%res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> %passThru
ret <32 x i16> %res
@@ -241,9 +221,7 @@ define <32 x i16> @test_mask_subs_epu16_
; AVX512F-32-NEXT: vpsubusw (%eax), %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %cmp = icmp ugt <32 x i16> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
- %sub = sub <32 x i16> %sel, %b
+ %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
%bc = bitcast i32 %mask to <32 x i1>
%res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> zeroinitializer
ret <32 x i16> %res
@@ -272,11 +250,10 @@ define <64 x i16> @test_mask_adds_epu16_
; AVX512F-32-NEXT: popl %ebp
; AVX512F-32-NEXT: .cfi_def_cfa %esp, 4
; AVX512F-32-NEXT: retl
- %1 = add <64 x i16> %a, %b
- %2 = icmp ugt <64 x i16> %a, %1
- %3 = select <64 x i1> %2, <64 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <64 x i16> %1
- ret <64 x i16> %3
+ %1 = call <64 x i16> @llvm.uadd.sat.v64i16(<64 x i16> %a, <64 x i16> %b)
+ ret <64 x i16> %1
}
+declare <64 x i16> @llvm.uadd.sat.v64i16(<64 x i16>, <64 x i16>)
define <64 x i16> @test_mask_subs_epu16_rr_1024(<64 x i16> %a, <64 x i16> %b) {
; AVX512BW-LABEL: test_mask_subs_epu16_rr_1024:
@@ -300,9 +277,7 @@ define <64 x i16> @test_mask_subs_epu16_
; AVX512F-32-NEXT: popl %ebp
; AVX512F-32-NEXT: .cfi_def_cfa %esp, 4
; AVX512F-32-NEXT: retl
- %cmp = icmp ugt <64 x i16> %a, %b
- %sel = select <64 x i1> %cmp, <64 x i16> %a, <64 x i16> %b
- %sub = sub <64 x i16> %sel, %b
+ %sub = call <64 x i16> @llvm.usub.sat.v64i16(<64 x i16> %a, <64 x i16> %b)
ret <64 x i16> %sub
}
-
+declare <64 x i16> @llvm.usub.sat.v64i16(<64 x i16>, <64 x i16>)
Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll?rev=349630&r1=349629&r2=349630&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll Wed Dec 19 06:43:36 2018
@@ -9,11 +9,10 @@ define <8 x i16> @test_mask_adds_epu16_r
; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <8 x i16> %a, %b
- %2 = icmp ugt <8 x i16> %a, %1
- %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
- ret <8 x i16> %3
+ %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i16> %1
}
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @test_mask_adds_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrk_128:
@@ -22,12 +21,10 @@ define <8 x i16> @test_mask_adds_epu16_r
; CHECK-NEXT: vpaddusw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdd,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <8 x i16> %a, %b
- %2 = icmp ugt <8 x i16> %a, %1
- %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
- %4 = bitcast i8 %mask to <8 x i1>
- %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %passThru
- ret <8 x i16> %5
+ %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_adds_epu16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
@@ -36,12 +33,10 @@ define <8 x i16> @test_mask_adds_epu16_r
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <8 x i16> %a, %b
- %2 = icmp ugt <8 x i16> %a, %1
- %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
- %4 = bitcast i8 %mask to <8 x i1>
- %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
- ret <8 x i16> %5
+ %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_adds_epu16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
@@ -50,10 +45,8 @@ define <8 x i16> @test_mask_adds_epu16_r
; CHECK-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %1 = add <8 x i16> %a, %b
- %2 = icmp ugt <8 x i16> %a, %1
- %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
- ret <8 x i16> %3
+ %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i16> %1
}
define <8 x i16> @test_mask_adds_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
@@ -64,12 +57,10 @@ define <8 x i16> @test_mask_adds_epu16_r
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %1 = add <8 x i16> %a, %b
- %2 = icmp ugt <8 x i16> %a, %1
- %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
- %4 = bitcast i8 %mask to <8 x i1>
- %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %passThru
- ret <8 x i16> %5
+ %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_adds_epu16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
@@ -79,12 +70,10 @@ define <8 x i16> @test_mask_adds_epu16_r
; CHECK-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %1 = add <8 x i16> %a, %b
- %2 = icmp ugt <8 x i16> %a, %1
- %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
- %4 = bitcast i8 %mask to <8 x i1>
- %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
- ret <8 x i16> %5
+ %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+ ret <8 x i16> %3
}
define <16 x i16> @test_mask_adds_epu16_rr_256(<16 x i16> %a, <16 x i16> %b) {
@@ -92,11 +81,10 @@ define <16 x i16> @test_mask_adds_epu16_
; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <16 x i16> %a, %b
- %2 = icmp ugt <16 x i16> %a, %1
- %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
- ret <16 x i16> %3
+ %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ ret <16 x i16> %1
}
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
define <16 x i16> @test_mask_adds_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrk_256:
@@ -105,12 +93,10 @@ define <16 x i16> @test_mask_adds_epu16_
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdd,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <16 x i16> %a, %b
- %2 = icmp ugt <16 x i16> %a, %1
- %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
- %4 = bitcast i16 %mask to <16 x i1>
- %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %passThru
- ret <16 x i16> %5
+ %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_adds_epu16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
@@ -119,12 +105,10 @@ define <16 x i16> @test_mask_adds_epu16_
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <16 x i16> %a, %b
- %2 = icmp ugt <16 x i16> %a, %1
- %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
- %4 = bitcast i16 %mask to <16 x i1>
- %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
- ret <16 x i16> %5
+ %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_adds_epu16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
@@ -133,10 +117,8 @@ define <16 x i16> @test_mask_adds_epu16_
; CHECK-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %1 = add <16 x i16> %a, %b
- %2 = icmp ugt <16 x i16> %a, %1
- %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
- ret <16 x i16> %3
+ %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ ret <16 x i16> %1
}
define <16 x i16> @test_mask_adds_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
@@ -147,12 +129,10 @@ define <16 x i16> @test_mask_adds_epu16_
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %1 = add <16 x i16> %a, %b
- %2 = icmp ugt <16 x i16> %a, %1
- %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
- %4 = bitcast i16 %mask to <16 x i1>
- %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %passThru
- ret <16 x i16> %5
+ %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_adds_epu16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
@@ -162,12 +142,10 @@ define <16 x i16> @test_mask_adds_epu16_
; CHECK-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %1 = add <16 x i16> %a, %b
- %2 = icmp ugt <16 x i16> %a, %1
- %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
- %4 = bitcast i16 %mask to <16 x i1>
- %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
- ret <16 x i16> %5
+ %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+ ret <16 x i16> %3
}
define <8 x i16> @test_mask_subs_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
@@ -175,11 +153,10 @@ define <8 x i16> @test_mask_subs_epu16_r
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <8 x i16> %a, %b
- %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
- %sub = sub <8 x i16> %sel, %b
+ %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
ret <8 x i16> %sub
}
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @test_mask_subs_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrk_128:
@@ -188,9 +165,7 @@ define <8 x i16> @test_mask_subs_epu16_r
; CHECK-NEXT: vpsubusw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd9,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <8 x i16> %a, %b
- %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
- %sub = sub <8 x i16> %sel, %b
+ %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
%bc = bitcast i8 %mask to <8 x i1>
%res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> %passThru
ret <8 x i16> %res
@@ -202,9 +177,7 @@ define <8 x i16> @test_mask_subs_epu16_r
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <8 x i16> %a, %b
- %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
- %sub = sub <8 x i16> %sel, %b
+ %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
%bc = bitcast i8 %mask to <8 x i1>
%res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> zeroinitializer
ret <8 x i16> %res
@@ -216,9 +189,7 @@ define <8 x i16> @test_mask_subs_epu16_r
; CHECK-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %cmp = icmp ugt <8 x i16> %a, %b
- %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
- %sub = sub <8 x i16> %sel, %b
+ %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
ret <8 x i16> %sub
}
@@ -230,9 +201,7 @@ define <8 x i16> @test_mask_subs_epu16_r
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %cmp = icmp ugt <8 x i16> %a, %b
- %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
- %sub = sub <8 x i16> %sel, %b
+ %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
%bc = bitcast i8 %mask to <8 x i1>
%res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> %passThru
ret <8 x i16> %res
@@ -245,9 +214,7 @@ define <8 x i16> @test_mask_subs_epu16_r
; CHECK-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %cmp = icmp ugt <8 x i16> %a, %b
- %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
- %sub = sub <8 x i16> %sel, %b
+ %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
%bc = bitcast i8 %mask to <8 x i1>
%res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> zeroinitializer
ret <8 x i16> %res
@@ -258,11 +225,10 @@ define <16 x i16> @test_mask_subs_epu16_
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <16 x i16> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
- %sub = sub <16 x i16> %sel, %b
+ %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
ret <16 x i16> %sub
}
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
define <16 x i16> @test_mask_subs_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrk_256:
@@ -271,9 +237,7 @@ define <16 x i16> @test_mask_subs_epu16_
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd9,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <16 x i16> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
- %sub = sub <16 x i16> %sel, %b
+ %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
%bc = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> %passThru
ret <16 x i16> %res
@@ -285,9 +249,7 @@ define <16 x i16> @test_mask_subs_epu16_
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <16 x i16> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
- %sub = sub <16 x i16> %sel, %b
+ %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
%bc = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> zeroinitializer
ret <16 x i16> %res
@@ -299,9 +261,7 @@ define <16 x i16> @test_mask_subs_epu16_
; CHECK-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %cmp = icmp ugt <16 x i16> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
- %sub = sub <16 x i16> %sel, %b
+ %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
ret <16 x i16> %sub
}
@@ -313,9 +273,7 @@ define <16 x i16> @test_mask_subs_epu16_
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %cmp = icmp ugt <16 x i16> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
- %sub = sub <16 x i16> %sel, %b
+ %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
%bc = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> %passThru
ret <16 x i16> %res
@@ -328,9 +286,7 @@ define <16 x i16> @test_mask_subs_epu16_
; CHECK-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %cmp = icmp ugt <16 x i16> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
- %sub = sub <16 x i16> %sel, %b
+ %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
%bc = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> zeroinitializer
ret <16 x i16> %res
@@ -341,11 +297,10 @@ define <16 x i8> @test_mask_adds_epu8_rr
; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <16 x i8> %a, %b
- %2 = icmp ugt <16 x i8> %a, %1
- %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
- ret <16 x i8> %3
+ %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %1
}
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
define <16 x i8> @test_mask_adds_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rrk_128:
@@ -354,12 +309,10 @@ define <16 x i8> @test_mask_adds_epu8_rr
; CHECK-NEXT: vpaddusb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdc,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <16 x i8> %a, %b
- %2 = icmp ugt <16 x i8> %a, %1
- %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
- %4 = bitcast i16 %mask to <16 x i1>
- %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> %passThru
- ret <16 x i8> %5
+ %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_adds_epu8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
@@ -368,12 +321,10 @@ define <16 x i8> @test_mask_adds_epu8_rr
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <16 x i8> %a, %b
- %2 = icmp ugt <16 x i8> %a, %1
- %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
- %4 = bitcast i16 %mask to <16 x i1>
- %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> zeroinitializer
- ret <16 x i8> %5
+ %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_adds_epu8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
@@ -382,10 +333,8 @@ define <16 x i8> @test_mask_adds_epu8_rm
; CHECK-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
- %1 = add <16 x i8> %a, %b
- %2 = icmp ugt <16 x i8> %a, %1
- %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
- ret <16 x i8> %3
+ %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %1
}
define <16 x i8> @test_mask_adds_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
@@ -396,12 +345,10 @@ define <16 x i8> @test_mask_adds_epu8_rm
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
- %1 = add <16 x i8> %a, %b
- %2 = icmp ugt <16 x i8> %a, %1
- %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
- %4 = bitcast i16 %mask to <16 x i1>
- %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> %passThru
- ret <16 x i8> %5
+ %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_adds_epu8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
@@ -411,12 +358,10 @@ define <16 x i8> @test_mask_adds_epu8_rm
; CHECK-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
- %1 = add <16 x i8> %a, %b
- %2 = icmp ugt <16 x i8> %a, %1
- %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
- %4 = bitcast i16 %mask to <16 x i1>
- %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> zeroinitializer
- ret <16 x i8> %5
+ %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+ ret <16 x i8> %3
}
define <32 x i8> @test_mask_adds_epu8_rr_256(<32 x i8> %a, <32 x i8> %b) {
@@ -424,11 +369,10 @@ define <32 x i8> @test_mask_adds_epu8_rr
; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <32 x i8> %a, %b
- %2 = icmp ugt <32 x i8> %a, %1
- %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
- ret <32 x i8> %3
+ %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ ret <32 x i8> %1
}
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
define <32 x i8> @test_mask_adds_epu8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rrk_256:
@@ -437,12 +381,10 @@ define <32 x i8> @test_mask_adds_epu8_rr
; CHECK-NEXT: vpaddusb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdc,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <32 x i8> %a, %b
- %2 = icmp ugt <32 x i8> %a, %1
- %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
- %4 = bitcast i32 %mask to <32 x i1>
- %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> %passThru
- ret <32 x i8> %5
+ %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_adds_epu8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
@@ -451,12 +393,10 @@ define <32 x i8> @test_mask_adds_epu8_rr
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %1 = add <32 x i8> %a, %b
- %2 = icmp ugt <32 x i8> %a, %1
- %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
- %4 = bitcast i32 %mask to <32 x i1>
- %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> zeroinitializer
- ret <32 x i8> %5
+ %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_adds_epu8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
@@ -465,10 +405,8 @@ define <32 x i8> @test_mask_adds_epu8_rm
; CHECK-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
- %1 = add <32 x i8> %a, %b
- %2 = icmp ugt <32 x i8> %a, %1
- %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
- ret <32 x i8> %3
+ %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ ret <32 x i8> %1
}
define <32 x i8> @test_mask_adds_epu8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
@@ -479,12 +417,10 @@ define <32 x i8> @test_mask_adds_epu8_rm
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
- %1 = add <32 x i8> %a, %b
- %2 = icmp ugt <32 x i8> %a, %1
- %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
- %4 = bitcast i32 %mask to <32 x i1>
- %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> %passThru
- ret <32 x i8> %5
+ %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_adds_epu8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
@@ -494,12 +430,10 @@ define <32 x i8> @test_mask_adds_epu8_rm
; CHECK-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
- %1 = add <32 x i8> %a, %b
- %2 = icmp ugt <32 x i8> %a, %1
- %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
- %4 = bitcast i32 %mask to <32 x i1>
- %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> zeroinitializer
- ret <32 x i8> %5
+ %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+ ret <32 x i8> %3
}
define <16 x i8> @test_mask_subs_epu8_rr_128(<16 x i8> %a, <16 x i8> %b) {
@@ -507,11 +441,10 @@ define <16 x i8> @test_mask_subs_epu8_rr
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <16 x i8> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
- %sub = sub <16 x i8> %sel, %b
+ %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
ret <16 x i8> %sub
}
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
define <16 x i8> @test_mask_subs_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rrk_128:
@@ -520,9 +453,7 @@ define <16 x i8> @test_mask_subs_epu8_rr
; CHECK-NEXT: vpsubusb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd8,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <16 x i8> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
- %sub = sub <16 x i8> %sel, %b
+ %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
%bc = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> %passThru
ret <16 x i8> %res
@@ -534,9 +465,7 @@ define <16 x i8> @test_mask_subs_epu8_rr
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <16 x i8> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
- %sub = sub <16 x i8> %sel, %b
+ %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
%bc = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> zeroinitializer
ret <16 x i8> %res
@@ -548,9 +477,7 @@ define <16 x i8> @test_mask_subs_epu8_rm
; CHECK-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
- %cmp = icmp ugt <16 x i8> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
- %sub = sub <16 x i8> %sel, %b
+ %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
ret <16 x i8> %sub
}
@@ -562,9 +489,7 @@ define <16 x i8> @test_mask_subs_epu8_rm
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
- %cmp = icmp ugt <16 x i8> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
- %sub = sub <16 x i8> %sel, %b
+ %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
%bc = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> %passThru
ret <16 x i8> %res
@@ -577,9 +502,7 @@ define <16 x i8> @test_mask_subs_epu8_rm
; CHECK-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
- %cmp = icmp ugt <16 x i8> %a, %b
- %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
- %sub = sub <16 x i8> %sel, %b
+ %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
%bc = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> zeroinitializer
ret <16 x i8> %res
@@ -590,11 +513,10 @@ define <32 x i8> @test_mask_subs_epu8_rr
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <32 x i8> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
- %sub = sub <32 x i8> %sel, %b
+ %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
ret <32 x i8> %sub
}
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
define <32 x i8> @test_mask_subs_epu8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rrk_256:
@@ -603,9 +525,7 @@ define <32 x i8> @test_mask_subs_epu8_rr
; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd8,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <32 x i8> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
- %sub = sub <32 x i8> %sel, %b
+ %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
%bc = bitcast i32 %mask to <32 x i1>
%res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> %passThru
ret <32 x i8> %res
@@ -617,9 +537,7 @@ define <32 x i8> @test_mask_subs_epu8_rr
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %cmp = icmp ugt <32 x i8> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
- %sub = sub <32 x i8> %sel, %b
+ %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
%bc = bitcast i32 %mask to <32 x i1>
%res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> zeroinitializer
ret <32 x i8> %res
@@ -631,9 +549,7 @@ define <32 x i8> @test_mask_subs_epu8_rm
; CHECK-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
- %cmp = icmp ugt <32 x i8> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
- %sub = sub <32 x i8> %sel, %b
+ %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
ret <32 x i8> %sub
}
@@ -645,9 +561,7 @@ define <32 x i8> @test_mask_subs_epu8_rm
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
- %cmp = icmp ugt <32 x i8> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
- %sub = sub <32 x i8> %sel, %b
+ %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
%bc = bitcast i32 %mask to <32 x i1>
%res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> %passThru
ret <32 x i8> %res
@@ -660,9 +574,7 @@ define <32 x i8> @test_mask_subs_epu8_rm
; CHECK-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
- %cmp = icmp ugt <32 x i8> %a, %b
- %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
- %sub = sub <32 x i8> %sel, %b
+ %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
%bc = bitcast i32 %mask to <32 x i1>
%res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> zeroinitializer
ret <32 x i8> %res
Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll?rev=349630&r1=349629&r2=349630&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll Wed Dec 19 06:43:36 2018
@@ -197,12 +197,11 @@ define <2 x i64> @test_mm_adds_epu8(<2 x
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
- %1 = add <16 x i8> %arg0, %arg1
- %2 = icmp ugt <16 x i8> %arg0, %1
- %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
- %bc = bitcast <16 x i8> %3 to <2 x i64>
+ %res = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
+ %bc = bitcast <16 x i8> %res to <2 x i64>
ret <2 x i64> %bc
}
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
define <2 x i64> @test_mm_adds_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_adds_epu16:
@@ -221,12 +220,11 @@ define <2 x i64> @test_mm_adds_epu16(<2
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
%arg1 = bitcast <2 x i64> %a1 to <8 x i16>
- %1 = add <8 x i16> %arg0, %arg1
- %2 = icmp ugt <8 x i16> %arg0, %1
- %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
- %bc = bitcast <8 x i16> %3 to <2 x i64>
+ %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
+ %bc = bitcast <8 x i16> %res to <2 x i64>
ret <2 x i64> %bc
}
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
define <2 x double> @test_mm_and_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_and_pd:
@@ -6208,76 +6206,48 @@ declare <8 x i16> @llvm.x86.sse2.psubs.w
define <2 x i64> @test_mm_subs_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_subs_epu8:
; SSE: # %bb.0:
-; SSE-NEXT: pmaxub %xmm1, %xmm0 # encoding: [0x66,0x0f,0xde,0xc1]
-; SSE-NEXT: psubb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf8,0xc1]
+; SSE-NEXT: psubusb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd8,0xc1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_subs_epu8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xde,0xc1]
-; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf8,0xc1]
+; AVX1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_subs_epu8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
-; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
+; AVX512-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
- %cmp = icmp ugt <16 x i8> %arg0, %arg1
- %sel = select <16 x i1> %cmp, <16 x i8> %arg0, <16 x i8> %arg1
- %sub = sub <16 x i8> %sel, %arg1
- %bc = bitcast <16 x i8> %sub to <2 x i64>
+ %res = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
+ %bc = bitcast <16 x i8> %res to <2 x i64>
ret <2 x i64> %bc
}
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
define <2 x i64> @test_mm_subs_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X86-SSE-LABEL: test_mm_subs_epu16:
-; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-SSE-NEXT: # encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
-; X86-SSE-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-SSE-NEXT: movdqa %xmm1, %xmm3 # encoding: [0x66,0x0f,0x6f,0xd9]
-; X86-SSE-NEXT: pxor %xmm2, %xmm3 # encoding: [0x66,0x0f,0xef,0xda]
-; X86-SSE-NEXT: pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
-; X86-SSE-NEXT: pmaxsw %xmm3, %xmm0 # encoding: [0x66,0x0f,0xee,0xc3]
-; X86-SSE-NEXT: pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
-; X86-SSE-NEXT: psubw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf9,0xc1]
-; X86-SSE-NEXT: retl # encoding: [0xc3]
+; SSE-LABEL: test_mm_subs_epu16:
+; SSE: # %bb.0:
+; SSE-NEXT: psubusw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd9,0xc1]
+; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_subs_epu16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf9,0xc1]
+; AVX1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_subs_epu16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX512-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf9,0xc1]
+; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
-;
-; X64-SSE-LABEL: test_mm_subs_epu16:
-; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-SSE-NEXT: # encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
-; X64-SSE-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
-; X64-SSE-NEXT: movdqa %xmm1, %xmm3 # encoding: [0x66,0x0f,0x6f,0xd9]
-; X64-SSE-NEXT: pxor %xmm2, %xmm3 # encoding: [0x66,0x0f,0xef,0xda]
-; X64-SSE-NEXT: pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
-; X64-SSE-NEXT: pmaxsw %xmm3, %xmm0 # encoding: [0x66,0x0f,0xee,0xc3]
-; X64-SSE-NEXT: pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
-; X64-SSE-NEXT: psubw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf9,0xc1]
-; X64-SSE-NEXT: retq # encoding: [0xc3]
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
%arg1 = bitcast <2 x i64> %a1 to <8 x i16>
- %cmp = icmp ugt <8 x i16> %arg0, %arg1
- %sel = select <8 x i1> %cmp, <8 x i16> %arg0, <8 x i16> %arg1
- %sub = sub <8 x i16> %sel, %arg1
- %bc = bitcast <8 x i16> %sub to <2 x i64>
+ %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
+ %bc = bitcast <8 x i16> %res to <2 x i64>
ret <2 x i64> %bc
}
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
define i32 @test_mm_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_ucomieq_sd: