[llvm] r347700 - [InstCombine] Add tests for saturating add/sub; NFC

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 27 11:52:57 PST 2018


Author: nikic
Date: Tue Nov 27 11:52:56 2018
New Revision: 347700

URL: http://llvm.org/viewvc/llvm-project?rev=347700&view=rev
Log:
[InstCombine] Add tests for saturating add/sub; NFC

These are baseline tests for D54534.

Added:
    llvm/trunk/test/Transforms/InstCombine/saturating-add-sub.ll

Added: llvm/trunk/test/Transforms/InstCombine/saturating-add-sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/saturating-add-sub.ll?rev=347700&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/saturating-add-sub.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/saturating-add-sub.ll Tue Nov 27 11:52:56 2018
@@ -0,0 +1,669 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+;
+; Saturating addition.
+;
+
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+declare i8 @llvm.sadd.sat.i8(i8, i8)
+declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
+declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)
+
+; Constant uadd argument is canonicalized to the right.
+define i8 @test_scalar_uadd_canonical(i8 %a) {
+; CHECK-LABEL: @test_scalar_uadd_canonical(
+; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.uadd.sat.i8(i8 10, i8 [[A:%.*]])
+; CHECK-NEXT:    ret i8 [[X]]
+;
+  %x = call i8 @llvm.uadd.sat.i8(i8 10, i8 %a)
+  ret i8 %x
+}
+
+define <2 x i8> @test_vector_uadd_canonical(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_uadd_canonical(
+; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> [[A:%.*]])
+; CHECK-NEXT:    ret <2 x i8> [[X]]
+;
+  %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> %a)
+  ret <2 x i8> %x
+}
+
+; Constant sadd argument is canonicalized to the right.
+define i8 @test_scalar_sadd_canonical(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_canonical(
+; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.sadd.sat.i8(i8 -10, i8 [[A:%.*]])
+; CHECK-NEXT:    ret i8 [[X]]
+;
+  %x = call i8 @llvm.sadd.sat.i8(i8 -10, i8 %a)
+  ret i8 %x
+}
+
+define <2 x i8> @test_vector_sadd_canonical(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_sadd_canonical(
+; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 10, i8 -20>, <2 x i8> [[A:%.*]])
+; CHECK-NEXT:    ret <2 x i8> [[X]]
+;
+  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 10, i8 -20>, <2 x i8> %a)
+  ret <2 x i8> %x
+}
+
+; Can combine uadds with constant operands.
+define i8 @test_scalar_uadd_combine(i8 %a) {
+; CHECK-LABEL: @test_scalar_uadd_combine(
+; CHECK-NEXT:    [[X1:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[X2:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[X1]], i8 20)
+; CHECK-NEXT:    ret i8 [[X2]]
+;
+  %x1 = call i8 @llvm.uadd.sat.i8(i8 %a, i8 10)
+  %x2 = call i8 @llvm.uadd.sat.i8(i8 %x1, i8 20)
+  ret i8 %x2
+}
+
+define <2 x i8> @test_vector_uadd_combine(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_uadd_combine(
+; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
+; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 20, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[X2]]
+;
+  %x1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
+  %x2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 20, i8 20>)
+  ret <2 x i8> %x2
+}
+
+; This could simplify, but currently doesn't.
+define <2 x i8> @test_vector_uadd_combine_non_splat(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_uadd_combine_non_splat(
+; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 30, i8 40>)
+; CHECK-NEXT:    ret <2 x i8> [[X2]]
+;
+  %x1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 20>)
+  %x2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 30, i8 40>)
+  ret <2 x i8> %x2
+}
+
+; Can combine uadds even if they overflow.
+define i8 @test_scalar_uadd_overflow(i8 %a) {
+; CHECK-LABEL: @test_scalar_uadd_overflow(
+; CHECK-NEXT:    [[Y1:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 100)
+; CHECK-NEXT:    [[Y2:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[Y1]], i8 -56)
+; CHECK-NEXT:    ret i8 [[Y2]]
+;
+  %y1 = call i8 @llvm.uadd.sat.i8(i8 %a, i8 100)
+  %y2 = call i8 @llvm.uadd.sat.i8(i8 %y1, i8 200)
+  ret i8 %y2
+}
+
+define <2 x i8> @test_vector_uadd_overflow(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_uadd_overflow(
+; CHECK-NEXT:    [[Y1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 100, i8 100>)
+; CHECK-NEXT:    [[Y2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[Y1]], <2 x i8> <i8 -56, i8 -56>)
+; CHECK-NEXT:    ret <2 x i8> [[Y2]]
+;
+  %y1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 100, i8 100>)
+  %y2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %y1, <2 x i8> <i8 200, i8 200>)
+  ret <2 x i8> %y2
+}
+
+; Can combine sadds if the constant signs match.
+define i8 @test_scalar_sadd_both_positive(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_both_positive(
+; CHECK-NEXT:    [[Z1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[Z2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[Z1]], i8 20)
+; CHECK-NEXT:    ret i8 [[Z2]]
+;
+  %z1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 10)
+  %z2 = call i8 @llvm.sadd.sat.i8(i8 %z1, i8 20)
+  ret i8 %z2
+}
+
+define <2 x i8> @test_vector_sadd_both_positive(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_sadd_both_positive(
+; CHECK-NEXT:    [[Z1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
+; CHECK-NEXT:    [[Z2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[Z1]], <2 x i8> <i8 20, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[Z2]]
+;
+  %z1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
+  %z2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %z1, <2 x i8> <i8 20, i8 20>)
+  ret <2 x i8> %z2
+}
+
+define i8 @test_scalar_sadd_both_negative(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_both_negative(
+; CHECK-NEXT:    [[U1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
+; CHECK-NEXT:    [[U2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[U1]], i8 -20)
+; CHECK-NEXT:    ret i8 [[U2]]
+;
+  %u1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 -10)
+  %u2 = call i8 @llvm.sadd.sat.i8(i8 %u1, i8 -20)
+  ret i8 %u2
+}
+
+define <2 x i8> @test_vector_sadd_both_negative(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_sadd_both_negative(
+; CHECK-NEXT:    [[U1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 -10>)
+; CHECK-NEXT:    [[U2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[U1]], <2 x i8> <i8 -20, i8 -20>)
+; CHECK-NEXT:    ret <2 x i8> [[U2]]
+;
+  %u1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
+  %u2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
+  ret <2 x i8> %u2
+}
+
+; Can't combine sadds if the constants have different signs.
+define i8 @test_scalar_sadd_different_sign(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_different_sign(
+; CHECK-NEXT:    [[V1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[V2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[V1]], i8 -20)
+; CHECK-NEXT:    ret i8 [[V2]]
+;
+  %v1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 10)
+  %v2 = call i8 @llvm.sadd.sat.i8(i8 %v1, i8 -20)
+  ret i8 %v2
+}
+
+; Can't combine sadds if they overflow.
+define i8 @test_scalar_sadd_overflow(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_overflow(
+; CHECK-NEXT:    [[W1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 100)
+; CHECK-NEXT:    [[W2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[W1]], i8 100)
+; CHECK-NEXT:    ret i8 [[W2]]
+;
+  %w1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 100)
+  %w2 = call i8 @llvm.sadd.sat.i8(i8 %w1, i8 100)
+  ret i8 %w2
+}
+
+; neg uadd neg always overflows.
+define i8 @test_scalar_uadd_neg_neg(i8 %a) {
+; CHECK-LABEL: @test_scalar_uadd_neg_neg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A_NEG]], i8 -10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_neg = or i8 %a, -128
+  %r = call i8 @llvm.uadd.sat.i8(i8 %a_neg, i8 -10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_uadd_neg_neg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_uadd_neg_neg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
+  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
+  ret <2 x i8> %r
+}
+
+; nneg uadd nneg never overflows.
+define i8 @test_scalar_uadd_nneg_nneg(i8 %a) {
+; CHECK-LABEL: @test_scalar_uadd_nneg_nneg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A_NNEG]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_nneg = and i8 %a, 127
+  %r = call i8 @llvm.uadd.sat.i8(i8 %a_nneg, i8 10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_uadd_nneg_nneg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_uadd_nneg_nneg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
+  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %r
+}
+
+; neg uadd nneg might overflow.
+define i8 @test_scalar_uadd_neg_nneg(i8 %a) {
+; CHECK-LABEL: @test_scalar_uadd_neg_nneg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A_NEG]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_neg = or i8 %a, -128
+  %r = call i8 @llvm.uadd.sat.i8(i8 %a_neg, i8 10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_uadd_neg_nneg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_uadd_neg_nneg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
+  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %r
+}
+
+; neg sadd nneg never overflows.
+define i8 @test_scalar_sadd_neg_nneg(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_neg_nneg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_neg = or i8 %a, -128
+  %r = call i8 @llvm.sadd.sat.i8(i8 %a_neg, i8 10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_sadd_neg_nneg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_sadd_neg_nneg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
+  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %r
+}
+
+; nneg sadd neg never overflows.
+define i8 @test_scalar_sadd_nneg_neg(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_nneg_neg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NNEG]], i8 -10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_nneg = and i8 %a, 127
+  %r = call i8 @llvm.sadd.sat.i8(i8 %a_nneg, i8 -10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_sadd_nneg_neg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_sadd_nneg_neg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 -10, i8 -20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
+  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 -10, i8 -20>)
+  ret <2 x i8> %r
+}
+
+; neg sadd neg might overflow.
+define i8 @test_scalar_sadd_neg_neg(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_neg_neg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 -10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_neg = or i8 %a, -128
+  %r = call i8 @llvm.sadd.sat.i8(i8 %a_neg, i8 -10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_sadd_neg_neg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_sadd_neg_neg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
+  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
+  ret <2 x i8> %r
+}
+
+;
+; Saturating subtraction.
+;
+
+declare i8 @llvm.usub.sat.i8(i8, i8)
+declare i8 @llvm.ssub.sat.i8(i8, i8)
+declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
+declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
+
+; Cannot canonicalize usub to uadd.
+define i8 @test_scalar_usub_canonical(i8 %a) {
+; CHECK-LABEL: @test_scalar_usub_canonical(
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %r = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
+  ret i8 %r
+}
+
+; Canonicalize ssub to sadd.
+define i8 @test_scalar_ssub_canonical(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_canonical(
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %r = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_ssub_canonical(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_ssub_canonical(
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
+  ret <2 x i8> %r
+}
+
+; Canonicalization for non-splat constants is not supported yet.
+define <2 x i8> @test_vector_ssub_canonical_min_non_splat(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_ssub_canonical_min_non_splat(
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 -10>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 -10>)
+  ret <2 x i8> %r
+}
+
+; Cannot canonicalize signed min.
+define i8 @test_scalar_ssub_canonical_min(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_canonical_min(
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 -128)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %r = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -128)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_ssub_canonical_min(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_ssub_canonical_min(
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -128, i8 -10>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -128, i8 -10>)
+  ret <2 x i8> %r
+}
+
+; Can combine usubs with constant operands.
+define i8 @test_scalar_usub_combine(i8 %a) {
+; CHECK-LABEL: @test_scalar_usub_combine(
+; CHECK-NEXT:    [[X1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[X2:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[X1]], i8 20)
+; CHECK-NEXT:    ret i8 [[X2]]
+;
+  %x1 = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
+  %x2 = call i8 @llvm.usub.sat.i8(i8 %x1, i8 20)
+  ret i8 %x2
+}
+
+define <2 x i8> @test_vector_usub_combine(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_usub_combine(
+; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
+; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 20, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[X2]]
+;
+  %x1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
+  %x2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 20, i8 20>)
+  ret <2 x i8> %x2
+}
+
+; This could simplify, but currently doesn't.
+define <2 x i8> @test_vector_usub_combine_non_splat(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_usub_combine_non_splat(
+; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 30, i8 40>)
+; CHECK-NEXT:    ret <2 x i8> [[X2]]
+;
+  %x1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 20>)
+  %x2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 30, i8 40>)
+  ret <2 x i8> %x2
+}
+
+; Can combine usubs even if they overflow.
+define i8 @test_scalar_usub_overflow(i8 %a) {
+; CHECK-LABEL: @test_scalar_usub_overflow(
+; CHECK-NEXT:    [[Y1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 100)
+; CHECK-NEXT:    [[Y2:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y1]], i8 -56)
+; CHECK-NEXT:    ret i8 [[Y2]]
+;
+  %y1 = call i8 @llvm.usub.sat.i8(i8 %a, i8 100)
+  %y2 = call i8 @llvm.usub.sat.i8(i8 %y1, i8 200)
+  ret i8 %y2
+}
+
+define <2 x i8> @test_vector_usub_overflow(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_usub_overflow(
+; CHECK-NEXT:    [[Y1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 100, i8 100>)
+; CHECK-NEXT:    [[Y2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[Y1]], <2 x i8> <i8 -56, i8 -56>)
+; CHECK-NEXT:    ret <2 x i8> [[Y2]]
+;
+  %y1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 100, i8 100>)
+  %y2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %y1, <2 x i8> <i8 200, i8 200>)
+  ret <2 x i8> %y2
+}
+
+; Can combine ssubs if the constant signs match.
+define i8 @test_scalar_ssub_both_positive(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_both_positive(
+; CHECK-NEXT:    [[Z1:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[Z2:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[Z1]], i8 20)
+; CHECK-NEXT:    ret i8 [[Z2]]
+;
+  %z1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
+  %z2 = call i8 @llvm.ssub.sat.i8(i8 %z1, i8 20)
+  ret i8 %z2
+}
+
+define <2 x i8> @test_vector_ssub_both_positive(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_ssub_both_positive(
+; CHECK-NEXT:    [[Z1:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
+; CHECK-NEXT:    [[Z2:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[Z1]], <2 x i8> <i8 20, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[Z2]]
+;
+  %z1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
+  %z2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %z1, <2 x i8> <i8 20, i8 20>)
+  ret <2 x i8> %z2
+}
+
+define i8 @test_scalar_ssub_both_negative(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_both_negative(
+; CHECK-NEXT:    [[U1:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 -10)
+; CHECK-NEXT:    [[U2:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[U1]], i8 -20)
+; CHECK-NEXT:    ret i8 [[U2]]
+;
+  %u1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -10)
+  %u2 = call i8 @llvm.ssub.sat.i8(i8 %u1, i8 -20)
+  ret i8 %u2
+}
+
+define <2 x i8> @test_vector_ssub_both_negative(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_ssub_both_negative(
+; CHECK-NEXT:    [[U1:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 -10>)
+; CHECK-NEXT:    [[U2:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[U1]], <2 x i8> <i8 -20, i8 -20>)
+; CHECK-NEXT:    ret <2 x i8> [[U2]]
+;
+  %u1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
+  %u2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
+  ret <2 x i8> %u2
+}
+
+; Can't combine ssubs if the constants have different signs.
+define i8 @test_scalar_ssub_different_sign(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_different_sign(
+; CHECK-NEXT:    [[V1:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[V2:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[V1]], i8 -20)
+; CHECK-NEXT:    ret i8 [[V2]]
+;
+  %v1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
+  %v2 = call i8 @llvm.ssub.sat.i8(i8 %v1, i8 -20)
+  ret i8 %v2
+}
+
+; Can combine sadd and ssub with appropriate signs.
+define i8 @test_scalar_sadd_ssub(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_ssub(
+; CHECK-NEXT:    [[V1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 10, i8 [[A:%.*]])
+; CHECK-NEXT:    [[V2:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[V1]], i8 -20)
+; CHECK-NEXT:    ret i8 [[V2]]
+;
+  %v1 = call i8 @llvm.sadd.sat.i8(i8 10, i8 %a)
+  %v2 = call i8 @llvm.ssub.sat.i8(i8 %v1, i8 -20)
+  ret i8 %v2
+}
+
+define <2 x i8> @test_vector_sadd_ssub(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_sadd_ssub(
+; CHECK-NEXT:    [[V1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 -10, i8 -10>, <2 x i8> [[A:%.*]])
+; CHECK-NEXT:    [[V2:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[V1]], <2 x i8> <i8 20, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[V2]]
+;
+  %v1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 -10, i8 -10>, <2 x i8> %a)
+  %v2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %v1, <2 x i8> <i8 20, i8 20>)
+  ret <2 x i8> %v2
+}
+
+; Can't combine ssubs if they overflow.
+define i8 @test_scalar_ssub_overflow(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_overflow(
+; CHECK-NEXT:    [[W1:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 100)
+; CHECK-NEXT:    [[W2:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[W1]], i8 100)
+; CHECK-NEXT:    ret i8 [[W2]]
+;
+  %w1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 100)
+  %w2 = call i8 @llvm.ssub.sat.i8(i8 %w1, i8 100)
+  ret i8 %w2
+}
+
+; nneg usub neg always overflows.
+define i8 @test_scalar_usub_nneg_neg(i8 %a) {
+; CHECK-LABEL: @test_scalar_usub_nneg_neg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A_NNEG]], i8 -10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_nneg = and i8 %a, 127
+  %r = call i8 @llvm.usub.sat.i8(i8 %a_nneg, i8 -10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_usub_nneg_neg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_usub_nneg_neg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 -10, i8 -20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
+  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 -10, i8 -20>)
+  ret <2 x i8> %r
+}
+
+; neg usub nneg never overflows.
+define i8 @test_scalar_usub_neg_nneg(i8 %a) {
+; CHECK-LABEL: @test_scalar_usub_neg_nneg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A_NEG]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_neg = or i8 %a, -128
+  %r = call i8 @llvm.usub.sat.i8(i8 %a_neg, i8 10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_usub_neg_nneg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_usub_neg_nneg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
+  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %r
+}
+
+; nneg usub nneg may overflow.
+define i8 @test_scalar_usub_nneg_nneg(i8 %a) {
+; CHECK-LABEL: @test_scalar_usub_nneg_nneg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A_NNEG]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_nneg = and i8 %a, 127
+  %r = call i8 @llvm.usub.sat.i8(i8 %a_nneg, i8 10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_usub_nneg_nneg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_usub_nneg_nneg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
+  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %r
+}
+
+; neg ssub neg never overflows.
+define i8 @test_scalar_ssub_neg_neg(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_neg_neg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A_NEG]], i8 -10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_neg = or i8 %a, -128
+  %r = call i8 @llvm.ssub.sat.i8(i8 %a_neg, i8 -10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_ssub_neg_neg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_ssub_neg_neg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
+  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
+  ret <2 x i8> %r
+}
+
+; nneg ssub nneg never overflows.
+define i8 @test_scalar_ssub_nneg_nneg(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_nneg_nneg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A_NNEG]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_nneg = and i8 %a, 127
+  %r = call i8 @llvm.ssub.sat.i8(i8 %a_nneg, i8 10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_ssub_nneg_nneg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_ssub_nneg_nneg(
+; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
+  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %r
+}
+
+; neg ssub nneg may overflow.
+define i8 @test_scalar_ssub_neg_nneg(i8 %a) {
+; CHECK-LABEL: @test_scalar_ssub_neg_nneg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A_NEG]], i8 10)
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a_neg = or i8 %a, -128
+  %r = call i8 @llvm.ssub.sat.i8(i8 %a_neg, i8 10)
+  ret i8 %r
+}
+
+define <2 x i8> @test_vector_ssub_neg_nneg(<2 x i8> %a) {
+; CHECK-LABEL: @test_vector_ssub_neg_nneg(
+; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
+  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %r
+}
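
A quick sketch of the kind of fold these tests baseline, using @test_scalar_uadd_combine from above. This is my illustration only, not output from this commit, and D54534's actual behavior may differ; the test comments ("Can combine uadds with constant operands") are the only source for the intended transform.

  ; Input, as in @test_scalar_uadd_combine:
  %x1 = call i8 @llvm.uadd.sat.i8(i8 %a, i8 10)
  %x2 = call i8 @llvm.uadd.sat.i8(i8 %x1, i8 20)
  ; Expected folded form: 10 + 20 = 30 does not overflow i8, and saturating
  ; at +10 then +20 gives the same result as saturating once at +30.
  %x = call i8 @llvm.uadd.sat.i8(i8 %a, i8 30)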