[llvm] r347328 - [ConstantFolding] Add support for saturating add/sub

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 20 09:05:55 PST 2018


Author: spatel
Date: Tue Nov 20 09:05:55 2018
New Revision: 347328

URL: http://llvm.org/viewvc/llvm-project?rev=347328&view=rev
Log:
[ConstantFolding] Add support for saturating add/sub

Add constant folding support for the saturating add/sub intrinsics (uadd.sat, sadd.sat, usub.sat, ssub.sat), based on the APInt saturating-math methods introduced in D54332.

Patch by: @nikic (Nikita Popov)

Differential Revision: https://reviews.llvm.org/D54531
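
For context, the APInt saturating-arithmetic methods this fold relies on clamp to the type's numeric limits instead of wrapping. Below is a minimal standalone sketch of their behavior (not part of this commit; it only assumes LLVM's ADT headers and Support library are available to build against):

#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

int main() {
  // 8-bit values, matching the widths used in the new test file.

  // Unsigned add clamps to the unsigned maximum (255) instead of wrapping.
  assert(APInt(8, 250).uadd_sat(APInt(8, 100)) == APInt::getMaxValue(8));

  // Signed add clamps to the signed maximum (127).
  assert(APInt(8, 120).sadd_sat(APInt(8, 10)) == APInt::getSignedMaxValue(8));

  // Unsigned subtract clamps at zero.
  assert(APInt(8, 200).usub_sat(APInt(8, 250)) == 0);

  // Signed subtract clamps to the signed minimum (-128).
  assert(APInt(8, -120, /*isSigned=*/true).ssub_sat(APInt(8, 10)) ==
         APInt::getSignedMinValue(8));
  return 0;
}

These are exactly the results the CHECK lines in the added test file expect (e.g. uadd.sat(250, 100) folds to i8 -1, i.e. 255).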

Added:
    llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll
Modified:
    llvm/trunk/lib/Analysis/ConstantFolding.cpp

Modified: llvm/trunk/lib/Analysis/ConstantFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ConstantFolding.cpp?rev=347328&r1=347327&r2=347328&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ConstantFolding.cpp (original)
+++ llvm/trunk/lib/Analysis/ConstantFolding.cpp Tue Nov 20 09:05:55 2018
@@ -1399,6 +1399,10 @@ bool llvm::canConstantFoldCallTo(Immutab
   case Intrinsic::usub_with_overflow:
   case Intrinsic::smul_with_overflow:
   case Intrinsic::umul_with_overflow:
+  case Intrinsic::sadd_sat:
+  case Intrinsic::uadd_sat:
+  case Intrinsic::ssub_sat:
+  case Intrinsic::usub_sat:
   case Intrinsic::convert_from_fp16:
   case Intrinsic::convert_to_fp16:
   case Intrinsic::bitreverse:
@@ -2019,6 +2023,14 @@ Constant *ConstantFoldScalarCall(StringR
           };
           return ConstantStruct::get(cast<StructType>(Ty), Ops);
         }
+        case Intrinsic::uadd_sat:
+          return ConstantInt::get(Ty, Op1->getValue().uadd_sat(Op2->getValue()));
+        case Intrinsic::sadd_sat:
+          return ConstantInt::get(Ty, Op1->getValue().sadd_sat(Op2->getValue()));
+        case Intrinsic::usub_sat:
+          return ConstantInt::get(Ty, Op1->getValue().usub_sat(Op2->getValue()));
+        case Intrinsic::ssub_sat:
+          return ConstantInt::get(Ty, Op1->getValue().ssub_sat(Op2->getValue()));
         case Intrinsic::cttz:
           if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
             return UndefValue::get(Ty);

Added: llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll?rev=347328&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll (added)
+++ llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll Tue Nov 20 09:05:55 2018
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -constprop -S | FileCheck %s
+
+declare void @dummy(i8)
+declare void @dummy_vec(<2 x i8>)
+
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+declare i8 @llvm.sadd.sat.i8(i8, i8)
+declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
+declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)
+
+declare i8 @llvm.usub.sat.i8(i8, i8)
+declare i8 @llvm.ssub.sat.i8(i8, i8)
+declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
+declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
+
+define void @test_add_scalar() {
+; CHECK-LABEL: @test_add_scalar(
+; CHECK-NEXT:    call void @dummy(i8 30)
+; CHECK-NEXT:    call void @dummy(i8 -1)
+; CHECK-NEXT:    call void @dummy(i8 -10)
+; CHECK-NEXT:    call void @dummy(i8 127)
+; CHECK-NEXT:    call void @dummy(i8 -128)
+; CHECK-NEXT:    ret void
+;
+  %x1 = call i8 @llvm.uadd.sat.i8(i8 10, i8 20)
+  call void @dummy(i8 %x1)
+  %x2 = call i8 @llvm.uadd.sat.i8(i8 250, i8 100)
+  call void @dummy(i8 %x2)
+
+  %y1 = call i8 @llvm.sadd.sat.i8(i8 10, i8 -20)
+  call void @dummy(i8 %y1)
+  %y2 = call i8 @llvm.sadd.sat.i8(i8 120, i8 10)
+  call void @dummy(i8 %y2)
+  %y3 = call i8 @llvm.sadd.sat.i8(i8 -120, i8 -10)
+  call void @dummy(i8 %y3)
+
+  ret void
+}
+
+define void @test_add_vector(<2 x i8> %a) {
+; CHECK-LABEL: @test_add_vector(
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 20, i8 30>)
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 -1, i8 -1>)
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 -10, i8 -30>)
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 127, i8 127>)
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 -128, i8 -128>)
+; CHECK-NEXT:    ret void
+;
+  %x1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 15>, <2 x i8> <i8 10, i8 15>)
+  call void @dummy_vec(<2 x i8> %x1)
+  %x2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 100, i8 200>, <2 x i8> <i8 250, i8 100>)
+  call void @dummy_vec(<2 x i8> %x2)
+
+  %y1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 10, i8 -15>, <2 x i8> <i8 -20, i8 -15>)
+  call void @dummy_vec(<2 x i8> %y1)
+  %y2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 100, i8 10>, <2 x i8> <i8 30, i8 120>)
+  call void @dummy_vec(<2 x i8> %y2)
+  %y3 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 -100, i8 -10>, <2 x i8> <i8 -30, i8 -120>)
+  call void @dummy_vec(<2 x i8> %y3)
+
+  ret void
+}
+
+define void @test_usub_ssub_scalar() {
+; CHECK-LABEL: @test_usub_ssub_scalar(
+; CHECK-NEXT:    call void @dummy(i8 10)
+; CHECK-NEXT:    call void @dummy(i8 0)
+; CHECK-NEXT:    call void @dummy(i8 -30)
+; CHECK-NEXT:    call void @dummy(i8 127)
+; CHECK-NEXT:    call void @dummy(i8 -128)
+; CHECK-NEXT:    ret void
+;
+  %x1 = call i8 @llvm.usub.sat.i8(i8 20, i8 10)
+  call void @dummy(i8 %x1)
+  %x2 = call i8 @llvm.usub.sat.i8(i8 200, i8 250)
+  call void @dummy(i8 %x2)
+
+  %y1 = call i8 @llvm.ssub.sat.i8(i8 -10, i8 20)
+  call void @dummy(i8 %y1)
+  %y2 = call i8 @llvm.ssub.sat.i8(i8 120, i8 -10)
+  call void @dummy(i8 %y2)
+  %y3 = call i8 @llvm.ssub.sat.i8(i8 -120, i8 10)
+  call void @dummy(i8 %y3)
+
+  ret void
+}
+
+define void @test_sub_vector(<2 x i8> %a) {
+; CHECK-LABEL: @test_sub_vector(
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 10, i8 5>)
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> zeroinitializer)
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 30, i8 0>)
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 127, i8 127>)
+; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 -128, i8 -128>)
+; CHECK-NEXT:    ret void
+;
+  %x1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 20, i8 15>, <2 x i8> <i8 10, i8 10>)
+  call void @dummy_vec(<2 x i8> %x1)
+  %x2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 100, i8 200>, <2 x i8> <i8 150, i8 250>)
+  call void @dummy_vec(<2 x i8> %x2)
+
+  %y1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 10, i8 -15>, <2 x i8> <i8 -20, i8 -15>)
+  call void @dummy_vec(<2 x i8> %y1)
+  %y2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 100, i8 10>, <2 x i8> <i8 -30, i8 -120>)
+  call void @dummy_vec(<2 x i8> %y2)
+  %y3 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 -100, i8 -10>, <2 x i8> <i8 30, i8 120>)
+  call void @dummy_vec(<2 x i8> %y3)
+
+  ret void
+}



