[clang] [clang] Fix sub-integer __builtin_elementwise_(add|sub)_sat (PR #119423)

Fraser Cormack via cfe-commits cfe-commits at lists.llvm.org
Tue Dec 10 09:48:40 PST 2024


https://github.com/frasercrmck created https://github.com/llvm/llvm-project/pull/119423

These builtins would unconditionally perform the usual arithmetic conversions on promotable scalar integer arguments. In practice this meant that char and short arguments were promoted to int, the saturating operation was performed at 32 bits, and the result was truncated back down afterwards. This in effect silently replaced a saturating add/sub with a regular add/sub, which is not intuitive (or intended) behaviour.
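
For illustration, a hypothetical snippet (not part of the patch) showing what the old behaviour did to an 8-bit saturating add:

    signed char a = 100, b = 100;
    // Old: both operands were promoted to int, the 32-bit add yielded 200
    // (nowhere near saturating at int width), and the result was truncated
    // back to signed char, giving -56.
    // New: the operation stays at 8 bits and saturates, giving 127.
    signed char r = __builtin_elementwise_add_sat(a, b);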

With this patch, promotable scalar integer types are not promoted to int, but are kept intact. If the types differ, the smaller integer is promoted to the larger one. The signedness of the operation matches the larger integer type.
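
As a sketch of the new rules (mirroring cases exercised by the tests below):

    char c = 1;
    short s1 = 2, s2 = 3;
    unsigned short us = 4;

    // Same-width operands: the operation stays at 16 bits
    // (llvm.sadd.sat.i16) instead of being widened to a 32-bit add.
    s1 = __builtin_elementwise_add_sat(s1, s2);
    // Mixed widths: c is sign-extended to 16 bits and the operation is a
    // signed 16-bit saturating add.
    s1 = __builtin_elementwise_add_sat(s1, c);
    // Mixed signedness: c is sign-extended to 16 bits, but the operation
    // follows the larger type's signedness (llvm.uadd.sat.i16).
    us = __builtin_elementwise_add_sat(us, c);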

No change is made to vector types: they are not promoted, and their element types must already match.
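
For example (a hypothetical typedef, using clang's ext_vector_type extension):

    typedef short short4 __attribute__((ext_vector_type(4)));
    short4 va = {1, 2, 3, 4}, vb = {5, 6, 7, 8};
    // Vector operands are left as before: no promotion is performed and
    // the element types of the two operands must match.
    short4 vr = __builtin_elementwise_add_sat(va, vb);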

From 98d796a81960b231ab02fd7aba78e33e5a176fee Mon Sep 17 00:00:00 2001
From: Fraser Cormack <fraser at codeplay.com>
Date: Tue, 10 Dec 2024 17:41:07 +0000
Subject: [PATCH] [clang] Fix sub-integer __builtin_elementwise_(add|sub)_sat

These builtins would unconditionally perform the usual arithmetic
conversions on promotable scalar integer arguments. In practice this
meant that char and short arguments were promoted to int, the
saturating operation was performed at 32 bits, and the result was
truncated back down afterwards. This in effect silently replaced a
saturating add/sub with a regular add/sub, which is not intuitive (or
intended) behaviour.

With this patch, promotable scalar integer types are not promoted to
int, but are kept intact. If the types differ, the smaller integer is
promoted to the larger one. The signedness of the operation matches the
larger integer type.

No change is made to vector types: they are not promoted, and their
element types must already match.
---
 clang/lib/Sema/SemaChecking.cpp               | 38 +++++++
 .../test/CodeGen/builtins-elementwise-math.c  | 98 ++++++++++++++++++-
 2 files changed, 134 insertions(+), 2 deletions(-)

diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index a248a6b53b0d06..6107eb854fb95e 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -2765,6 +2765,44 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
   // types only.
   case Builtin::BI__builtin_elementwise_add_sat:
   case Builtin::BI__builtin_elementwise_sub_sat: {
+    if (checkArgCount(TheCall, 2))
+      return ExprError();
+    ExprResult LHS = TheCall->getArg(0);
+    ExprResult RHS = TheCall->getArg(1);
+    QualType LHSType = LHS.get()->getType().getUnqualifiedType();
+    QualType RHSType = RHS.get()->getType().getUnqualifiedType();
+    // If both LHS/RHS are promotable integer types, do not perform the usual
+    // conversions - we must keep the saturating operation at the correct
+    // bitwidth.
+    if (Context.isPromotableIntegerType(LHSType) &&
+        Context.isPromotableIntegerType(RHSType)) {
+      // First, convert each argument to an r-value.
+      ExprResult ResLHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+      if (ResLHS.isInvalid())
+        return ExprError();
+      LHS = ResLHS.get();
+
+      ExprResult ResRHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+      if (ResRHS.isInvalid())
+        return ExprError();
+      RHS = ResRHS.get();
+
+      LHSType = LHS.get()->getType().getUnqualifiedType();
+      RHSType = RHS.get()->getType().getUnqualifiedType();
+
+      // If the two integer types are not of equal order, cast the smaller
+      // integer to the larger one.
+      if (int Order = Context.getIntegerTypeOrder(LHSType, RHSType); Order == 1)
+        RHS = ImpCastExprToType(RHS.get(), LHSType, CK_IntegralCast);
+      else if (Order == -1)
+        LHS = ImpCastExprToType(LHS.get(), RHSType, CK_IntegralCast);
+
+      TheCall->setArg(0, LHS.get());
+      TheCall->setArg(1, RHS.get());
+      TheCall->setType(LHS.get()->getType().getUnqualifiedType());
+      break;
+    }
+
     if (BuiltinElementwiseMath(TheCall))
       return ExprError();
 
diff --git a/clang/test/CodeGen/builtins-elementwise-math.c b/clang/test/CodeGen/builtins-elementwise-math.c
index 7f6b5f26eb9307..4ac6fe18c4d5a3 100644
--- a/clang/test/CodeGen/builtins-elementwise-math.c
+++ b/clang/test/CodeGen/builtins-elementwise-math.c
@@ -68,7 +68,10 @@ void test_builtin_elementwise_add_sat(float f1, float f2, double d1, double d2,
                                       long long int i2, si8 vi1, si8 vi2,
                                       unsigned u1, unsigned u2, u4 vu1, u4 vu2,
                                       _BitInt(31) bi1, _BitInt(31) bi2,
-                                      unsigned _BitInt(55) bu1, unsigned _BitInt(55) bu2) {
+                                      unsigned _BitInt(55) bu1, unsigned _BitInt(55) bu2,
+                                      char c1, char c2, unsigned char uc1,
+                                      unsigned char uc2, short s1, short s2,
+                                      unsigned short us1, unsigned short us2) {
   // CHECK:      [[I1:%.+]] = load i64, ptr %i1.addr, align 8
   // CHECK-NEXT: [[I2:%.+]] = load i64, ptr %i2.addr, align 8
   // CHECK-NEXT: call i64 @llvm.sadd.sat.i64(i64 [[I1]], i64 [[I2]])
@@ -114,6 +117,50 @@ void test_builtin_elementwise_add_sat(float f1, float f2, double d1, double d2,
 
   // CHECK: store i64 98, ptr %i1.addr, align 8
   i1 = __builtin_elementwise_add_sat(1, 'a');
+
+  // CHECK:      [[C1:%.+]] = load i8, ptr %c1.addr, align 1
+  // CHECK-NEXT: [[C2:%.+]] = load i8, ptr %c2.addr, align 1
+  // CHECK-NEXT: call i8 @llvm.sadd.sat.i8(i8 [[C1]], i8 [[C2]])
+  c1 = __builtin_elementwise_add_sat(c1, c2);
+
+  // CHECK:      [[UC1:%.+]] = load i8, ptr %uc1.addr, align 1
+  // CHECK-NEXT: [[UC2:%.+]] = load i8, ptr %uc2.addr, align 1
+  // CHECK-NEXT: call i8 @llvm.uadd.sat.i8(i8 [[UC1]], i8 [[UC2]])
+  uc1 = __builtin_elementwise_add_sat(uc1, uc2);
+
+  // CHECK:      [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+  // CHECK-NEXT: [[S2:%.+]] = load i16, ptr %s2.addr, align 2
+  // CHECK-NEXT: call i16 @llvm.sadd.sat.i16(i16 [[S1]], i16 [[S2]])
+  s1 = __builtin_elementwise_add_sat(s1, s2);
+
+  // CHECK:      [[US1:%.+]] = load i16, ptr %us1.addr, align 2
+  // CHECK-NEXT: [[US2:%.+]] = load i16, ptr %us2.addr, align 2
+  // CHECK-NEXT: call i16 @llvm.uadd.sat.i16(i16 [[US1]], i16 [[US2]])
+  us1 = __builtin_elementwise_add_sat(us1, us2);
+
+  // CHECK:      [[C1:%.+]] = load i8, ptr %c1.addr, align 1
+  // CHECK-NEXT: [[C1EXT:%.+]] = sext i8 [[C1]] to i16
+  // CHECK-NEXT: [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+  // CHECK-NEXT: call i16 @llvm.sadd.sat.i16(i16 [[C1EXT]], i16 [[S1]])
+  s1 = __builtin_elementwise_add_sat(c1, s1);
+
+  // CHECK:      [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+  // CHECK-NEXT: [[C1:%.+]] = load i8, ptr %c1.addr, align 1
+  // CHECK-NEXT: [[C1EXT:%.+]] = sext i8 [[C1]] to i16
+  // CHECK-NEXT: call i16 @llvm.sadd.sat.i16(i16 [[S1]], i16 [[C1EXT]])
+  s1 = __builtin_elementwise_add_sat(s1, c1);
+
+  // CHECK:      [[UC1:%.+]] = load i8, ptr %uc1.addr, align 1
+  // CHECK-NEXT: [[UC1EXT:%.+]] = zext i8 [[UC1]] to i16
+  // CHECK-NEXT: [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+  // CHECK-NEXT: call i16 @llvm.sadd.sat.i16(i16 [[UC1EXT]], i16 [[S1]])
+  s1 = __builtin_elementwise_add_sat(uc1, s1);
+
+  // CHECK:      [[US1:%.+]] = load i16, ptr %us1.addr, align 2
+  // CHECK-NEXT: [[C1:%.+]] = load i8, ptr %c1.addr, align 1
+  // CHECK-NEXT: [[C1EXT:%.+]] = sext i8 [[C1]] to i16
+  // CHECK-NEXT: call i16 @llvm.uadd.sat.i16(i16 [[US1]], i16 [[C1EXT]])
+  us1 = __builtin_elementwise_add_sat(us1, c1);
 }
 
 void test_builtin_elementwise_sub_sat(float f1, float f2, double d1, double d2,
@@ -121,7 +168,10 @@ void test_builtin_elementwise_sub_sat(float f1, float f2, double d1, double d2,
                                       long long int i2, si8 vi1, si8 vi2,
                                       unsigned u1, unsigned u2, u4 vu1, u4 vu2,
                                       _BitInt(31) bi1, _BitInt(31) bi2,
-                                      unsigned _BitInt(55) bu1, unsigned _BitInt(55) bu2) {
+                                      unsigned _BitInt(55) bu1, unsigned _BitInt(55) bu2,
+                                      char c1, char c2, unsigned char uc1,
+                                      unsigned char uc2, short s1, short s2,
+                                      unsigned short us1, unsigned short us2) {
   // CHECK:      [[I1:%.+]] = load i64, ptr %i1.addr, align 8
   // CHECK-NEXT: [[I2:%.+]] = load i64, ptr %i2.addr, align 8
   // CHECK-NEXT: call i64 @llvm.ssub.sat.i64(i64 [[I1]], i64 [[I2]])
@@ -167,6 +217,50 @@ void test_builtin_elementwise_sub_sat(float f1, float f2, double d1, double d2,
 
   // CHECK: store i64 -96, ptr %i1.addr, align 8
   i1 = __builtin_elementwise_sub_sat(1, 'a');
+
+  // CHECK:      [[C1:%.+]] = load i8, ptr %c1.addr, align 1
+  // CHECK-NEXT: [[C2:%.+]] = load i8, ptr %c2.addr, align 1
+  // CHECK-NEXT: call i8 @llvm.ssub.sat.i8(i8 [[C1]], i8 [[C2]])
+  c1 = __builtin_elementwise_sub_sat(c1, c2);
+
+  // CHECK:      [[UC1:%.+]] = load i8, ptr %uc1.addr, align 1
+  // CHECK-NEXT: [[UC2:%.+]] = load i8, ptr %uc2.addr, align 1
+  // CHECK-NEXT: call i8 @llvm.usub.sat.i8(i8 [[UC1]], i8 [[UC2]])
+  uc1 = __builtin_elementwise_sub_sat(uc1, uc2);
+
+  // CHECK:      [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+  // CHECK-NEXT: [[S2:%.+]] = load i16, ptr %s2.addr, align 2
+  // CHECK-NEXT: call i16 @llvm.ssub.sat.i16(i16 [[S1]], i16 [[S2]])
+  s1 = __builtin_elementwise_sub_sat(s1, s2);
+
+  // CHECK:      [[US1:%.+]] = load i16, ptr %us1.addr, align 2
+  // CHECK-NEXT: [[US2:%.+]] = load i16, ptr %us2.addr, align 2
+  // CHECK-NEXT: call i16 @llvm.usub.sat.i16(i16 [[US1]], i16 [[US2]])
+  us1 = __builtin_elementwise_sub_sat(us1, us2);
+
+  // CHECK:      [[C1:%.+]] = load i8, ptr %c1.addr, align 1
+  // CHECK-NEXT: [[C1EXT:%.+]] = sext i8 [[C1]] to i16
+  // CHECK-NEXT: [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+  // CHECK-NEXT: call i16 @llvm.ssub.sat.i16(i16 [[C1EXT]], i16 [[S1]])
+  s1 = __builtin_elementwise_sub_sat(c1, s1);
+
+  // CHECK:      [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+  // CHECK-NEXT: [[C1:%.+]] = load i8, ptr %c1.addr, align 1
+  // CHECK-NEXT: [[C1EXT:%.+]] = sext i8 [[C1]] to i16
+  // CHECK-NEXT: call i16 @llvm.ssub.sat.i16(i16 [[S1]], i16 [[C1EXT]])
+  s1 = __builtin_elementwise_sub_sat(s1, c1);
+
+  // CHECK:      [[UC1:%.+]] = load i8, ptr %uc1.addr, align 1
+  // CHECK-NEXT: [[UC1EXT:%.+]] = zext i8 [[UC1]] to i16
+  // CHECK-NEXT: [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+  // CHECK-NEXT: call i16 @llvm.ssub.sat.i16(i16 [[UC1EXT]], i16 [[S1]])
+  s1 = __builtin_elementwise_sub_sat(uc1, s1);
+
+  // CHECK:      [[US1:%.+]] = load i16, ptr %us1.addr, align 2
+  // CHECK-NEXT: [[C1:%.+]] = load i8, ptr %c1.addr, align 1
+  // CHECK-NEXT: [[C1EXT:%.+]] = sext i8 [[C1]] to i16
+  // CHECK-NEXT: call i16 @llvm.usub.sat.i16(i16 [[US1]], i16 [[C1EXT]])
+  us1 = __builtin_elementwise_sub_sat(us1, c1);
 }
 
 void test_builtin_elementwise_maximum(float f1, float f2, double d1, double d2,


