[cfe-commits] r131000 - in /cfe/trunk: lib/CodeGen/CGExprScalar.cpp test/CodeGen/altivec.c test/CodeGen/builtins-ppc-altivec.c test/CodeGen/ext-vector.c

Eli Friedman eli.friedman at gmail.com
Fri May 6 11:04:18 PDT 2011


Author: efriedma
Date: Fri May  6 13:04:18 2011
New Revision: 131000

URL: http://llvm.org/viewvc/llvm-project?rev=131000&view=rev
Log:
Don't emit nsw flags for vector operations; there's basically no benefit, and a lot of downside (like PR9850, where an expression from clang's xmmintrin.h involving _mm_add_epi32 was transformed in an unexpected way).
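
For context, a minimal sketch of the kind of code affected (an assumed illustration, not the exact PR9850 reproducer): users of the SSE intrinsics expect lane-wise two's-complement wraparound, so tagging the underlying vector add with nsw lets the optimizer assume per-lane signed overflow never happens and rewrite the expression in surprising ways.

    /* Assumed illustration; requires SSE2 (-msse2). _mm_add_epi32 is
       declared in <emmintrin.h>. */
    #include <emmintrin.h>

    __m128i wrapping_add(__m128i a, __m128i b) {
      /* Expected to wrap per lane (INT_MAX + 1 == INT_MIN); with "add nsw"
         the optimizer may instead treat that overflow as undefined. */
      return _mm_add_epi32(a, b);
    }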


Modified:
    cfe/trunk/lib/CodeGen/CGExprScalar.cpp
    cfe/trunk/test/CodeGen/altivec.c
    cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
    cfe/trunk/test/CodeGen/ext-vector.c

Modified: cfe/trunk/lib/CodeGen/CGExprScalar.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGExprScalar.cpp?rev=131000&r1=130999&r2=131000&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGExprScalar.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGExprScalar.cpp Fri May  6 13:04:18 2011
@@ -400,7 +400,7 @@
 
   // Binary Operators.
   Value *EmitMul(const BinOpInfo &Ops) {
-    if (Ops.Ty->hasSignedIntegerRepresentation()) {
+    if (Ops.Ty->isSignedIntegerType()) {
       switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
       case LangOptions::SOB_Undefined:
         return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
@@ -1333,10 +1333,7 @@
     if (type->hasIntegerRepresentation()) {
       llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
 
-      if (type->hasSignedIntegerRepresentation())
-        value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
-      else
-        value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
     } else {
       value = Builder.CreateFAdd(
                   value,
@@ -1829,7 +1826,7 @@
 
 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
   if (!Ops.Ty->isAnyPointerType()) {
-    if (Ops.Ty->hasSignedIntegerRepresentation()) {
+    if (Ops.Ty->isSignedIntegerType()) {
       switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
       case LangOptions::SOB_Undefined:
         return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
@@ -1914,7 +1911,7 @@
 
 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
   if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
-    if (Ops.Ty->hasSignedIntegerRepresentation()) {
+    if (Ops.Ty->isSignedIntegerType()) {
       switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
       case LangOptions::SOB_Undefined:
         return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
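
The predicate swap above is what turns this off for vectors only: hasSignedIntegerRepresentation() is also true for vectors of signed integers, while isSignedIntegerType() is true only for scalar signed integer types, so under the default signed-overflow mode scalar arithmetic keeps its nsw flags and vector arithmetic loses them. A minimal sketch of the intended post-patch IR, assuming -fwrapv is not given and using the GCC/clang vector extension:

    typedef int v4si __attribute__((vector_size(16)));

    int  scalar_add(int a, int b)   { return a + b; }  /* add nsw i32   */
    v4si vector_add(v4si a, v4si b) { return a + b; }  /* add <4 x i32> */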

Modified: cfe/trunk/test/CodeGen/altivec.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/altivec.c?rev=131000&r1=130999&r2=131000&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/altivec.c (original)
+++ cfe/trunk/test/CodeGen/altivec.c Fri May  6 13:04:18 2011
@@ -23,7 +23,7 @@
 // Check pre/post increment/decrement
 void test3() {
   vector int vi;
-  vi++;                                    // CHECK: add nsw <4 x i32> {{.*}} <i32 1, i32 1, i32 1, i32 1>
+  vi++;                                    // CHECK: add <4 x i32> {{.*}} <i32 1, i32 1, i32 1, i32 1>
   vector unsigned int vui;
   --vui;                                   // CHECK: add <4 x i32> {{.*}} <i32 -1, i32 -1, i32 -1, i32 -1>
   vector float vf;

Modified: cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-altivec.c?rev=131000&r1=130999&r2=131000&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-altivec.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-altivec.c Fri May  6 13:04:18 2011
@@ -44,13 +44,13 @@
 void test1() {
 
   /* vec_abs */
-  vsc = vec_abs(vsc);                           // CHECK: sub nsw <16 x i8> zeroinitializer
+  vsc = vec_abs(vsc);                           // CHECK: sub <16 x i8> zeroinitializer
                                                 // CHECK: @llvm.ppc.altivec.vmaxsb
 
-  vs = vec_abs(vs);                             // CHECK: sub nsw <8 x i16> zeroinitializer
+  vs = vec_abs(vs);                             // CHECK: sub <8 x i16> zeroinitializer
                                                 // CHECK: @llvm.ppc.altivec.vmaxsh
 
-  vi = vec_abs(vi);                             // CHECK: sub nsw <4 x i32> zeroinitializer
+  vi = vec_abs(vi);                             // CHECK: sub <4 x i32> zeroinitializer
                                                 // CHECK: @llvm.ppc.altivec.vmaxsw
 
   vf = vec_abs(vf);                             // CHECK: and <4 x i32>
@@ -66,40 +66,40 @@
                                                 // CHECK: @llvm.ppc.altivec.vmaxsw
 
   /*  vec_add */
-  res_vsc = vec_add(vsc, vsc);                  // CHECK: add nsw <16 x i8>
-  res_vsc = vec_add(vbc, vsc);                  // CHECK: add nsw <16 x i8>
-  res_vsc = vec_add(vsc, vbc);                  // CHECK: add nsw <16 x i8>
+  res_vsc = vec_add(vsc, vsc);                  // CHECK: add <16 x i8>
+  res_vsc = vec_add(vbc, vsc);                  // CHECK: add <16 x i8>
+  res_vsc = vec_add(vsc, vbc);                  // CHECK: add <16 x i8>
   res_vuc = vec_add(vuc, vuc);                  // CHECK: add <16 x i8>
   res_vuc = vec_add(vbc, vuc);                  // CHECK: add <16 x i8>
   res_vuc = vec_add(vuc, vbc);                  // CHECK: add <16 x i8>
-  res_vs  = vec_add(vs, vs);                    // CHECK: add nsw <8 x i16>
-  res_vs  = vec_add(vbs, vs);                   // CHECK: add nsw <8 x i16>
-  res_vs  = vec_add(vs, vbs);                   // CHECK: add nsw <8 x i16>
+  res_vs  = vec_add(vs, vs);                    // CHECK: add <8 x i16>
+  res_vs  = vec_add(vbs, vs);                   // CHECK: add <8 x i16>
+  res_vs  = vec_add(vs, vbs);                   // CHECK: add <8 x i16>
   res_vus = vec_add(vus, vus);                  // CHECK: add <8 x i16>
   res_vus = vec_add(vbs, vus);                  // CHECK: add <8 x i16>
   res_vus = vec_add(vus, vbs);                  // CHECK: add <8 x i16>
-  res_vi  = vec_add(vi, vi);                    // CHECK: add nsw <4 x i32>
-  res_vi  = vec_add(vbi, vi);                   // CHECK: add nsw <4 x i32>
-  res_vi  = vec_add(vi, vbi);                   // CHECK: add nsw <4 x i32>
+  res_vi  = vec_add(vi, vi);                    // CHECK: add <4 x i32>
+  res_vi  = vec_add(vbi, vi);                   // CHECK: add <4 x i32>
+  res_vi  = vec_add(vi, vbi);                   // CHECK: add <4 x i32>
   res_vui = vec_add(vui, vui);                  // CHECK: add <4 x i32>
   res_vui = vec_add(vbi, vui);                  // CHECK: add <4 x i32>
   res_vui = vec_add(vui, vbi);                  // CHECK: add <4 x i32>
   res_vf  = vec_add(vf, vf);                    // CHECK: fadd <4 x float>
-  res_vsc = vec_vaddubm(vsc, vsc);              // CHECK: add nsw <16 x i8>
-  res_vsc = vec_vaddubm(vbc, vsc);              // CHECK: add nsw <16 x i8>
-  res_vsc = vec_vaddubm(vsc, vbc);              // CHECK: add nsw <16 x i8>
+  res_vsc = vec_vaddubm(vsc, vsc);              // CHECK: add <16 x i8>
+  res_vsc = vec_vaddubm(vbc, vsc);              // CHECK: add <16 x i8>
+  res_vsc = vec_vaddubm(vsc, vbc);              // CHECK: add <16 x i8>
   res_vuc = vec_vaddubm(vuc, vuc);              // CHECK: add <16 x i8>
   res_vuc = vec_vaddubm(vbc, vuc);              // CHECK: add <16 x i8>
   res_vuc = vec_vaddubm(vuc, vbc);              // CHECK: add <16 x i8>
-  res_vs  = vec_vadduhm(vs, vs);                // CHECK: add nsw <8 x i16>
-  res_vs  = vec_vadduhm(vbs, vs);               // CHECK: add nsw <8 x i16>
-  res_vs  = vec_vadduhm(vs, vbs);               // CHECK: add nsw <8 x i16>
+  res_vs  = vec_vadduhm(vs, vs);                // CHECK: add <8 x i16>
+  res_vs  = vec_vadduhm(vbs, vs);               // CHECK: add <8 x i16>
+  res_vs  = vec_vadduhm(vs, vbs);               // CHECK: add <8 x i16>
   res_vus = vec_vadduhm(vus, vus);              // CHECK: add <8 x i16>
   res_vus = vec_vadduhm(vbs, vus);              // CHECK: add <8 x i16>
   res_vus = vec_vadduhm(vus, vbs);              // CHECK: add <8 x i16>
-  res_vi  = vec_vadduwm(vi, vi);                // CHECK: add nsw <4 x i32>
-  res_vi  = vec_vadduwm(vbi, vi);               // CHECK: add nsw <4 x i32>
-  res_vi  = vec_vadduwm(vi, vbi);               // CHECK: add nsw <4 x i32>
+  res_vi  = vec_vadduwm(vi, vi);                // CHECK: add <4 x i32>
+  res_vi  = vec_vadduwm(vbi, vi);               // CHECK: add <4 x i32>
+  res_vi  = vec_vadduwm(vi, vbi);               // CHECK: add <4 x i32>
   res_vui = vec_vadduwm(vui, vui);              // CHECK: add <4 x i32>
   res_vui = vec_vadduwm(vbi, vui);              // CHECK: add <4 x i32>
   res_vui = vec_vadduwm(vui, vbi);              // CHECK: add <4 x i32>
@@ -689,14 +689,14 @@
   res_vus = vec_mladd(vus, vus, vus);           // CHECK: mul <8 x i16>
                                                 // CHECK: add <8 x i16>
 
-  res_vs = vec_mladd(vus, vs, vs);              // CHECK: mul nsw <8 x i16>
-                                                // CHECK: add nsw <8 x i16>
+  res_vs = vec_mladd(vus, vs, vs);              // CHECK: mul <8 x i16>
+                                                // CHECK: add <8 x i16>
 
-  res_vs = vec_mladd(vs, vus, vus);             // CHECK: mul nsw <8 x i16>
-                                                // CHECK: add nsw <8 x i16>
+  res_vs = vec_mladd(vs, vus, vus);             // CHECK: mul <8 x i16>
+                                                // CHECK: add <8 x i16>
 
-  res_vs = vec_mladd(vs, vs, vs);               // CHECK: mul nsw <8 x i16>
-                                                // CHECK: add nsw <8 x i16>
+  res_vs = vec_mladd(vs, vs, vs);               // CHECK: mul <8 x i16>
+                                                // CHECK: add <8 x i16>
 
   /* vec_mradds */
   res_vs = vec_mradds(vs, vs, vs);              // CHECK: @llvm.ppc.altivec.vmhraddshs
@@ -1592,40 +1592,40 @@
   vec_stvxl(vf, 0, &param_f);                   // CHECK: @llvm.ppc.altivec.stvxl
 
   /* vec_sub */
-  res_vsc = vec_sub(vsc, vsc);                  // CHECK: sub nsw <16 x i8>
-  res_vsc = vec_sub(vbc, vsc);                  // CHECK: sub nsw <16 x i8>
-  res_vsc = vec_sub(vsc, vbc);                  // CHECK: sub nsw <16 x i8>
+  res_vsc = vec_sub(vsc, vsc);                  // CHECK: sub <16 x i8>
+  res_vsc = vec_sub(vbc, vsc);                  // CHECK: sub <16 x i8>
+  res_vsc = vec_sub(vsc, vbc);                  // CHECK: sub <16 x i8>
   res_vuc = vec_sub(vuc, vuc);                  // CHECK: sub <16 x i8>
   res_vuc = vec_sub(vbc, vuc);                  // CHECK: sub <16 x i8>
   res_vuc = vec_sub(vuc, vbc);                  // CHECK: sub <16 x i8>
-  res_vs  = vec_sub(vs, vs);                    // CHECK: sub nsw <8 x i16>
-  res_vs  = vec_sub(vbs, vs);                   // CHECK: sub nsw <8 x i16>
-  res_vs  = vec_sub(vs, vbs);                   // CHECK: sub nsw <8 x i16>
+  res_vs  = vec_sub(vs, vs);                    // CHECK: sub <8 x i16>
+  res_vs  = vec_sub(vbs, vs);                   // CHECK: sub <8 x i16>
+  res_vs  = vec_sub(vs, vbs);                   // CHECK: sub <8 x i16>
   res_vus = vec_sub(vus, vus);                  // CHECK: sub <8 x i16>
   res_vus = vec_sub(vbs, vus);                  // CHECK: sub <8 x i16>
   res_vus = vec_sub(vus, vbs);                  // CHECK: sub <8 x i16>
-  res_vi  = vec_sub(vi, vi);                    // CHECK: sub nsw <4 x i32>
-  res_vi  = vec_sub(vbi, vi);                   // CHECK: sub nsw <4 x i32>
-  res_vi  = vec_sub(vi, vbi);                   // CHECK: sub nsw <4 x i32>
+  res_vi  = vec_sub(vi, vi);                    // CHECK: sub <4 x i32>
+  res_vi  = vec_sub(vbi, vi);                   // CHECK: sub <4 x i32>
+  res_vi  = vec_sub(vi, vbi);                   // CHECK: sub <4 x i32>
   res_vui = vec_sub(vui, vui);                  // CHECK: sub <4 x i32>
   res_vui = vec_sub(vbi, vui);                  // CHECK: sub <4 x i32>
   res_vui = vec_sub(vui, vbi);                  // CHECK: sub <4 x i32>
   res_vf  = vec_sub(vf, vf);                    // CHECK: fsub <4 x float>
-  res_vsc = vec_vsububm(vsc, vsc);              // CHECK: sub nsw <16 x i8>
-  res_vsc = vec_vsububm(vbc, vsc);              // CHECK: sub nsw <16 x i8>
-  res_vsc = vec_vsububm(vsc, vbc);              // CHECK: sub nsw <16 x i8>
+  res_vsc = vec_vsububm(vsc, vsc);              // CHECK: sub <16 x i8>
+  res_vsc = vec_vsububm(vbc, vsc);              // CHECK: sub <16 x i8>
+  res_vsc = vec_vsububm(vsc, vbc);              // CHECK: sub <16 x i8>
   res_vuc = vec_vsububm(vuc, vuc);              // CHECK: sub <16 x i8>
   res_vuc = vec_vsububm(vbc, vuc);              // CHECK: sub <16 x i8>
   res_vuc = vec_vsububm(vuc, vbc);              // CHECK: sub <16 x i8>
-  res_vs  = vec_vsubuhm(vs, vs);                // CHECK: sub nsw <8 x i16>
+  res_vs  = vec_vsubuhm(vs, vs);                // CHECK: sub <8 x i16>
   res_vs  = vec_vsubuhm(vbs, vus);              // CHECK: sub <8 x i16>
   res_vs  = vec_vsubuhm(vus, vbs);              // CHECK: sub <8 x i16>
   res_vus = vec_vsubuhm(vus, vus);              // CHECK: sub <8 x i16>
   res_vus = vec_vsubuhm(vbs, vus);              // CHECK: sub <8 x i16>
   res_vus = vec_vsubuhm(vus, vbs);              // CHECK: sub <8 x i16>
-  res_vi  = vec_vsubuwm(vi, vi);                // CHECK: sub nsw <4 x i32>
-  res_vi  = vec_vsubuwm(vbi, vi);               // CHECK: sub nsw <4 x i32>
-  res_vi  = vec_vsubuwm(vi, vbi);               // CHECK: sub nsw <4 x i32>
+  res_vi  = vec_vsubuwm(vi, vi);                // CHECK: sub <4 x i32>
+  res_vi  = vec_vsubuwm(vbi, vi);               // CHECK: sub <4 x i32>
+  res_vi  = vec_vsubuwm(vi, vbi);               // CHECK: sub <4 x i32>
   res_vui = vec_vsubuwm(vui, vui);              // CHECK: sub <4 x i32>
   res_vui = vec_vsubuwm(vbi, vui);              // CHECK: sub <4 x i32>
   res_vui = vec_vsubuwm(vui, vbi);              // CHECK: sub <4 x i32>

Modified: cfe/trunk/test/CodeGen/ext-vector.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/ext-vector.c?rev=131000&r1=130999&r2=131000&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/ext-vector.c (original)
+++ cfe/trunk/test/CodeGen/ext-vector.c Fri May  6 13:04:18 2011
@@ -131,9 +131,9 @@
   int4 a = *ap;
   int4 b = *bp;
 
-  // CHECK: add nsw <4 x i32>
-  // CHECK: sub nsw <4 x i32>
-  // CHECK: mul nsw <4 x i32>
+  // CHECK: add <4 x i32>
+  // CHECK: sub <4 x i32>
+  // CHECK: mul <4 x i32>
   // CHECK: sdiv <4 x i32>
   // CHECK: srem <4 x i32>
   a = a + b;
@@ -142,9 +142,9 @@
   a = a / b;
   a = a % b;
 
-  // CHECK: add nsw <4 x i32>
-  // CHECK: sub nsw <4 x i32>
-  // CHECK: mul nsw <4 x i32>
+  // CHECK: add <4 x i32>
+  // CHECK: sub <4 x i32>
+  // CHECK: mul <4 x i32>
   // CHECK: sdiv <4 x i32>
   // CHECK: srem <4 x i32>
   a = a + c;
@@ -153,9 +153,9 @@
   a = a / c;
   a = a % c;
 
-  // CHECK: add nsw <4 x i32>
-  // CHECK: sub nsw <4 x i32>
-  // CHECK: mul nsw <4 x i32>
+  // CHECK: add <4 x i32>
+  // CHECK: sub <4 x i32>
+  // CHECK: mul <4 x i32>
   // CHECK: sdiv <4 x i32>
   // CHECK: srem <4 x i32>
   a += b;
@@ -164,9 +164,9 @@
   a /= b;
   a %= b;
 
-  // CHECK: add nsw <4 x i32>
-  // CHECK: sub nsw <4 x i32>
-  // CHECK: mul nsw <4 x i32>
+  // CHECK: add <4 x i32>
+  // CHECK: sub <4 x i32>
+  // CHECK: mul <4 x i32>
   // CHECK: sdiv <4 x i32>
   // CHECK: srem <4 x i32>
   a += c;
@@ -220,7 +220,7 @@
 }
 
 // CHECK: @test10
-// CHECK: add nsw <4 x i32>
+// CHECK: add <4 x i32>
 // CHECK: extractelement <4 x i32>
 int test10(int4 V) {
   return (V+V).x;
