[llvm] 1b0400e - [X86] combineSubABS - handle NEG(ABD()) expanded patterns

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 11 05:19:06 PDT 2024


Author: Simon Pilgrim
Date: 2024-09-11T13:18:30+01:00
New Revision: 1b0400eed8613108d9f293b9ddd3380e3241ac60

URL: https://github.com/llvm/llvm-project/commit/1b0400eed8613108d9f293b9ddd3380e3241ac60
DIFF: https://github.com/llvm/llvm-project/commit/1b0400eed8613108d9f293b9ddd3380e3241ac60.diff

LOG: [X86] combineSubABS - handle NEG(ABD()) expanded patterns

combineSubABS already handles the "(sub Y, cmovns X, -X) -> (add Y, cmovns -X, X)" fold by flipping the cmov operands.
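As a scalar sketch of that identity (illustrative only, not part of this patch; the helper names are made up), Y - abs(X) computed through the cmovns expansion is the same as adding the flipped select:

  #include <cassert>
  #include <cstdint>
  #include <initializer_list>

  // Y - abs(X), modelling the expanded cmovns form: select X or -X, then subtract.
  static int32_t sub_abs(int32_t y, int32_t x) {
    int32_t neg_x = 0 - x;
    int32_t abs_x = x < 0 ? neg_x : x;
    return y - abs_x;
  }

  // The folded form: the same select with its operands flipped, and the sub turned into an add.
  static int32_t add_flipped(int32_t y, int32_t x) {
    int32_t neg_x = 0 - x;
    int32_t neg_abs_x = x < 0 ? x : neg_x;
    return y + neg_abs_x;
  }

  int main() {
    for (int32_t y : {-9, 0, 4})
      for (int32_t x : {-6, -1, 0, 5})
        assert(sub_abs(y, x) == add_flipped(y, x));
  }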

We can do something similar for the negation of ABDS/ABDU patterns that have been expanded to a CMOVL/CMOVB selecting between a pair of commuted subtractions: "NEG(ABD(X,Y)) -> NEG(CMOV(SUB(X,Y),SUB(Y,X))) -> CMOV(SUB(Y,X),SUB(X,Y))"
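Again as an illustration only (the function names are hypothetical, not from the patch), the new equivalence in scalar C++ form: negating the expanded CMOVL form of abds gives the same result as simply swapping the two cmov operands.

  #include <cassert>
  #include <cstdint>
  #include <initializer_list>

  // abds(x, y) after expansion: a conditional move between the two commuted subtractions.
  static int32_t abds_expanded(int32_t x, int32_t y) {
    int32_t ab = x - y, ba = y - x;
    return x < y ? ba : ab;   // picks the non-negative difference
  }

  // NEG(ABD(X,Y)) folded: swap the cmov operands and drop the explicit negation.
  static int32_t neg_abds_folded(int32_t x, int32_t y) {
    int32_t ab = x - y, ba = y - x;
    return x < y ? ab : ba;   // picks the non-positive difference
  }

  int main() {
    for (int32_t x : {-7, -1, 0, 3, 100})
      for (int32_t y : {-5, 0, 2, 99})
        assert(-abds_expanded(x, y) == neg_abds_folded(x, y));
  }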

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/abds-neg.ll
    llvm/test/CodeGen/X86/abdu-neg.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a1d466eee691c9..d0794cb9bfde3d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56096,34 +56096,50 @@ static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
   if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
     return SDValue();
 
-  X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
-  if (CC != X86::COND_S && CC != X86::COND_NS)
-    return SDValue();
-
-  // Condition should come from a negate operation.
   SDValue Cond = N1.getOperand(3);
-  if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
+  if (Cond.getOpcode() != X86ISD::SUB)
     return SDValue();
   assert(Cond.getResNo() == 1 && "Unexpected result number");
 
-  // Get the X and -X from the negate.
-  SDValue NegX = Cond.getValue(0);
-  SDValue X = Cond.getOperand(1);
-
   SDValue FalseOp = N1.getOperand(0);
   SDValue TrueOp = N1.getOperand(1);
+  X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
+  MVT VT = N->getSimpleValueType(0);
+  SDLoc DL(N);
 
-  // Cmov operands should be X and NegX. Order doesn't matter.
-  if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
-    return SDValue();
+  // ABS condition should come from a negate operation.
+  if ((CC == X86::COND_S || CC == X86::COND_NS) &&
+      isNullConstant(Cond.getOperand(0))) {
+    // Get the X and -X from the negate.
+    SDValue NegX = Cond.getValue(0);
+    SDValue X = Cond.getOperand(1);
 
-  // Build a new CMOV with the operands swapped.
-  SDLoc DL(N);
-  MVT VT = N->getSimpleValueType(0);
-  SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
-                             N1.getOperand(2), Cond);
-  // Convert sub to add.
-  return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
+    // Cmov operands should be X and NegX. Order doesn't matter.
+    if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
+      return SDValue();
+
+    // Build a new CMOV with the operands swapped.
+    SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
+                               N1.getOperand(2), Cond);
+    // Convert sub to add.
+    return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
+  }
+
+  // Handle ABD special case:
+  // NEG(ABD(X,Y)) -> NEG(CMOV(SUB(X,Y),SUB(Y,X))) -> CMOV(SUB(Y,X),SUB(X,Y)).
+  // ABD condition should come from a pair of matching subtracts.
+  if ((CC == X86::COND_L || CC == X86::COND_B) && isNullConstant(N0) &&
+      (FalseOp == Cond.getValue(0) || TrueOp == Cond.getValue(0)) &&
+      (TrueOp.getOpcode() == ISD::SUB || TrueOp.getOpcode() == X86ISD::SUB) &&
+      (FalseOp.getOpcode() == ISD::SUB || FalseOp.getOpcode() == X86ISD::SUB) &&
+      (TrueOp.getOperand(0) == FalseOp.getOperand(1)) &&
+      (TrueOp.getOperand(1) == FalseOp.getOperand(0))) {
+    // Build a new CMOV with the operands swapped.
+    return DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp, N1.getOperand(2),
+                       Cond);
+  }
+
+  return SDValue();
 }
 
 static SDValue combineSubSetcc(SDNode *N, SelectionDAG &DAG) {

diff --git a/llvm/test/CodeGen/X86/abds-neg.ll b/llvm/test/CodeGen/X86/abds-neg.ll
index b9b3436dd1ed9f..6e22d855dc8315 100644
--- a/llvm/test/CodeGen/X86/abds-neg.ll
+++ b/llvm/test/CodeGen/X86/abds-neg.ll
@@ -112,8 +112,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -144,8 +143,7 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -176,8 +174,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -208,8 +205,7 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i32:
@@ -217,8 +213,7 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovgel %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovll %esi, %eax
 ; X64-NEXT:    retq
   %aext = sext i32 %a to i64
   %bext = sext i32 %b to i64
@@ -237,8 +232,7 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i32_i16:
@@ -247,8 +241,7 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    subl %eax, %ecx
 ; X64-NEXT:    subl %edi, %eax
-; X64-NEXT:    cmovll %ecx, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovgel %ecx, %eax
 ; X64-NEXT:    retq
   %aext = sext i32 %a to i64
   %bext = sext i16 %b to i64
@@ -267,8 +260,7 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i32_undef:
@@ -276,8 +268,7 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovgel %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovll %esi, %eax
 ; X64-NEXT:    retq
   %aext = sext i32 %a to i64
   %bext = sext i32 %b to i64
@@ -319,8 +310,7 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovgeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovlq %rsi, %rax
 ; X64-NEXT:    retq
   %aext = sext i64 %a to i128
   %bext = sext i64 %b to i128
@@ -362,8 +352,7 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovgeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovlq %rsi, %rax
 ; X64-NEXT:    retq
   %aext = sext i64 %a to i128
   %bext = sext i64 %b to i128
@@ -558,8 +547,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -587,8 +575,7 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_minmax_i32:
@@ -596,8 +583,7 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovgel %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovll %esi, %eax
 ; X64-NEXT:    retq
   %min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
   %max = call i32 @llvm.smax.i32(i32 %a, i32 %b)
@@ -641,8 +627,7 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovgeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovlq %rsi, %rax
 ; X64-NEXT:    retq
   %min = call i64 @llvm.smin.i64(i64 %a, i64 %b)
   %max = call i64 @llvm.smax.i64(i64 %a, i64 %b)
@@ -776,8 +761,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -806,8 +790,7 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_cmp_i32:
@@ -815,8 +798,7 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovgel %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovll %esi, %eax
 ; X64-NEXT:    retq
   %cmp = icmp sge i32 %a, %b
   %ab = sub i32 %a, %b
@@ -853,8 +835,7 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovgeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovlq %rsi, %rax
 ; X64-NEXT:    retq
   %cmp = icmp slt i64 %a, %b
   %ab = sub i64 %a, %b
@@ -1031,8 +1012,7 @@ define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_subnsw_i32:
@@ -1040,8 +1020,7 @@ define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovgel %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovll %esi, %eax
 ; X64-NEXT:    retq
   %sub = sub nsw i32 %a, %b
   %abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
@@ -1057,8 +1036,7 @@ define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovll %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovgel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_subnsw_i32_undef:
@@ -1066,8 +1044,7 @@ define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovgel %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovll %esi, %eax
 ; X64-NEXT:    retq
   %sub = sub nsw i32 %a, %b
   %abs = call i32 @llvm.abs.i32(i32 %sub, i1 true)
@@ -1098,8 +1075,7 @@ define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovgeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovlq %rsi, %rax
 ; X64-NEXT:    retq
   %sub = sub nsw i64 %a, %b
   %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
@@ -1130,8 +1106,7 @@ define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovgeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovlq %rsi, %rax
 ; X64-NEXT:    retq
   %sub = sub nsw i64 %a, %b
   %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)

diff --git a/llvm/test/CodeGen/X86/abdu-neg.ll b/llvm/test/CodeGen/X86/abdu-neg.ll
index 1ded7e79e2510c..6bda99c89a37e3 100644
--- a/llvm/test/CodeGen/X86/abdu-neg.ll
+++ b/llvm/test/CodeGen/X86/abdu-neg.ll
@@ -112,8 +112,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -144,8 +143,7 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -176,8 +174,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -208,8 +205,7 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i32:
@@ -217,8 +213,7 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovael %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovbl %esi, %eax
 ; X64-NEXT:    retq
   %aext = zext i32 %a to i64
   %bext = zext i32 %b to i64
@@ -237,8 +232,7 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i32_i16:
@@ -247,8 +241,7 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    subl %eax, %ecx
 ; X64-NEXT:    subl %edi, %eax
-; X64-NEXT:    cmovbl %ecx, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovael %ecx, %eax
 ; X64-NEXT:    retq
   %aext = zext i32 %a to i64
   %bext = zext i16 %b to i64
@@ -267,8 +260,7 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i32_undef:
@@ -276,8 +268,7 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovael %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovbl %esi, %eax
 ; X64-NEXT:    retq
   %aext = zext i32 %a to i64
   %bext = zext i32 %b to i64
@@ -313,8 +304,7 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovaeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovbq %rsi, %rax
 ; X64-NEXT:    retq
   %aext = zext i64 %a to i128
   %bext = zext i64 %b to i128
@@ -350,8 +340,7 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovaeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovbq %rsi, %rax
 ; X64-NEXT:    retq
   %aext = zext i64 %a to i128
   %bext = zext i64 %b to i128
@@ -540,8 +529,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -569,8 +557,7 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_minmax_i32:
@@ -578,8 +565,7 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovael %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovbl %esi, %eax
 ; X64-NEXT:    retq
   %min = call i32 @llvm.umin.i32(i32 %a, i32 %b)
   %max = call i32 @llvm.umax.i32(i32 %a, i32 %b)
@@ -623,8 +609,7 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovaeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovbq %rsi, %rax
 ; X64-NEXT:    retq
   %min = call i64 @llvm.umin.i64(i64 %a, i64 %b)
   %max = call i64 @llvm.umax.i64(i64 %a, i64 %b)
@@ -758,8 +743,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -788,8 +772,7 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    subl %eax, %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmovbl %edx, %eax
-; X86-NEXT:    negl %eax
+; X86-NEXT:    cmovael %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_cmp_i32:
@@ -797,8 +780,7 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    subl %esi, %eax
 ; X64-NEXT:    subl %edi, %esi
-; X64-NEXT:    cmovael %esi, %eax
-; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovbl %esi, %eax
 ; X64-NEXT:    retq
   %cmp = icmp uge i32 %a, %b
   %ab = sub i32 %a, %b
@@ -832,8 +814,7 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovaeq %rsi, %rax
-; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovbq %rsi, %rax
 ; X64-NEXT:    retq
   %cmp = icmp ult i64 %a, %b
   %ab = sub i64 %a, %b


        

