[llvm] [AArch64] Commute eligible operands for ccmn (PR #99884)

via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 22 07:35:50 PDT 2024


llvmbot wrote:


@llvm/pr-subscribers-backend-aarch64

Author: AtariDreams (AtariDreams)

<details>
<summary>Changes</summary>

We can commute the operands of a ccmp and select a ccmn instead when the LHS is a negation (sub 0, x). This is always safe for EQ/NE equality comparisons; for unsigned comparisons it additionally requires the RHS to be known non-zero, and for signed comparisons it requires the RHS to be known not to be INT_MIN.

Proof:

It is known that if a > b, then b < a.

It is also known that if b < a, then -b > -a, since negating both sides reverses the order.

The degenerate cases are INT_MIN for signed comparisons and 0 for unsigned comparisons, where -x == x and the reversal no longer holds; this is where knownbits steps in: https://alive2.llvm.org/ce/z/8YTkie
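
To make the knownbits step concrete, here is a minimal, hypothetical C++ sketch of the two side conditions. The helper names provablyNotIntMin and provablyNonZero are made up for illustration; the diff below instead uses DAG.isKnownNeverZero and the cannotBeIntMin helper it references in AArch64ISelLowering.cpp.

```cpp
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// True if Op can be proven different from the signed minimum value; this is
// the extra condition for commuting a signed ccmp into a ccmn.
static bool provablyNotIntMin(SDValue Op, SelectionDAG &DAG) {
  KnownBits Known = DAG.computeKnownBits(Op);
  APInt IntMin = APInt::getSignedMinValue(Known.getBitWidth());
  // Op differs from INT_MIN if its sign bit is known zero, or if any bit
  // below the sign bit is known one.
  return Known.Zero.intersects(IntMin) || Known.One.intersects(~IntMin);
}

// True if Op can be proven non-zero; this is the extra condition for
// commuting an unsigned ccmp into a ccmn.
static bool provablyNonZero(SDValue Op, SelectionDAG &DAG) {
  return DAG.isKnownNeverZero(Op);
}
```

This is also why the tests below apply `or %x, 1` to the operands: a value whose low bit is known to be one is trivially non-zero and cannot be INT_MIN, so both side conditions are satisfied.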

---
Full diff: https://github.com/llvm/llvm-project/pull/99884.diff


2 Files Affected:

- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+7-4) 
- (modified) llvm/test/CodeGen/AArch64/cmp-chains.ll (+204) 


``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index eae0200f37f04..a7cfa9c1e0fe7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3577,12 +3577,15 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
   } else if (isCMN(RHS, CC, DAG)) {
     Opcode = AArch64ISD::CCMN;
     RHS = RHS.getOperand(1);
-  } else if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
-             isIntEqualitySetCC(CC)) {
-    // As we are looking for EQ/NE compares, the operands can be commuted ; can
-    // we combine a (CCMP (sub 0, op1), op2) into a CCMN instruction ?
+  } else if (isCMN(LHS, CC, DAG) &&
+             (isIntEqualitySetCC(CC) ||
+              (isUnsignedIntSetCC(CC) && DAG.isKnownNeverZero(RHS)) ||
+              (isSignedIntSetCC(CC) && cannotBeIntMin(RHS, DAG)))) {
     Opcode = AArch64ISD::CCMN;
     LHS = LHS.getOperand(1);
+
+    // Swap LHS and RHS to avoid worrying about changed CC
+    std::swap(LHS, RHS);
   }
   if (Opcode == 0)
     Opcode = AArch64ISD::CCMP;
diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index 4b816df75a730..9860a11693cab 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -501,3 +501,207 @@ entry:
   %land.ext = zext i1 %0 to i32
   ret i32 %land.ext
 }
+
+define i32 @and_ult_eq_s0s1_or(i32 %s0a, i32 %s1a, i32 %s2, i32 %s3) {
+; SDISEL-LABEL: and_ult_eq_s0s1_or:
+; SDISEL:       // %bb.0: // %entry
+; SDISEL-NEXT:    orr w8, w1, #0x1
+; SDISEL-NEXT:    orr w9, w0, #0x1
+; SDISEL-NEXT:    cmp w2, w3
+; SDISEL-NEXT:    ccmn w8, w9, #2, eq
+; SDISEL-NEXT:    mov w8, #20 // =0x14
+; SDISEL-NEXT:    mov w9, #10 // =0xa
+; SDISEL-NEXT:    csel w0, w9, w8, lo
+; SDISEL-NEXT:    ret
+;
+; GISEL-LABEL: and_ult_eq_s0s1_or:
+; GISEL:       // %bb.0: // %entry
+; GISEL-NEXT:    orr w8, w0, #0x1
+; GISEL-NEXT:    orr w9, w1, #0x1
+; GISEL-NEXT:    cmp w2, w3
+; GISEL-NEXT:    neg w8, w8
+; GISEL-NEXT:    mov w10, #20 // =0x14
+; GISEL-NEXT:    mov w11, #10 // =0xa
+; GISEL-NEXT:    ccmp w8, w9, #2, eq
+; GISEL-NEXT:    csel w0, w11, w10, lo
+; GISEL-NEXT:    ret
+entry:
+  %a1 = or i32 %s0a, 1
+  %s1 = or i32 %s1a, 1
+  %s0 = sub i32 0, %a1
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %r = select i1 %a, i32 10, i32 20
+  ret i32 %r
+}
+
+define i32 @and_ult_gt_s0s1_or(i32 %s0a, i32 %s1a, i32 %s2, i32 %s3) {
+; SDISEL-LABEL: and_ult_gt_s0s1_or:
+; SDISEL:       // %bb.0: // %entry
+; SDISEL-NEXT:    orr w8, w1, #0x1
+; SDISEL-NEXT:    orr w9, w0, #0x1
+; SDISEL-NEXT:    cmp w2, w3
+; SDISEL-NEXT:    ccmn w8, w9, #2, hi
+; SDISEL-NEXT:    mov w8, #20 // =0x14
+; SDISEL-NEXT:    mov w9, #10 // =0xa
+; SDISEL-NEXT:    csel w0, w9, w8, lo
+; SDISEL-NEXT:    ret
+;
+; GISEL-LABEL: and_ult_gt_s0s1_or:
+; GISEL:       // %bb.0: // %entry
+; GISEL-NEXT:    orr w8, w0, #0x1
+; GISEL-NEXT:    orr w9, w1, #0x1
+; GISEL-NEXT:    cmp w2, w3
+; GISEL-NEXT:    neg w8, w8
+; GISEL-NEXT:    mov w10, #20 // =0x14
+; GISEL-NEXT:    mov w11, #10 // =0xa
+; GISEL-NEXT:    ccmp w8, w9, #2, hi
+; GISEL-NEXT:    csel w0, w11, w10, lo
+; GISEL-NEXT:    ret
+entry:
+  %a1 = or i32 %s0a, 1
+  %s1 = or i32 %s1a, 1
+  %s0 = sub i32 0, %a1
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %r = select i1 %a, i32 10, i32 20
+  ret i32 %r
+}
+
+define i32 @and_ult_sgt_s0s1_or(i32 %s0a, i32 %s1a, i32 %s2, i32 %s3) {
+; SDISEL-LABEL: and_ult_sgt_s0s1_or:
+; SDISEL:       // %bb.0: // %entry
+; SDISEL-NEXT:    orr w8, w1, #0x1
+; SDISEL-NEXT:    orr w9, w0, #0x1
+; SDISEL-NEXT:    cmp w2, w3
+; SDISEL-NEXT:    ccmn w8, w9, #2, gt
+; SDISEL-NEXT:    mov w8, #20 // =0x14
+; SDISEL-NEXT:    mov w9, #10 // =0xa
+; SDISEL-NEXT:    csel w0, w9, w8, lo
+; SDISEL-NEXT:    ret
+;
+; GISEL-LABEL: and_ult_sgt_s0s1_or:
+; GISEL:       // %bb.0: // %entry
+; GISEL-NEXT:    orr w8, w0, #0x1
+; GISEL-NEXT:    orr w9, w1, #0x1
+; GISEL-NEXT:    cmp w2, w3
+; GISEL-NEXT:    neg w8, w8
+; GISEL-NEXT:    mov w10, #20 // =0x14
+; GISEL-NEXT:    mov w11, #10 // =0xa
+; GISEL-NEXT:    ccmp w8, w9, #2, gt
+; GISEL-NEXT:    csel w0, w11, w10, lo
+; GISEL-NEXT:    ret
+entry:
+  %a1 = or i32 %s0a, 1
+  %s1 = or i32 %s1a, 1
+  %s0 = sub i32 0, %a1
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %r = select i1 %a, i32 10, i32 20
+  ret i32 %r
+}
+
+define i32 @and_ult_slt_s0s1_or(i32 %s0a, i32 %s1a, i32 %s2, i32 %s3) {
+; SDISEL-LABEL: and_ult_slt_s0s1_or:
+; SDISEL:       // %bb.0: // %entry
+; SDISEL-NEXT:    orr w8, w1, #0x1
+; SDISEL-NEXT:    orr w9, w0, #0x1
+; SDISEL-NEXT:    cmp w2, w3
+; SDISEL-NEXT:    ccmn w8, w9, #2, lt
+; SDISEL-NEXT:    mov w8, #20 // =0x14
+; SDISEL-NEXT:    mov w9, #10 // =0xa
+; SDISEL-NEXT:    csel w0, w9, w8, lo
+; SDISEL-NEXT:    ret
+;
+; GISEL-LABEL: and_ult_slt_s0s1_or:
+; GISEL:       // %bb.0: // %entry
+; GISEL-NEXT:    orr w8, w0, #0x1
+; GISEL-NEXT:    orr w9, w1, #0x1
+; GISEL-NEXT:    cmp w2, w3
+; GISEL-NEXT:    neg w8, w8
+; GISEL-NEXT:    mov w10, #20 // =0x14
+; GISEL-NEXT:    mov w11, #10 // =0xa
+; GISEL-NEXT:    ccmp w8, w9, #2, lt
+; GISEL-NEXT:    csel w0, w11, w10, lo
+; GISEL-NEXT:    ret
+entry:
+  %a1 = or i32 %s0a, 1
+  %s1 = or i32 %s1a, 1
+  %s0 = sub i32 0, %a1
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %r = select i1 %a, i32 10, i32 20
+  ret i32 %r
+}
+
+define i32 @and_ult_ne_s0s1_or(i32 %s0a, i32 %s1a, i32 %s2, i32 %s3) {
+; SDISEL-LABEL: and_ult_ne_s0s1_or:
+; SDISEL:       // %bb.0: // %entry
+; SDISEL-NEXT:    orr w8, w1, #0x1
+; SDISEL-NEXT:    orr w9, w0, #0x1
+; SDISEL-NEXT:    cmp w2, w3
+; SDISEL-NEXT:    ccmn w8, w9, #2, ne
+; SDISEL-NEXT:    mov w8, #20 // =0x14
+; SDISEL-NEXT:    mov w9, #10 // =0xa
+; SDISEL-NEXT:    csel w0, w9, w8, lo
+; SDISEL-NEXT:    ret
+;
+; GISEL-LABEL: and_ult_ne_s0s1_or:
+; GISEL:       // %bb.0: // %entry
+; GISEL-NEXT:    orr w8, w0, #0x1
+; GISEL-NEXT:    orr w9, w1, #0x1
+; GISEL-NEXT:    cmp w2, w3
+; GISEL-NEXT:    neg w8, w8
+; GISEL-NEXT:    mov w10, #20 // =0x14
+; GISEL-NEXT:    mov w11, #10 // =0xa
+; GISEL-NEXT:    ccmp w8, w9, #2, ne
+; GISEL-NEXT:    csel w0, w11, w10, lo
+; GISEL-NEXT:    ret
+entry:
+  %a1 = or i32 %s0a, 1
+  %s1 = or i32 %s1a, 1
+  %s0 = sub i32 0, %a1
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %r = select i1 %a, i32 10, i32 20
+  ret i32 %r
+}
+
+define i32 @and_ult_ult_s0s1_or(i32 %s0a, i32 %s1a, i32 %s2, i32 %s3) {
+; SDISEL-LABEL: and_ult_ult_s0s1_or:
+; SDISEL:       // %bb.0: // %entry
+; SDISEL-NEXT:    orr w8, w1, #0x1
+; SDISEL-NEXT:    orr w9, w0, #0x1
+; SDISEL-NEXT:    cmp w2, w3
+; SDISEL-NEXT:    ccmn w8, w9, #2, lo
+; SDISEL-NEXT:    mov w8, #20 // =0x14
+; SDISEL-NEXT:    mov w9, #10 // =0xa
+; SDISEL-NEXT:    csel w0, w9, w8, lo
+; SDISEL-NEXT:    ret
+;
+; GISEL-LABEL: and_ult_ult_s0s1_or:
+; GISEL:       // %bb.0: // %entry
+; GISEL-NEXT:    orr w8, w0, #0x1
+; GISEL-NEXT:    orr w9, w1, #0x1
+; GISEL-NEXT:    cmp w2, w3
+; GISEL-NEXT:    neg w8, w8
+; GISEL-NEXT:    mov w10, #20 // =0x14
+; GISEL-NEXT:    mov w11, #10 // =0xa
+; GISEL-NEXT:    ccmp w8, w9, #2, lo
+; GISEL-NEXT:    csel w0, w11, w10, lo
+; GISEL-NEXT:    ret
+entry:
+  %a1 = or i32 %s0a, 1
+  %s1 = or i32 %s1a, 1
+  %s0 = sub i32 0, %a1
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %r = select i1 %a, i32 10, i32 20
+  ret i32 %r
+}

``````````

</details>


https://github.com/llvm/llvm-project/pull/99884


More information about the llvm-commits mailing list