[llvm] c40744d - [AArch64] Add some CCMP testing. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 1 10:15:40 PST 2022


Author: David Green
Date: 2022-02-01T18:15:34Z
New Revision: c40744d4d6702a3cee749703ab970aebe9f469dc

URL: https://github.com/llvm/llvm-project/commit/c40744d4d6702a3cee749703ab970aebe9f469dc
DIFF: https://github.com/llvm/llvm-project/commit/c40744d4d6702a3cee749703ab970aebe9f469dc.diff

LOG: [AArch64] Add some CCMP testing. NFC
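
For context: the new file exercises every ordered pair of integer conditions
joined by an and, feeding a zext. AArch64's ccmp instruction can evaluate such
chains entirely in the flags: it performs a compare only if a condition on the
current flags holds, and otherwise sets the flags to an immediate nzcv value.
As a hand-written sketch (not part of this commit), and_eq_eq below could in
principle be lowered to:

    cmp  w0, w1             // flags for s0 == s1
    ccmp w2, w3, #0, eq     // if eq: compare s2, s3; else nzcv = 0000 so 'eq' fails
    cset w0, eq             // 1 iff both comparisons held

The CHECK lines in the new test currently record the unfused cmp/cset/and
sequence instead, so the file documents the baseline this NFC change puts in
place.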

Added: 
    llvm/test/CodeGen/AArch64/andcompare.ll

Modified: 
    llvm/test/CodeGen/AArch64/arm64-ccmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/andcompare.ll b/llvm/test/CodeGen/AArch64/andcompare.ll
new file mode 100644
index 0000000000000..0820acc597f3e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/andcompare.ll
@@ -0,0 +1,1702 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-none-eabi -verify-machineinstrs %s -o - | FileCheck %s
+
+define i32 @and_eq_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_eq_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_eq_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp eq i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ne_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ne_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ne i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ult_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ult_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ult i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ule_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ule_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ls
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ule i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_ugt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_ugt_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp ugt i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_uge_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_uge_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp uge i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_slt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_slt_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp slt i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sle_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sle_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, le
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sle i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sgt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sgt_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_eq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp eq i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_ne:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp ult i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp ule i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp ugt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp uge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_slt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp slt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_sle:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, le
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp sle i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_sgt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp sgt i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}
+
+define i32 @and_sge_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
+; CHECK-LABEL: and_sge_sge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, ge
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ge
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %c0 = icmp sge i32 %s0, %s1
+  %c1 = icmp sge i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  ret i32 %z
+}

diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
index 256d22d47b18f..280e87b98901b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -1402,4 +1402,126 @@ define i32 @deep_or2(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
   ret i32 %sel
 }
 
+; This test checks that ccmps are not created in a way that would give them
+; multiple uses. At present none are formed here.
+define i32 @multiccmp(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %x, i32 %y) #0 {
+; CHECK-LABEL: multiccmp:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT:    mov x19, x5
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w20, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w21, ne
+; CHECK-NEXT:    tst w20, w21
+; CHECK-NEXT:    csel w0, w5, w4, ne
+; CHECK-NEXT:    bl _callee
+; CHECK-NEXT:    tst w20, w21
+; CHECK-NEXT:    csel w0, w0, w19, ne
+; CHECK-NEXT:    bl _callee
+; CHECK-NEXT:    ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: multiccmp:
+; GISEL:       ; %bb.0: ; %entry
+; GISEL-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
+; GISEL-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; GISEL-NEXT:    mov x19, x5
+; GISEL-NEXT:    cmp w0, w1
+; GISEL-NEXT:    cset w8, gt
+; GISEL-NEXT:    cmp w2, w3
+; GISEL-NEXT:    cset w9, ne
+; GISEL-NEXT:    and w20, w8, w9
+; GISEL-NEXT:    tst w20, #0x1
+; GISEL-NEXT:    csel w0, w5, w4, ne
+; GISEL-NEXT:    bl _callee
+; GISEL-NEXT:    tst w20, #0x1
+; GISEL-NEXT:    csel w0, w0, w19, ne
+; GISEL-NEXT:    bl _callee
+; GISEL-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; GISEL-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
+; GISEL-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %s = select i1 %a, i32 %y, i32 %x
+  %o = call i32 @callee(i32 %s)
+  %z1 = select i1 %a, i32 %o, i32 %y
+  %p = call i32 @callee(i32 %z1)
+  ret i32 %p
+}
+
+define i32 @multiccmp2(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %x, i32 %y) #0 {
+; CHECK-LABEL: multiccmp2:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT:    mov x19, x5
+; CHECK-NEXT:    mov x20, x3
+; CHECK-NEXT:    mov x21, x0
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w22, ne
+; CHECK-NEXT:    tst w8, w22
+; CHECK-NEXT:    csel w0, w5, w4, ne
+; CHECK-NEXT:    bl _callee
+; CHECK-NEXT:    cmp w21, w20
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    tst w22, w8
+; CHECK-NEXT:    csel w0, w0, w19, ne
+; CHECK-NEXT:    bl _callee
+; CHECK-NEXT:    ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: multiccmp2:
+; GISEL:       ; %bb.0: ; %entry
+; GISEL-NEXT:    stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
+; GISEL-NEXT:    stp x20, x19, [sp, #16] ; 16-byte Folded Spill
+; GISEL-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; GISEL-NEXT:    mov x19, x0
+; GISEL-NEXT:    mov x20, x3
+; GISEL-NEXT:    mov x21, x5
+; GISEL-NEXT:    cmp w0, w1
+; GISEL-NEXT:    cset w8, gt
+; GISEL-NEXT:    cmp w2, w3
+; GISEL-NEXT:    cset w22, ne
+; GISEL-NEXT:    and w8, w8, w22
+; GISEL-NEXT:    tst w8, #0x1
+; GISEL-NEXT:    csel w0, w5, w4, ne
+; GISEL-NEXT:    bl _callee
+; GISEL-NEXT:    cmp w19, w20
+; GISEL-NEXT:    cset w8, eq
+; GISEL-NEXT:    and w8, w22, w8
+; GISEL-NEXT:    tst w8, #0x1
+; GISEL-NEXT:    csel w0, w0, w21, ne
+; GISEL-NEXT:    bl _callee
+; GISEL-NEXT:    ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
+; GISEL-NEXT:    ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
+; GISEL-NEXT:    ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
+; GISEL-NEXT:    ret
+entry:
+  %c0 = icmp sgt i32 %s0, %s1
+  %c1 = icmp ne i32 %s2, %s3
+  %a = and i1 %c0, %c1
+  %z = zext i1 %a to i32
+  %s = select i1 %a, i32 %y, i32 %x
+  %o = call i32 @callee(i32 %s)
+
+  %c2 = icmp eq i32 %s0, %s3
+  %a1 = and i1 %c1, %c2
+  %z1 = select i1 %a1, i32 %o, i32 %y
+  %p = call i32 @callee(i32 %z1)
+  ret i32 %p
+}
+declare i32 @callee(i32)
+
 attributes #0 = { nounwind }
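
The multiccmp tests above illustrate why the predicate results are
materialised with cset and re-tested with tst: %a feeds two selects separated
by a call, and a call clobbers NZCV, so a fused cmp/ccmp pair could not supply
flags to both users. A hand-written sketch (not output from this commit) of
what an invalid multi-use fusion would look like:

    cmp  w0, w1             // flags for s0 vs s1
    ccmp w2, w3, #4, gt     // if gt: compare s2, s3; else nzcv = 0100 so 'ne' fails
    csel w0, w5, w4, ne     // first use of the fused flags
    bl   _callee            // the call clobbers NZCV...
    csel w0, w0, w19, ne    // ...so this second use of the same flags would be wrong

Hence the compiler keeps the cset/tst form whenever the combined condition has
more than one use.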


        

