[llvm] r319542 - [ARM] and + load combine tests

Sam Parker via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 1 05:42:39 PST 2017


Author: sam_parker
Date: Fri Dec  1 05:42:39 2017
New Revision: 319542

URL: http://llvm.org/viewvc/llvm-project?rev=319542&view=rev
Log:
[ARM] and + load combine tests

Adding autogenerated tests for narrow load combines.

Differential Revision: https://reviews.llvm.org/D40709
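
For context, the functions in this file all feed a wide load through a bitwise
op and an 8- or 16-bit mask before comparing against zero, which is exactly the
shape a narrow-load combine would target. The autogenerated checks below capture
today's output (full ldr/ldrh plus a tst against the mask), so they serve as a
baseline the planned combine can be diffed against. A minimal sketch of the
intended before/after in LLVM IR follows; the function names and the combined
form are illustrative assumptions, not part of this patch:

; Before: a full i32 load whose value is only inspected through an 8-bit mask.
define i1 @byte_test_before(i32* %p) {
entry:
  %v = load i32, i32* %p, align 4
  %m = and i32 %v, 255
  %c = icmp eq i32 %m, 0
  ret i1 %c
}

; After (hypothetical combined form): the load is narrowed to the one byte the
; mask keeps, so the 'and' disappears. On a big-endian target (the ARMEB runs)
; the low byte sits at a different offset, which is why both endiannesses are
; tested above.
define i1 @byte_test_after(i32* %p) {
entry:
  %b = bitcast i32* %p to i8*
  %v = load i8, i8* %b, align 4
  %c = icmp eq i8 %v, 0
  ret i1 %c
}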

Added:
    llvm/trunk/test/CodeGen/ARM/and-load-combine.ll

Added: llvm/trunk/test/CodeGen/ARM/and-load-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/and-load-combine.ll?rev=319542&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/and-load-combine.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/and-load-combine.ll Fri Dec  1 05:42:39 2017
@@ -0,0 +1,674 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=armv7 %s -o - | FileCheck %s --check-prefix=ARM
+; RUN: llc -mtriple=armv7eb %s -o - | FileCheck %s --check-prefix=ARMEB
+; RUN: llc -mtriple=armv6m %s -o - | FileCheck %s --check-prefix=THUMB1
+; RUN: llc -mtriple=thumbv8m.main %s -o - | FileCheck %s --check-prefix=THUMB2
+
+define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a,
+; ARM-LABEL: cmp_xor8_short_short:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldrh r0, [r0]
+; ARM-NEXT:    ldrh r1, [r1]
+; ARM-NEXT:    eor r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_xor8_short_short:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldrh r0, [r0]
+; ARMEB-NEXT:    ldrh r1, [r1]
+; ARMEB-NEXT:    eor r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_xor8_short_short:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldrh r0, [r0]
+; THUMB1-NEXT:    ldrh r2, [r1]
+; THUMB1-NEXT:    eors r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB0_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB0_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_xor8_short_short:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldrh r0, [r0]
+; THUMB2-NEXT:    ldrh r1, [r1]
+; THUMB2-NEXT:    eors r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                                    i16* nocapture readonly %b) {
+entry:
+  %0 = load i16, i16* %a, align 2
+  %1 = load i16, i16* %b, align 2
+  %xor2 = xor i16 %1, %0
+  %2 = and i16 %xor2, 255
+  %cmp = icmp eq i16 %2, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a,
+; ARM-LABEL: cmp_xor8_short_int:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldrh r0, [r0]
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    eor r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_xor8_short_int:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldrh r0, [r0]
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    eor r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_xor8_short_int:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldrh r0, [r0]
+; THUMB1-NEXT:    ldr r2, [r1]
+; THUMB1-NEXT:    eors r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB1_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB1_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_xor8_short_int:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldrh r0, [r0]
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    eors r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                                  i32* nocapture readonly %b) {
+entry:
+  %0 = load i16, i16* %a, align 2
+  %conv = zext i16 %0 to i32
+  %1 = load i32, i32* %b, align 4
+  %xor = xor i32 %1, %conv
+  %and = and i32 %xor, 255
+  %cmp = icmp eq i32 %and, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a,
+; ARM-LABEL: cmp_xor8_int_int:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldr r0, [r0]
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    eor r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_xor8_int_int:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldr r0, [r0]
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    eor r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_xor8_int_int:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldr r0, [r0]
+; THUMB1-NEXT:    ldr r2, [r1]
+; THUMB1-NEXT:    eors r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB2_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB2_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_xor8_int_int:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldr r0, [r0]
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    eors r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                                i32* nocapture readonly %b) {
+entry:
+  %0 = load i32, i32* %a, align 4
+  %1 = load i32, i32* %b, align 4
+  %xor = xor i32 %1, %0
+  %and = and i32 %xor, 255
+  %cmp = icmp eq i32 %and, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a,
+; ARM-LABEL: cmp_xor16:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldr r0, [r0]
+; ARM-NEXT:    movw r2, #65535
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    eor r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, r2
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_xor16:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldr r0, [r0]
+; ARMEB-NEXT:    movw r2, #65535
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    eor r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, r2
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_xor16:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldr r0, [r0]
+; THUMB1-NEXT:    ldr r2, [r1]
+; THUMB1-NEXT:    eors r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #16
+; THUMB1-NEXT:    beq .LBB3_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB3_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_xor16:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldr r0, [r0]
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    eors r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #16
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                         i32* nocapture readonly %b) {
+entry:
+  %0 = load i32, i32* %a, align 4
+  %1 = load i32, i32* %b, align 4
+  %xor = xor i32 %1, %0
+  %and = and i32 %xor, 65535
+  %cmp = icmp eq i32 %and, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a,
+; ARM-LABEL: cmp_or8_short_short:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldrh r0, [r0]
+; ARM-NEXT:    ldrh r1, [r1]
+; ARM-NEXT:    orr r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_or8_short_short:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldrh r0, [r0]
+; ARMEB-NEXT:    ldrh r1, [r1]
+; ARMEB-NEXT:    orr r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_or8_short_short:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldrh r0, [r0]
+; THUMB1-NEXT:    ldrh r2, [r1]
+; THUMB1-NEXT:    orrs r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB4_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB4_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_or8_short_short:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldrh r0, [r0]
+; THUMB2-NEXT:    ldrh r1, [r1]
+; THUMB2-NEXT:    orrs r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                                   i16* nocapture readonly %b) {
+entry:
+  %0 = load i16, i16* %a, align 2
+  %1 = load i16, i16* %b, align 2
+  %or2 = or i16 %1, %0
+  %2 = and i16 %or2, 255
+  %cmp = icmp eq i16 %2, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a,
+; ARM-LABEL: cmp_or8_short_int:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldrh r0, [r0]
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    orr r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_or8_short_int:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldrh r0, [r0]
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    orr r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_or8_short_int:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldrh r0, [r0]
+; THUMB1-NEXT:    ldr r2, [r1]
+; THUMB1-NEXT:    orrs r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB5_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB5_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_or8_short_int:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldrh r0, [r0]
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    orrs r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                                 i32* nocapture readonly %b) {
+entry:
+  %0 = load i16, i16* %a, align 2
+  %conv = zext i16 %0 to i32
+  %1 = load i32, i32* %b, align 4
+  %or = or i32 %1, %conv
+  %and = and i32 %or, 255
+  %cmp = icmp eq i32 %and, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a,
+; ARM-LABEL: cmp_or8_int_int:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldr r0, [r0]
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    orr r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_or8_int_int:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldr r0, [r0]
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    orr r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_or8_int_int:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldr r0, [r0]
+; THUMB1-NEXT:    ldr r2, [r1]
+; THUMB1-NEXT:    orrs r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB6_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB6_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_or8_int_int:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldr r0, [r0]
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    orrs r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                               i32* nocapture readonly %b) {
+entry:
+  %0 = load i32, i32* %a, align 4
+  %1 = load i32, i32* %b, align 4
+  %or = or i32 %1, %0
+  %and = and i32 %or, 255
+  %cmp = icmp eq i32 %and, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a,
+; ARM-LABEL: cmp_or16:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldr r0, [r0]
+; ARM-NEXT:    movw r2, #65535
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    orr r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, r2
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_or16:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldr r0, [r0]
+; ARMEB-NEXT:    movw r2, #65535
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    orr r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, r2
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_or16:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldr r0, [r0]
+; THUMB1-NEXT:    ldr r2, [r1]
+; THUMB1-NEXT:    orrs r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #16
+; THUMB1-NEXT:    beq .LBB7_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB7_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_or16:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldr r0, [r0]
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    orrs r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #16
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                        i32* nocapture readonly %b) {
+entry:
+  %0 = load i32, i32* %a, align 4
+  %1 = load i32, i32* %b, align 4
+  %or = or i32 %1, %0
+  %and = and i32 %or, 65535
+  %cmp = icmp eq i32 %and, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a,
+; ARM-LABEL: cmp_and8_short_short:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldrh r1, [r1]
+; ARM-NEXT:    ldrh r0, [r0]
+; ARM-NEXT:    and r1, r0, r1
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_and8_short_short:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldrh r1, [r1]
+; ARMEB-NEXT:    ldrh r0, [r0]
+; ARMEB-NEXT:    and r1, r0, r1
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_and8_short_short:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldrh r1, [r1]
+; THUMB1-NEXT:    ldrh r2, [r0]
+; THUMB1-NEXT:    ands r2, r1
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB8_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB8_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_and8_short_short:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldrh r1, [r1]
+; THUMB2-NEXT:    ldrh r0, [r0]
+; THUMB2-NEXT:    ands r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                                    i16* nocapture readonly %b) {
+entry:
+  %0 = load i16, i16* %a, align 2
+  %1 = load i16, i16* %b, align 2
+  %and3 = and i16 %0, 255
+  %2 = and i16 %and3, %1
+  %cmp = icmp eq i16 %2, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a,
+; ARM-LABEL: cmp_and8_short_int:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldrh r0, [r0]
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    and r1, r1, r0
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_and8_short_int:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldrh r0, [r0]
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    and r1, r1, r0
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_and8_short_int:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldrh r0, [r0]
+; THUMB1-NEXT:    ldr r2, [r1]
+; THUMB1-NEXT:    ands r2, r0
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB9_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB9_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_and8_short_int:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldrh r0, [r0]
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    ands r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                                  i32* nocapture readonly %b) {
+entry:
+  %0 = load i16, i16* %a, align 2
+  %1 = load i32, i32* %b, align 4
+  %2 = and i16 %0, 255
+  %and = zext i16 %2 to i32
+  %and1 = and i32 %1, %and
+  %cmp = icmp eq i32 %and1, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a,
+; ARM-LABEL: cmp_and8_int_int:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    ldr r0, [r0]
+; ARM-NEXT:    and r1, r0, r1
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, #255
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_and8_int_int:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    ldr r0, [r0]
+; ARMEB-NEXT:    and r1, r0, r1
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, #255
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_and8_int_int:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldr r1, [r1]
+; THUMB1-NEXT:    ldr r2, [r0]
+; THUMB1-NEXT:    ands r2, r1
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #24
+; THUMB1-NEXT:    beq .LBB10_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB10_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_and8_int_int:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    ldr r0, [r0]
+; THUMB2-NEXT:    ands r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #24
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                                i32* nocapture readonly %b) {
+entry:
+  %0 = load i32, i32* %a, align 4
+  %1 = load i32, i32* %b, align 4
+  %and = and i32 %0, 255
+  %and1 = and i32 %and, %1
+  %cmp = icmp eq i32 %and1, 0
+  ret i1 %cmp
+}
+
+define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a,
+; ARM-LABEL: cmp_and16:
+; ARM:       @ BB#0: @ %entry
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    movw r2, #65535
+; ARM-NEXT:    ldr r0, [r0]
+; ARM-NEXT:    and r1, r0, r1
+; ARM-NEXT:    mov r0, #0
+; ARM-NEXT:    tst r1, r2
+; ARM-NEXT:    movweq r0, #1
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: cmp_and16:
+; ARMEB:       @ BB#0: @ %entry
+; ARMEB-NEXT:    ldr r1, [r1]
+; ARMEB-NEXT:    movw r2, #65535
+; ARMEB-NEXT:    ldr r0, [r0]
+; ARMEB-NEXT:    and r1, r0, r1
+; ARMEB-NEXT:    mov r0, #0
+; ARMEB-NEXT:    tst r1, r2
+; ARMEB-NEXT:    movweq r0, #1
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: cmp_and16:
+; THUMB1:       @ BB#0: @ %entry
+; THUMB1-NEXT:    ldr r1, [r1]
+; THUMB1-NEXT:    ldr r2, [r0]
+; THUMB1-NEXT:    ands r2, r1
+; THUMB1-NEXT:    movs r0, #1
+; THUMB1-NEXT:    movs r1, #0
+; THUMB1-NEXT:    lsls r2, r2, #16
+; THUMB1-NEXT:    beq .LBB11_2
+; THUMB1-NEXT:  @ BB#1: @ %entry
+; THUMB1-NEXT:    mov r0, r1
+; THUMB1-NEXT:  .LBB11_2: @ %entry
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: cmp_and16:
+; THUMB2:       @ BB#0: @ %entry
+; THUMB2-NEXT:    ldr r1, [r1]
+; THUMB2-NEXT:    ldr r0, [r0]
+; THUMB2-NEXT:    ands r0, r1
+; THUMB2-NEXT:    lsls r0, r0, #16
+; THUMB2-NEXT:    mov.w r0, #0
+; THUMB2-NEXT:    it eq
+; THUMB2-NEXT:    moveq r0, #1
+; THUMB2-NEXT:    bx lr
+                                         i32* nocapture readonly %b) {
+entry:
+  %0 = load i32, i32* %a, align 4
+  %1 = load i32, i32* %b, align 4
+  %and = and i32 %0, 65535
+  %and1 = and i32 %and, %1
+  %cmp = icmp eq i32 %and1, 0
+  ret i1 %cmp
+}



