[llvm] r360881 - [NFC][CodeGen] Add some more tests for pulling binops through shifts
Roman Lebedev via llvm-commits
llvm-commits at lists.llvm.org
Thu May 16 05:26:53 PDT 2019
Author: lebedevri
Date: Thu May 16 05:26:53 2019
New Revision: 360881
URL: http://llvm.org/viewvc/llvm-project?rev=360881&view=rev
Log:
[NFC][CodeGen] Add some more tests for pulling binops through shifts
The ashr variant may see relaxation in https://reviews.llvm.org/D61918
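To illustrate the fold these tests exercise, a minimal IR-level sketch (just the algebra, not the exact DAGCombine code; the function name here is made up for illustration):

define i32 @pull_and_through_shl(i32 %x) {
  %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
  %r = shl i32 %t0, 8
  ret i32 %r
}

; Pulling the 'and' through the 'shl' yields the equivalent form
;   %s = shl i32 %x, 8
;   %r = and i32 %s, 4278190080 ; 0xFF000000 == 0xFFFF0000 << 8
; i.e. the constant is shifted instead of the variable operand, which is what
; the AArch64 lsl+and and X86 shll+andl CHECK lines in the tests below verify.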
Added:
llvm/trunk/test/CodeGen/AArch64/pull-binop-through-shift.ll
llvm/trunk/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll
llvm/trunk/test/CodeGen/X86/pull-binop-through-shift.ll
llvm/trunk/test/CodeGen/X86/pull-conditional-binop-through-shift.ll
Added: llvm/trunk/test/CodeGen/AArch64/pull-binop-through-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/pull-binop-through-shift.ll?rev=360881&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/pull-binop-through-shift.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/pull-binop-through-shift.ll Thu May 16 05:26:53 2019
@@ -0,0 +1,310 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+; shift left
+
+define i32 @and_signbit_shl(i32 %x, i32* %dst) {
+; CHECK-LABEL: and_signbit_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl w8, w0, #8
+; CHECK-NEXT: and w0, w8, #0xff000000
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_shl(i32 %x, i32* %dst) {
+; CHECK-LABEL: and_nosignbit_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl w8, w0, #8
+; CHECK-NEXT: and w0, w8, #0xff000000
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_shl(i32 %x, i32* %dst) {
+; CHECK-LABEL: or_signbit_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl w8, w0, #8
+; CHECK-NEXT: orr w0, w8, #0xff000000
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_shl(i32 %x, i32* %dst) {
+; CHECK-LABEL: or_nosignbit_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl w8, w0, #8
+; CHECK-NEXT: orr w0, w8, #0xff000000
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_shl(i32 %x, i32* %dst) {
+; CHECK-LABEL: xor_signbit_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl w8, w0, #8
+; CHECK-NEXT: eor w0, w8, #0xff000000
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_shl(i32 %x, i32* %dst) {
+; CHECK-LABEL: xor_nosignbit_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl w8, w0, #8
+; CHECK-NEXT: eor w0, w8, #0xff000000
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_shl(i32 %x, i32* %dst) {
+; CHECK-LABEL: add_signbit_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-16777216
+; CHECK-NEXT: add w0, w8, w0, lsl #8
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_shl(i32 %x, i32* %dst) {
+; CHECK-LABEL: add_nosignbit_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-16777216
+; CHECK-NEXT: add w0, w8, w0, lsl #8
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+; logical shift right
+
+define i32 @and_signbit_lshr(i32 %x, i32* %dst) {
+; CHECK-LABEL: and_signbit_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #8
+; CHECK-NEXT: and w0, w8, #0xffff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_lshr(i32 %x, i32* %dst) {
+; CHECK-LABEL: and_nosignbit_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #8
+; CHECK-NEXT: and w0, w8, #0x7fff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_lshr(i32 %x, i32* %dst) {
+; CHECK-LABEL: or_signbit_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #8
+; CHECK-NEXT: orr w0, w8, #0xffff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_lshr(i32 %x, i32* %dst) {
+; CHECK-LABEL: or_nosignbit_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #8
+; CHECK-NEXT: orr w0, w8, #0x7fff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_lshr(i32 %x, i32* %dst) {
+; CHECK-LABEL: xor_signbit_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #8
+; CHECK-NEXT: eor w0, w8, #0xffff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_lshr(i32 %x, i32* %dst) {
+; CHECK-LABEL: xor_nosignbit_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #8
+; CHECK-NEXT: eor w0, w8, #0x7fff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_lshr(i32 %x, i32* %dst) {
+; CHECK-LABEL: add_signbit_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, #16, lsl #12 // =65536
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_lshr(i32 %x, i32* %dst) {
+; CHECK-LABEL: add_nosignbit_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2147418112
+; CHECK-NEXT: add w8, w0, w8
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+; arithmetic shift right
+
+define i32 @and_signbit_ashr(i32 %x, i32* %dst) {
+; CHECK-LABEL: and_signbit_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr w8, w0, #8
+; CHECK-NEXT: and w0, w8, #0xffffff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_ashr(i32 %x, i32* %dst) {
+; CHECK-LABEL: and_nosignbit_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #8
+; CHECK-NEXT: and w0, w8, #0x7fff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_ashr(i32 %x, i32* %dst) {
+; CHECK-LABEL: or_signbit_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0xffff0000
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_ashr(i32 %x, i32* %dst) {
+; CHECK-LABEL: or_nosignbit_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr w8, w0, #8
+; CHECK-NEXT: orr w0, w8, #0x7fff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_ashr(i32 %x, i32* %dst) {
+; CHECK-LABEL: xor_signbit_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor w8, w0, #0xffff0000
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_ashr(i32 %x, i32* %dst) {
+; CHECK-LABEL: xor_nosignbit_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr w8, w0, #8
+; CHECK-NEXT: eor w0, w8, #0x7fff00
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_ashr(i32 %x, i32* %dst) {
+; CHECK-LABEL: add_signbit_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, #16, lsl #12 // =65536
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_ashr(i32 %x, i32* %dst) {
+; CHECK-LABEL: add_nosignbit_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2147418112
+; CHECK-NEXT: add w8, w0, w8
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
Added: llvm/trunk/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll?rev=360881&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll Thu May 16 05:26:53 2019
@@ -0,0 +1,383 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+; shift left
+
+define i32 @and_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: and_signbit_select_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0xff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: and_nosignbit_select_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0xff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: or_signbit_select_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0xff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: or_nosignbit_select_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0xff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: xor_signbit_select_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor w8, w0, #0xff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: xor_nosignbit_select_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor w8, w0, #0xff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: add_signbit_select_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, #16, lsl #12 // =65536
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: add_nosignbit_select_shl:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2147418112
+; CHECK-NEXT: add w8, w0, w8
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+; logical shift right
+
+define i32 @and_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: and_signbit_select_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0xffff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: and_nosignbit_select_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0x7fff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: or_signbit_select_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0xffff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: or_nosignbit_select_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x7fff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: xor_signbit_select_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor w8, w0, #0xffff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: xor_nosignbit_select_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor w8, w0, #0x7fff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: add_signbit_select_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, #16, lsl #12 // =65536
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: add_nosignbit_select_lshr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2147418112
+; CHECK-NEXT: add w8, w0, w8
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: lsr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+; arithmetic shift right
+
+define i32 @and_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: and_signbit_select_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0xffff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: and_nosignbit_select_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0x7fff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: or_signbit_select_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0xffff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: or_nosignbit_select_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x7fff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: xor_signbit_select_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor w8, w0, #0xffff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: xor_nosignbit_select_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor w8, w0, #0x7fff0000
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: add_signbit_select_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, #16, lsl #12 // =65536
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; CHECK-LABEL: add_nosignbit_select_ashr:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2147418112
+; CHECK-NEXT: add w8, w0, w8
+; CHECK-NEXT: tst w1, #0x1
+; CHECK-NEXT: csel w8, w8, w0, ne
+; CHECK-NEXT: asr w0, w8, #8
+; CHECK-NEXT: str w0, [x2]
+; CHECK-NEXT: ret
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
Added: llvm/trunk/test/CodeGen/X86/pull-binop-through-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pull-binop-through-shift.ll?rev=360881&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pull-binop-through-shift.ll (added)
+++ llvm/trunk/test/CodeGen/X86/pull-binop-through-shift.ll Thu May 16 05:26:53 2019
@@ -0,0 +1,546 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=ALL,X32
+
+; shift left
+
+define i32 @and_signbit_shl(i32 %x, i32* %dst) {
+; X64-LABEL: and_signbit_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: andl $-16777216, %eax # imm = 0xFF000000
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_signbit_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $24, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_shl(i32 %x, i32* %dst) {
+; X64-LABEL: and_nosignbit_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: andl $-16777216, %eax # imm = 0xFF000000
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_nosignbit_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $24, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_shl(i32 %x, i32* %dst) {
+; X64-LABEL: or_signbit_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: orl $-16777216, %eax # imm = 0xFF000000
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_signbit_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: orl $-16777216, %eax # imm = 0xFF000000
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_shl(i32 %x, i32* %dst) {
+; X64-LABEL: or_nosignbit_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: orl $-16777216, %eax # imm = 0xFF000000
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_nosignbit_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: orl $-16777216, %eax # imm = 0xFF000000
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_shl(i32 %x, i32* %dst) {
+; X64-LABEL: xor_signbit_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: xorl $-16777216, %eax # imm = 0xFF000000
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_signbit_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $16711680, %eax # imm = 0xFF0000
+; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_shl(i32 %x, i32* %dst) {
+; X64-LABEL: xor_nosignbit_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: xorl $-16777216, %eax # imm = 0xFF000000
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_nosignbit_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $16711680, %eax # imm = 0xFF0000
+; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_shl(i32 %x, i32* %dst) {
+; X64-LABEL: add_signbit_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: addl $-16777216, %eax # imm = 0xFF000000
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_signbit_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: addl $-16777216, %eax # imm = 0xFF000000
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_shl(i32 %x, i32* %dst) {
+; X64-LABEL: add_nosignbit_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: addl $-16777216, %eax # imm = 0xFF000000
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_nosignbit_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: addl $-16777216, %eax # imm = 0xFF000000
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %r = shl i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+; logical shift right
+
+define i32 @and_signbit_lshr(i32 %x, i32* %dst) {
+; X64-LABEL: and_signbit_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: andl $16776960, %eax # imm = 0xFFFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_signbit_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $16, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_lshr(i32 %x, i32* %dst) {
+; X64-LABEL: and_nosignbit_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: andl $8388352, %eax # imm = 0x7FFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_nosignbit_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_lshr(i32 %x, i32* %dst) {
+; X64-LABEL: or_signbit_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: orl $16776960, %eax # imm = 0xFFFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_signbit_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_lshr(i32 %x, i32* %dst) {
+; X64-LABEL: or_nosignbit_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: orl $8388352, %eax # imm = 0x7FFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_nosignbit_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_lshr(i32 %x, i32* %dst) {
+; X64-LABEL: xor_signbit_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: xorl $16776960, %eax # imm = 0xFFFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_signbit_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_lshr(i32 %x, i32* %dst) {
+; X64-LABEL: xor_nosignbit_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: xorl $8388352, %eax # imm = 0x7FFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_nosignbit_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_lshr(i32 %x, i32* %dst) {
+; X64-LABEL: add_signbit_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: addl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_signbit_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_lshr(i32 %x, i32* %dst) {
+; X64-LABEL: add_nosignbit_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: addl $2147418112, %eax # imm = 0x7FFF0000
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_nosignbit_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %r = lshr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+; arithmetic shift right
+
+define i32 @and_signbit_ashr(i32 %x, i32* %dst) {
+; X64-LABEL: and_signbit_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: andl $-256, %eax
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_signbit_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_ashr(i32 %x, i32* %dst) {
+; X64-LABEL: and_nosignbit_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: andl $8388352, %eax # imm = 0x7FFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_nosignbit_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_ashr(i32 %x, i32* %dst) {
+; X64-LABEL: or_signbit_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_signbit_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_ashr(i32 %x, i32* %dst) {
+; X64-LABEL: or_nosignbit_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: orl $8388352, %eax # imm = 0x7FFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_nosignbit_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_ashr(i32 %x, i32* %dst) {
+; X64-LABEL: xor_signbit_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_signbit_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_ashr(i32 %x, i32* %dst) {
+; X64-LABEL: xor_nosignbit_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: xorl $8388352, %eax # imm = 0x7FFF00
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_nosignbit_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_ashr(i32 %x, i32* %dst) {
+; X64-LABEL: add_signbit_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: addl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_signbit_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_ashr(i32 %x, i32* %dst) {
+; X64-LABEL: add_nosignbit_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: addl $2147418112, %eax # imm = 0x7FFF0000
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_nosignbit_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %r = ashr i32 %t0, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
Added: llvm/trunk/test/CodeGen/X86/pull-conditional-binop-through-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pull-conditional-binop-through-shift.ll?rev=360881&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pull-conditional-binop-through-shift.ll (added)
+++ llvm/trunk/test/CodeGen/X86/pull-conditional-binop-through-shift.ll Thu May 16 05:26:53 2019
@@ -0,0 +1,717 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=ALL,X32
+
+; shift left
+
+define i32 @and_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: and_signbit_select_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $16711680, %eax # imm = 0xFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_signbit_select_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB0_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: andl $16711680, %eax # imm = 0xFF0000
+; X32-NEXT: .LBB0_2:
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: and_nosignbit_select_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $16711680, %eax # imm = 0xFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_nosignbit_select_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB1_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: andl $16711680, %eax # imm = 0xFF0000
+; X32-NEXT: .LBB1_2:
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: or_signbit_select_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $16711680, %eax # imm = 0xFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_signbit_select_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB2_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: orl $16711680, %eax # imm = 0xFF0000
+; X32-NEXT: .LBB2_2:
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: or_nosignbit_select_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $16711680, %eax # imm = 0xFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_nosignbit_select_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB3_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: orl $16711680, %eax # imm = 0xFF0000
+; X32-NEXT: .LBB3_2:
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: xor_signbit_select_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $16711680, %eax # imm = 0xFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_signbit_select_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB4_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: xorl $16711680, %eax # imm = 0xFF0000
+; X32-NEXT: .LBB4_2:
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: xor_nosignbit_select_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $16711680, %eax # imm = 0xFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_nosignbit_select_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB5_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: xorl $16711680, %eax # imm = 0xFF0000
+; X32-NEXT: .LBB5_2:
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: add_signbit_select_shl:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -65536(%rdi), %eax
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_signbit_select_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB6_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: addl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB6_2:
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: add_nosignbit_select_shl:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal 2147418112(%rdi), %eax
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shll $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_nosignbit_select_shl:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB7_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: addl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB7_2:
+; X32-NEXT: shll $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = shl i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+; logical shift right
+
+define i32 @and_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: and_signbit_select_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_signbit_select_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB8_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB8_2:
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: and_nosignbit_select_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $2147418112, %eax # imm = 0x7FFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_nosignbit_select_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB9_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: andl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB9_2:
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: or_signbit_select_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_signbit_select_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB10_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: orl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB10_2:
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: or_nosignbit_select_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $2147418112, %eax # imm = 0x7FFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_nosignbit_select_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB11_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: orl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB11_2:
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: xor_signbit_select_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_signbit_select_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB12_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: xorl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB12_2:
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: xor_nosignbit_select_lshr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $2147418112, %eax # imm = 0x7FFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_nosignbit_select_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB13_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: xorl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB13_2:
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: add_signbit_select_lshr:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -65536(%rdi), %eax
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_signbit_select_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB14_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: addl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB14_2:
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: add_nosignbit_select_lshr:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal 2147418112(%rdi), %eax
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_nosignbit_select_lshr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB15_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: addl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB15_2:
+; X32-NEXT: shrl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = lshr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+; arithmetic shift right
+
+define i32 @and_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: and_signbit_select_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_signbit_select_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB16_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB16_2:
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @and_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: and_nosignbit_select_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $2147418112, %eax # imm = 0x7FFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: and_nosignbit_select_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB17_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: andl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB17_2:
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @or_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: or_signbit_select_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_signbit_select_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB18_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: orl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB18_2:
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @or_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: or_nosignbit_select_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $2147418112, %eax # imm = 0x7FFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: or_nosignbit_select_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB19_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: orl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB19_2:
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @xor_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: xor_signbit_select_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $-65536, %eax # imm = 0xFFFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_signbit_select_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB20_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: xorl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB20_2:
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @xor_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: xor_nosignbit_select_ashr:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $2147418112, %eax # imm = 0x7FFF0000
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: xor_nosignbit_select_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB21_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: xorl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB21_2:
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+
+define i32 @add_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: add_signbit_select_ashr:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -65536(%rdi), %eax
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_signbit_select_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB22_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: addl $-65536, %eax # imm = 0xFFFF0000
+; X32-NEXT: .LBB22_2:
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}
+define i32 @add_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+; X64-LABEL: add_nosignbit_select_ashr:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal 2147418112(%rdi), %eax
+; X64-NEXT: testb $1, %sil
+; X64-NEXT: cmovel %edi, %eax
+; X64-NEXT: sarl $8, %eax
+; X64-NEXT: movl %eax, (%rdx)
+; X64-NEXT: retq
+;
+; X32-LABEL: add_nosignbit_select_ashr:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB23_2
+; X32-NEXT: # %bb.1:
+; X32-NEXT: addl $2147418112, %eax # imm = 0x7FFF0000
+; X32-NEXT: .LBB23_2:
+; X32-NEXT: sarl $8, %eax
+; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: retl
+ %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
+ %t1 = select i1 %cond, i32 %t0, i32 %x
+ %r = ashr i32 %t1, 8
+ store i32 %r, i32* %dst
+ ret i32 %r
+}