[llvm] r321349 - [DAGCombine] Revert r321259
Sam Parker via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 22 00:36:25 PST 2017
Author: sam_parker
Date: Fri Dec 22 00:36:25 2017
New Revision: 321349
URL: http://llvm.org/viewvc/llvm-project?rev=321349&view=rev
Log:
[DAGCombine] Revert r321259
This reverts "Improve ReduceLoadWidth for SRL"; the patch is causing an issue
on the PPC64 BE sanitizer.
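For context, the reverted combine let ReduceLoadWidth look through a masking
AND that consumes the SRL and shrink the load itself, so a byte-aligned shift
plus mask could become a single narrow zero-extending load. A minimal example
of the pattern, taken verbatim from the ARM test updated below
(test_shift8_mask8): with r321259 this lowered to a single ldrb on ARM, and
after this revert it goes back to a full-width ldr plus ubfx.

define arm_aapcscc void @test_shift8_mask8(i32* nocapture %p) {
entry:
  ; load a 32-bit word, shift right by one byte, then mask to the low 8 bits
  %0 = load i32, i32* %p, align 4
  %shl = lshr i32 %0, 8
  %and = and i32 %shl, 255
  store i32 %and, i32* %p, align 4
  ret void
}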
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/test/CodeGen/ARM/shift-combine.ll
llvm/trunk/test/CodeGen/X86/h-registers-1.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=321349&r1=321348&r2=321349&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri Dec 22 00:36:25 2017
@@ -3788,16 +3788,6 @@ bool DAGCombiner::isLegalNarrowLoad(Load
if (LoadN->getNumValues() > 2)
return false;
- // Only allow byte offsets.
- if (ShAmt % 8)
- return false;
-
- // Ensure that this isn't going to produce an unsupported unaligned access.
- if (ShAmt && !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
- ExtVT, LoadN->getAddressSpace(),
- ShAmt / 8))
- return false;
-
// If the load that we're shrinking is an extload and we're not just
// discarding the extension we can't simply shrink the load. Bail.
// TODO: It would be possible to merge the extensions in some cases.
@@ -8271,22 +8261,6 @@ SDValue DAGCombiner::ReduceLoadWidth(SDN
// then the result of the shift+trunc is zero/undef (handled elsewhere).
if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits())
return SDValue();
-
- // If the SRL is only used by a masking AND, we may be able to adjust
- // the ExtVT to make the AND redundant.
- SDNode *Mask = *(N->use_begin());
- if (Mask->getOpcode() == ISD::AND &&
- isa<ConstantSDNode>(Mask->getOperand(1))) {
- const APInt &ShiftMask =
- cast<ConstantSDNode>(Mask->getOperand(1))->getAPIntValue();
- if (ShiftMask.isMask()) {
- EVT MaskedVT = EVT::getIntegerVT(*DAG.getContext(),
- ShiftMask.countTrailingOnes());
- // Recompute the type.
- if (TLI.isLoadExtLegal(ExtType, N0.getValueType(), MaskedVT))
- ExtVT = MaskedVT;
- }
- }
}
}
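The checks removed above were what gated the combine: the shift amount had to
be a whole number of bytes, and the resulting offset access had to be one the
target allows; otherwise the load was left untouched. A small sketch of the
guarded case, using the test_shift7_mask8 test removed further down: because a
shift of 7 bits is not byte-aligned, r321259 still emitted ldr plus ubfx for
it rather than a narrow load.

define arm_aapcscc void @test_shift7_mask8(i32* nocapture %p) {
entry:
  ; a 7-bit shift is not a byte offset, so the load was not narrowed
  %0 = load i32, i32* %p, align 4
  %shl = lshr i32 %0, 7
  %and = and i32 %shl, 255
  store i32 %and, i32* %p, align 4
  ret void
}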
Modified: llvm/trunk/test/CodeGen/ARM/shift-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/shift-combine.ll?rev=321349&r1=321348&r2=321349&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/shift-combine.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/shift-combine.ll Fri Dec 22 00:36:25 2017
@@ -217,127 +217,29 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: test_shift7_mask8
-; CHECK-BE: ldr r1, [r0]
-; CHECK-COMMON: ldr r1, [r0]
-; CHECK-COMMON: ubfx r1, r1, #7, #8
-; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift7_mask8(i32* nocapture %p) {
-entry:
- %0 = load i32, i32* %p, align 4
- %shl = lshr i32 %0, 7
- %and = and i32 %shl, 255
- store i32 %and, i32* %p, align 4
- ret void
-}
-
; CHECK-LABEL: test_shift8_mask8
-; CHECK-BE: ldrb r1, [r0, #2]
-; CHECK-COMMON: ldrb r1, [r0, #1]
-; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift8_mask8(i32* nocapture %p) {
-entry:
- %0 = load i32, i32* %p, align 4
- %shl = lshr i32 %0, 8
- %and = and i32 %shl, 255
- store i32 %and, i32* %p, align 4
- ret void
-}
-
-; CHECK-LABEL: test_shift8_mask7
; CHECK-BE: ldr r1, [r0]
; CHECK-COMMON: ldr r1, [r0]
-; CHECK-COMMON: ubfx r1, r1, #8, #7
+; CHECK-COMMON: ubfx r1, r1, #8, #8
; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift8_mask7(i32* nocapture %p) {
+define arm_aapcscc void @test_shift8_mask8(i32* nocapture %p) {
entry:
%0 = load i32, i32* %p, align 4
%shl = lshr i32 %0, 8
- %and = and i32 %shl, 127
- store i32 %and, i32* %p, align 4
- ret void
-}
-
-; CHECK-LABEL: test_shift9_mask8
-; CHECK-BE: ldr r1, [r0]
-; CHECK-COMMON: ldr r1, [r0]
-; CHECK-COMMON: ubfx r1, r1, #9, #8
-; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift9_mask8(i32* nocapture %p) {
-entry:
- %0 = load i32, i32* %p, align 4
- %shl = lshr i32 %0, 9
%and = and i32 %shl, 255
store i32 %and, i32* %p, align 4
ret void
}
; CHECK-LABEL: test_shift8_mask16
-; CHECK-ALIGN: ldr r1, [r0]
-; CHECK-ALIGN: ubfx r1, r1, #8, #16
-; CHECK-BE: ldrh r1, [r0, #1]
-; CHECK-ARM: ldrh r1, [r0, #1]
-; CHECK-THUMB: ldrh.w r1, [r0, #1]
-; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift8_mask16(i32* nocapture %p) {
-entry:
- %0 = load i32, i32* %p, align 4
- %shl = lshr i32 %0, 8
- %and = and i32 %shl, 65535
- store i32 %and, i32* %p, align 4
- ret void
-}
-
-; CHECK-LABEL: test_shift15_mask16
-; CHECK-COMMON: ldr r1, [r0]
-; CHECK-COMMON: ubfx r1, r1, #15, #16
-; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift15_mask16(i32* nocapture %p) {
-entry:
- %0 = load i32, i32* %p, align 4
- %shl = lshr i32 %0, 15
- %and = and i32 %shl, 65535
- store i32 %and, i32* %p, align 4
- ret void
-}
-
-; CHECK-LABEL: test_shift16_mask15
-; CHECK-BE: ldrh r1, [r0]
-; CHECK-COMMON: ldrh r1, [r0, #2]
-; CHECK-COMMON: bfc r1, #15, #17
-; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift16_mask15(i32* nocapture %p) {
-entry:
- %0 = load i32, i32* %p, align 4
- %shl = lshr i32 %0, 16
- %and = and i32 %shl, 32767
- store i32 %and, i32* %p, align 4
- ret void
-}
-
-; CHECK-LABEL: test_shift8_mask24
; CHECK-BE: ldr r1, [r0]
; CHECK-COMMON: ldr r1, [r0]
-; CHECK-ARM: lsr r1, r1, #8
-; CHECK-THUMB: lsrs r1, r1, #8
+; CHECK-COMMON: ubfx r1, r1, #8, #16
; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift8_mask24(i32* nocapture %p) {
+define arm_aapcscc void @test_shift8_mask16(i32* nocapture %p) {
entry:
%0 = load i32, i32* %p, align 4
%shl = lshr i32 %0, 8
- %and = and i32 %shl, 16777215
- store i32 %and, i32* %p, align 4
- ret void
-}
-
-; CHECK-LABEL: test_shift24_mask16
-; CHECK-BE: ldrb r1, [r0]
-; CHECK-COMMON: ldrb r1, [r0, #3]
-; CHECK-COMMON: str r1, [r0]
-define arm_aapcscc void @test_shift24_mask16(i32* nocapture %p) {
-entry:
- %0 = load i32, i32* %p, align 4
- %shl = lshr i32 %0, 24
%and = and i32 %shl, 65535
store i32 %and, i32* %p, align 4
ret void
Modified: llvm/trunk/test/CodeGen/X86/h-registers-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/h-registers-1.ll?rev=321349&r1=321348&r2=321349&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/h-registers-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/h-registers-1.ll Fri Dec 22 00:36:25 2017
@@ -22,18 +22,21 @@ define i64 @foo(i64 %a, i64 %b, i64 %c,
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: movq %rax, %r10
; CHECK-NEXT: movzbl %dh, %edx # NOREX
-; CHECK-NEXT: movzbl %ch, %ebp # NOREX
+; CHECK-NEXT: movzbl %ch, %eax # NOREX
+; CHECK-NEXT: movq %rax, %r11
; CHECK-NEXT: movq %r8, %rax
; CHECK-NEXT: movzbl %ah, %ecx # NOREX
; CHECK-NEXT: movq %r9, %rax
-; CHECK-NEXT: movzbl %ah, %ebx # NOREX
-; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
+; CHECK-NEXT: movzbl %ah, %ebp # NOREX
+; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movzbl %ah, %eax # NOREX
+; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ebx
+; CHECK-NEXT: movzbl %bh, %edi # NOREX
; CHECK-NEXT: movq %r10, %r8
; CHECK-NEXT: addq %r8, %rsi
-; CHECK-NEXT: addq %rbp, %rdx
+; CHECK-NEXT: addq %r11, %rdx
; CHECK-NEXT: addq %rsi, %rdx
-; CHECK-NEXT: addq %rbx, %rcx
+; CHECK-NEXT: addq %rbp, %rcx
; CHECK-NEXT: addq %rdi, %rax
; CHECK-NEXT: addq %rcx, %rax
; CHECK-NEXT: addq %rdx, %rax
@@ -55,18 +58,21 @@ define i64 @foo(i64 %a, i64 %b, i64 %c,
; GNUX32-NEXT: movzbl %ah, %eax # NOREX
; GNUX32-NEXT: movq %rax, %r10
; GNUX32-NEXT: movzbl %dh, %edx # NOREX
-; GNUX32-NEXT: movzbl %ch, %ebp # NOREX
+; GNUX32-NEXT: movzbl %ch, %eax # NOREX
+; GNUX32-NEXT: movq %rax, %r11
; GNUX32-NEXT: movq %r8, %rax
; GNUX32-NEXT: movzbl %ah, %ecx # NOREX
; GNUX32-NEXT: movq %r9, %rax
-; GNUX32-NEXT: movzbl %ah, %ebx # NOREX
-; GNUX32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; GNUX32-NEXT: movzbl {{[0-9]+}}(%esp), %edi
+; GNUX32-NEXT: movzbl %ah, %ebp # NOREX
+; GNUX32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GNUX32-NEXT: movzbl %ah, %eax # NOREX
+; GNUX32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; GNUX32-NEXT: movzbl %bh, %edi # NOREX
; GNUX32-NEXT: movq %r10, %r8
; GNUX32-NEXT: addq %r8, %rsi
-; GNUX32-NEXT: addq %rbp, %rdx
+; GNUX32-NEXT: addq %r11, %rdx
; GNUX32-NEXT: addq %rsi, %rdx
-; GNUX32-NEXT: addq %rbx, %rcx
+; GNUX32-NEXT: addq %rbp, %rcx
; GNUX32-NEXT: addq %rdi, %rax
; GNUX32-NEXT: addq %rcx, %rax
; GNUX32-NEXT: addq %rdx, %rax