[llvm] edcc68e - [X86] Make sure SF is updated when optimizing for `jg/jge/jl/jle`
Phoebe Wang via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 20 18:09:35 PDT 2022
Author: Phoebe Wang
Date: 2022-06-21T09:09:27+08:00
New Revision: edcc68e86f784fa6e1514f230a3c89a275a66bb6
URL: https://github.com/llvm/llvm-project/commit/edcc68e86f784fa6e1514f230a3c89a275a66bb6
DIFF: https://github.com/llvm/llvm-project/commit/edcc68e86f784fa6e1514f230a3c89a275a66bb6.diff
LOG: [X86] Make sure SF is updated when optimizing for `jg/jge/jl/jle`
This fixes issue #56103.
Reviewed By: mingmingl
Differential Revision: https://reviews.llvm.org/D128122
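For context: `jg/jge/jl/jle` are the signed condition codes and they read SF (together with OF and ZF). The peephole in optimizeCompareInstr may delete a test/cmp against zero and reuse EFLAGS from an earlier arithmetic instruction, which is only sound for these conditions if that instruction produced the SF the deleted compare would have produced. The standalone C++ sketch below is illustrative only (it is not LLVM code) and assumes one reading of the new test: the flags there come from a 32-bit `and` while the deleted compare would have been a 64-bit `testq`, so a reused SF would reflect bit 31 of the narrow result instead of bit 63 of the zero-extended value.

#include <cstdint>
#include <cstdio>

// SF, as an x86 ALU instruction of a given width sets it, is the most
// significant bit of the result. TEST/AND clear OF, so the taken
// condition of a signed jle (ZF=1 or SF!=OF) reduces to (ZF || SF).
static bool sf32(uint32_t r) { return (r >> 31) != 0; }
static bool sf64(uint64_t r) { return (r >> 63) != 0; }

int main() {
  // Hypothetical 32-bit AND result with bit 31 set.
  uint32_t r32 = 0x80000000u;
  // Writing a 32-bit register zeroes the upper half, so a later 64-bit
  // testq inspects the zero-extension: a positive 64-bit number.
  uint64_t r64 = r32;

  bool jle_after_testq = (r64 == 0) || sf64(r64);        // correct: not taken
  bool jle_reusing_and_flags = (r32 == 0) || sf32(r32);  // taken: wrong branch

  std::printf("testq+jle taken: %d, reused 32-bit flags: %d\n",
              jle_after_testq, jle_reusing_and_flags);   // prints 0 and 1
  return 0;
}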
Added:
llvm/test/CodeGen/X86/pr56103.ll
Modified:
llvm/lib/Target/X86/X86InstrInfo.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index f2101ea4c0522..ec32ac2acad1a 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4462,6 +4462,11 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
return false;
case X86::COND_G: case X86::COND_GE:
case X86::COND_L: case X86::COND_LE:
+ // If SF is used, but the instruction doesn't update the SF, then we
+ // can't do the optimization.
+ if (NoSignFlag)
+ return false;
+ LLVM_FALLTHROUGH;
case X86::COND_O: case X86::COND_NO:
// If OF is used, the instruction needs to clear it like CmpZero does.
if (!ClearsOverflowFlag)
diff --git a/llvm/test/CodeGen/X86/pr56103.ll b/llvm/test/CodeGen/X86/pr56103.ll
new file mode 100644
index 0000000000000..3d979a021dbfd
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr56103.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux-generic | FileCheck %s
+
+@e = global i16 0, align 2
+@a = global i32 0, align 4
+@c = global i32 0, align 4
+@b = global i64 0, align 8
+
+; Check the test instruction won't be optimized by peephole opt.
+
+define dso_local i32 @main() nounwind {
+; CHECK-LABEL: main:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: movq e@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movw $1, (%rax)
+; CHECK-NEXT: movq b@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movq $1, (%rax)
+; CHECK-NEXT: movq a@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movl (%rax), %ecx
+; CHECK-NEXT: movl $-2, %eax
+; CHECK-NEXT: sarl %cl, %eax
+; CHECK-NEXT: movq c@GOTPCREL(%rip), %rdx
+; CHECK-NEXT: movl (%rdx), %edx
+; CHECK-NEXT: decl %edx
+; CHECK-NEXT: movzwl %ax, %eax
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: xorl %edx, %eax
+; CHECK-NEXT: notl %ecx
+; CHECK-NEXT: andl %eax, %ecx
+; CHECK-NEXT: testq %rcx, %rcx
+; CHECK-NEXT: jle .LBB0_2
+; CHECK-NEXT: # %bb.1: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_2: # %if.then
+; CHECK-NEXT: callq abort@PLT
+entry:
+ store i16 1, ptr @e, align 2
+ store i64 1, ptr @b, align 8
+ %0 = load i32, ptr @a, align 4
+ %shr = ashr i32 -2, %0
+ %1 = load i32, ptr @c, align 4
+ %sub = add i32 %1, -1
+ %conv2 = zext i32 %sub to i64
+ %2 = and i32 %shr, 65535
+ %conv3 = zext i32 %2 to i64
+ %sub4 = add nsw i64 %conv3, -1
+ %xor = xor i64 %sub4, %conv2
+ %neg5 = xor i32 %0, -1
+ %conv6 = sext i32 %neg5 to i64
+ %and = and i64 %xor, %conv6
+ %cmp = icmp slt i64 %and, 1
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @abort() #2
+ unreachable
+
+if.end: ; preds = %entry
+ ret i32 0
+}
+
+declare void @abort()
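
As a rough, self-contained model of the check added in the X86InstrInfo.cpp hunk above: conditions that read SF and/or OF may only be redirected to an earlier instruction's EFLAGS when that instruction updates SF and clears OF the way a compare with zero would. The C++ sketch below is a simplification for illustration, not the real optimizeCompareInstr interface; the parameters merely mirror the LLVM variable names `NoSignFlag` and `ClearsOverflowFlag`. This is why the `testq`/`jle` pair in the test above has to survive, as the CHECK lines require.

#include <cstdio>

// Subset of the condition codes handled by the patched switch.
enum class Cond { E, NE, G, GE, L, LE, O, NO };

// Simplified legality question: may a TEST/CMP-with-zero feeding `CC` be
// removed in favor of EFLAGS already produced by an earlier instruction,
// given what is known about that instruction's flag behavior?
static bool canReuseFlags(Cond CC, bool NoSignFlag, bool ClearsOverflowFlag) {
  switch (CC) {
  case Cond::E:
  case Cond::NE:
    // ZF-only conditions: fine, assuming the def sets ZF the way a
    // compare with zero would (the real code establishes that separately).
    return true;
  case Cond::G:
  case Cond::GE:
  case Cond::L:
  case Cond::LE:
    // Signed conditions read SF; if the def does not update SF, bail out.
    // This is the check the commit adds.
    if (NoSignFlag)
      return false;
    [[fallthrough]];
  case Cond::O:
  case Cond::NO:
    // OF readers (including the signed conditions via the fallthrough):
    // the removed compare with zero would have cleared OF, so require
    // the def to clear it as well.
    return ClearsOverflowFlag;
  }
  return false;
}

int main() {
  // A def whose SF is not meaningful for the compare being removed:
  std::printf("jle may reuse flags: %d\n",
              canReuseFlags(Cond::LE, /*NoSignFlag=*/true,
                            /*ClearsOverflowFlag=*/true)); // prints 0
  std::printf("je  may reuse flags: %d\n",
              canReuseFlags(Cond::E, /*NoSignFlag=*/true,
                            /*ClearsOverflowFlag=*/true)); // prints 1
  return 0;
}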