[llvm] [AArch64] Use isKnownNonZero to optimize eligible compares to cmn (PR #96349)
via llvm-commits
llvm-commits at lists.llvm.org
Sat Jul 6 15:01:01 PDT 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/96349
From 301907ee70d371ab83d91a8b0dfaaaf0ca91b7f5 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Fri, 21 Jun 2024 15:12:41 -0400
Subject: [PATCH 1/2] Pre-commit tests (NFC)
---
llvm/test/CodeGen/AArch64/cmp-chains.ll | 32 ++++++++++++++++++++
llvm/test/CodeGen/AArch64/cmp-select-sign.ll | 15 +++++++++
2 files changed, 47 insertions(+)
diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index 14cb0c82b1c03..d51c9c946f467 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -258,3 +258,35 @@ define i32 @neg_range_int(i32 %a, i32 %b, i32 %c) {
ret i32 %retval.0
}
+; (b > -3 || a < -(c | 1))
+define i32 @neg_range_int_cmn(i32 %a, i32 %b, i32 %c) {
+; SDISEL-LABEL: neg_range_int_cmn:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: orr w8, w2, #0x1
+; SDISEL-NEXT: neg w8, w8
+; SDISEL-NEXT: cmp w8, w0
+; SDISEL-NEXT: ccmn w1, #3, #0, le
+; SDISEL-NEXT: csel w0, w1, w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_cmn:
+; GISEL: // %bb.0:
+; GISEL-NEXT: orr w8, w2, #0x1
+; GISEL-NEXT: cmn w1, #3
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: cset w9, gt
+; GISEL-NEXT: cmp w8, w0
+; GISEL-NEXT: cset w8, gt
+; GISEL-NEXT: orr w8, w9, w8
+; GISEL-NEXT: and w8, w8, #0x1
+; GISEL-NEXT: tst w8, #0x1
+; GISEL-NEXT: csel w0, w1, w0, ne
+; GISEL-NEXT: ret
+ %or = or i32 %c, 1
+ %sub = sub nsw i32 0, %or
+ %cmp = icmp sgt i32 %b, -3
+ %cmp1 = icmp sgt i32 %sub, %a
+ %1 = select i1 %cmp, i1 true, i1 %cmp1
+ %ret = select i1 %1, i32 %b, i32 %a
+ ret i32 %ret
+}
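For readers following the IR, the new test computes roughly the following (a hand-written C++ rendering for orientation, not part of the patch). The `c | 1` is the key ingredient: an odd value can be neither 0 nor INT_MIN, so its negation is exact, which is what makes the compare eligible for cmn/ccmn once the optimization in patch 2 lands.

// Hand-written equivalent of neg_range_int_cmn, illustrative only.
int neg_range_int_cmn(int a, int b, int c) {
  return (b > -3 || a < -(c | 1)) ? b : a;
}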
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index 09a6e26fe5a40..ca20a7a435a64 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -262,4 +262,19 @@ define <4 x i65> @sign_4xi65(<4 x i65> %a) {
ret <4 x i65> %res
}
+define i32 @or_neg(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub nsw i32 0, %3
+ %5 = icmp sgt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
declare void @use_4xi1(<4 x i1>)
From 8a14ebd8465bd19fc279015880bd4b216ce6fdb8 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Fri, 21 Jun 2024 15:26:02 -0400
Subject: [PATCH 2/2] [AArch64] Use isKnownNonZero to optimize to cmn instead
of cmp
Turning a cmp into a cmn saves the separate negation (and sometimes a mov), so take that saving into account when deciding whether to swap the compare operands.
Also, do not consider swapping when the right-hand operand is a constant whose absolute value can already be encoded directly as a cmn immediate.
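To make the effect concrete, here is a hand-written C++ illustration of the source pattern this helps (it mirrors the or_neg test below; the asm in the comments is taken from the updated CHECK lines):

// Since x | 1 is odd, it can be neither 0 nor INT_MIN, so known-bits
// analysis proves the negation exact and the compare can absorb it:
//   before: orr w8, w0, #0x1 ; neg w8, w8 ; cmp w8, w1 ; cset w0, gt
//   after:  orr w8, w0, #0x1 ; cmn w1, w8 ; cset w0, lt
int or_neg(int x, int y) {
  int v = x | 1;
  return -v > y;
}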
---
.../Target/AArch64/AArch64ISelLowering.cpp | 59 +++-
llvm/test/CodeGen/AArch64/addsub-shifted.ll | 206 +++++++++---
llvm/test/CodeGen/AArch64/cmp-chains.ll | 3 +-
llvm/test/CodeGen/AArch64/cmp-select-sign.ll | 5 +-
llvm/test/CodeGen/AArch64/cmp-to-cmn.ll | 313 ++++++++++--------
.../CodeGen/AArch64/typepromotion-overflow.ll | 16 +-
6 files changed, 388 insertions(+), 214 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 341cf51173ccc..1a73739300dca 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3391,6 +3391,11 @@ static bool isLegalArithImmed(uint64_t C) {
return IsLegal;
}
+// Use known bits to determine whether the operand can possibly be INT_MIN.
+// If it cannot, negating it is exact (no signed overflow).
+static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) {
+  KnownBits KnownSrc = DAG.computeKnownBits(CheckedVal);
+  return !KnownSrc.getSignedMinValue().isMinSignedValue();
+}
+
+
// Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
// the grounds that "op1 - (-op2) == op1 + op2"? Not always: the C and V flags
// can be set differently by this operation. It comes down to whether
@@ -3398,12 +3403,15 @@ static bool isLegalArithImmed(uint64_t C) {
// everything is fine. If not then the optimization is wrong. Thus general
// comparisons are only valid if op2 != 0.
//
-// So, finally, the only LLVM-native comparisons that don't mention C and V
-// are SETEQ and SETNE. They're the only ones we can safely use CMN for in
-// the absence of information about op2.
-static bool isCMN(SDValue Op, ISD::CondCode CC) {
+// So, finally, in the absence of information about op2, the only LLVM-native
+// comparisons we can always use CMN for are the equality comparisons, which
+// don't mention C or V at all. Given extra information, unsigned comparisons
+// are also safe when op2 is known never to be zero, and signed comparisons
+// are safe when op2 can never be INT_MIN (so negating it cannot overflow).
+static bool isCMN(SDValue Op, SDValue CheckedVal, ISD::CondCode CC,
+ SelectionDAG &DAG) {
return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
- (CC == ISD::SETEQ || CC == ISD::SETNE);
+ (isIntEqualitySetCC(CC) ||
+ (isUnsignedIntSetCC(CC) ? DAG.isKnownNeverZero(CheckedVal)
+ : cannotBeIntMin(CheckedVal, DAG)));
}
static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
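The correctness argument above can be checked mechanically. Below is a small standalone C++ sketch (a reviewer-style illustration, independent of the patch) that models the NZCV flags SUBS and ADDS produce. Running it shows that CMP a, -b and CMN a, b agree for ordinary values of b, but diverge in C when b == 0 and in V when b == INT32_MIN:

#include <cstdint>
#include <cstdio>

struct Flags { bool N, Z, C, V; };

// NZCV as AArch64 SUBS would set them for a - b (i.e. CMP a, b).
static Flags subs(int32_t a, int32_t b) {
  uint32_t r = (uint32_t)a - (uint32_t)b;
  return {(int32_t)r < 0, r == 0,
          (uint32_t)a >= (uint32_t)b,         // C: no borrow
          ((a ^ b) & (a ^ (int32_t)r)) < 0};  // V: signed overflow
}

// NZCV as AArch64 ADDS would set them for a + b (i.e. CMN a, b).
static Flags adds(int32_t a, int32_t b) {
  uint32_t r = (uint32_t)a + (uint32_t)b;
  return {(int32_t)r < 0, r == 0,
          r < (uint32_t)a,                     // C: carry out
          (~(a ^ b) & (a ^ (int32_t)r)) < 0};  // V: signed overflow
}

int main() {
  for (int32_t b : {7, 0, INT32_MIN}) {
    Flags cmp = subs(5, (int32_t)(0u - (uint32_t)b)); // CMP 5, -b
    Flags cmn = adds(5, b);                           // CMN 5, b
    std::printf("b=%11d  cmp NZCV=%d%d%d%d  cmn NZCV=%d%d%d%d\n", b, cmp.N,
                cmp.Z, cmp.C, cmp.V, cmn.N, cmn.Z, cmn.C, cmn.V);
  }
}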
@@ -3448,15 +3456,26 @@ static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// register to WZR/XZR if it ends up being unused.
unsigned Opcode = AArch64ISD::SUBS;
- if (isCMN(RHS, CC)) {
+ if (RHS.getOpcode() == ISD::SUB && isCMN(RHS, RHS.getOperand(1), CC, DAG)) {
// Can we combine a (CMP op1, (sub 0, op2) into a CMN instruction ?
Opcode = AArch64ISD::ADDS;
RHS = RHS.getOperand(1);
- } else if (isCMN(LHS, CC)) {
+ } else if (isCMN(LHS, RHS, CC, DAG) &&
+ isCMN(LHS, LHS.getOperand(1), CC, DAG)) {
// As we are looking for EQ/NE compares, the operands can be commuted ; can
// we combine a (CMP (sub 0, op1), op2) into a CMN instruction ?
+ // We are not swapping the operands here, but folding away the negation
+ // still requires the condition code to be swapped (e.g. GT <-> LT).
+ CC = ISD::getSetCCSwappedOperands(CC);
Opcode = AArch64ISD::ADDS;
LHS = LHS.getOperand(1);
+ } else if (LHS.getOpcode() == ISD::SUB &&
+ isCMN(LHS, LHS.getOperand(1), CC, DAG)) {
+ // The operands of an eligible compare can be commuted (with the condition
+ // code swapped); can we combine a (CMP (sub 0, op1), op2) into a CMN
+ // instruction?
+ LHS = LHS.getOperand(1);
+ std::swap(LHS, RHS);
+ CC = ISD::getSetCCSwappedOperands(CC);
+ Opcode = AArch64ISD::ADDS;
} else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) {
if (LHS.getOpcode() == ISD::AND) {
// Similarly, (CMP (and X, Y), 0) can be implemented with a TST
@@ -3556,11 +3575,18 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
}
} else if (RHS.getOpcode() == ISD::SUB) {
-    SDValue SubOp0 = RHS.getOperand(0);
-    if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
-      // See emitComparison() on why we can only do this for SETEQ and SETNE.
+    if (isCMN(RHS, RHS.getOperand(1), CC, DAG)) {
Opcode = AArch64ISD::CCMN;
RHS = RHS.getOperand(1);
}
+ } else if (LHS.getOpcode() == ISD::SUB) {
+ if (isCMN(LHS, LHS.getOperand(1), CC, DAG)) {
+ LHS = LHS.getOperand(1);
+ std::swap(LHS, RHS);
+ CC = ISD::getSetCCSwappedOperands(CC);
+ Opcode = AArch64ISD::CCMN;
+ }
}
if (Opcode == 0)
Opcode = AArch64ISD::CCMP;
@@ -3876,10 +3902,17 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// cmp w13, w12
// can be turned into:
// cmp w12, w11, lsl #1
- if (!isa<ConstantSDNode>(RHS) || !isLegalArithImmed(RHS->getAsZExtVal())) {
- SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
-
- if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
+ if (!isa<ConstantSDNode>(RHS) ||
+ !isLegalArithImmed(RHS->getAsAPIntVal().abs().getZExtValue())) {
+ bool LHSIsCMN =
+ LHS.getOpcode() == ISD::SUB && isCMN(LHS, LHS.getOperand(1), CC, DAG);
+ bool RHSIsCMN =
+ RHS.getOpcode() == ISD::SUB && isCMN(RHS, RHS.getOperand(1), CC, DAG);
+ SDValue TheLHS = LHSIsCMN ? LHS.getOperand(1) : LHS;
+ SDValue TheRHS = RHSIsCMN ? RHS.getOperand(1) : RHS;
+
+ if (getCmpOperandFoldingProfit(TheLHS) + (LHSIsCMN ? 1 : 0) >
+ getCmpOperandFoldingProfit(TheRHS) + (RHSIsCMN ? 1 : 0)) {
std::swap(LHS, RHS);
CC = ISD::getSetCCSwappedOperands(CC);
}
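The heuristic change above can be summarized with a small sketch (illustrative only: lhsProfit/rhsProfit stand in for whatever getCmpOperandFoldingProfit returns; its exact scoring is not reproduced here):

// An operand with higher folding profit (a foldable shift/extend) should
// end up on the RHS of the compare, so we swap when the LHS scores higher.
// A foldable negation now counts as one extra point, since leaving it where
// the CMN can absorb it deletes the neg instruction outright.
static bool shouldSwapCmpOperands(int lhsProfit, int rhsProfit, bool lhsIsCMN,
                                  bool rhsIsCMN) {
  return lhsProfit + (lhsIsCMN ? 1 : 0) > rhsProfit + (rhsIsCMN ? 1 : 0);
}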
diff --git a/llvm/test/CodeGen/AArch64/addsub-shifted.ll b/llvm/test/CodeGen/AArch64/addsub-shifted.ll
index 2580d3532ba0d..177a379081f34 100644
--- a/llvm/test/CodeGen/AArch64/addsub-shifted.ll
+++ b/llvm/test/CodeGen/AArch64/addsub-shifted.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -debugify-and-strip-all-safe -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
; RUN: llc -debugify-and-strip-all-safe -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 -global-isel -pass-remarks-missed=gisel* 2>&1 | FileCheck %s --check-prefixes=CHECK,FALLBACK
@@ -8,233 +9,319 @@
define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: Lloh0:
+; CHECK-NEXT: adrp x8, _var32@PAGE
+; CHECK-NEXT: ldr w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: add w9, w0, w9, lsl #18
+; CHECK-NEXT: str w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: ldr w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: add w9, w0, w9, lsl #31
+; CHECK-NEXT: str w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: ldr w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: sub w9, w0, w9, lsl #5
+; CHECK-NEXT: str w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: ldr w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: lsl w9, w9, #19
+; CHECK-NEXT: sub w9, w9, w0
+; CHECK-NEXT: str w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: ldr w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: neg w9, w9, lsl #15
+; CHECK-NEXT: str w9, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: Lloh1:
+; CHECK-NEXT: adrp x8, _var64@PAGE
+; CHECK-NEXT: ldr x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: add x9, x2, x9, lsl #18
+; CHECK-NEXT: str x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: ldr x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: add x9, x2, x9, lsl #31
+; CHECK-NEXT: str x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: ldr x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: sub x9, x2, x9, lsl #5
+; CHECK-NEXT: str x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: ldr x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: lsl x9, x9, #19
+; CHECK-NEXT: sub x9, x9, x2
+; CHECK-NEXT: str x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: ldr x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: neg x9, x9, lsl #60
+; CHECK-NEXT: str x9, [x8, _var64@PAGEOFF]
+; CHECK-NEXT: ret
+; CHECK-NEXT: .loh AdrpAdrp Lloh0, Lloh1
%rhs1 = load volatile i32, ptr @var32
%shift1 = shl i32 %rhs1, 18
%val1 = add i32 %lhs32, %shift1
store volatile i32 %val1, ptr @var32
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
%rhs2 = load volatile i32, ptr @var32
%shift2 = shl i32 %rhs2, 31
%val2 = add i32 %shift2, %lhs32
store volatile i32 %val2, ptr @var32
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
%rhs3 = load volatile i32, ptr @var32
%shift3 = shl i32 %rhs3, 5
%val3 = sub i32 %lhs32, %shift3
store volatile i32 %val3, ptr @var32
-; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
; Subtraction is not commutative!
%rhs4 = load volatile i32, ptr @var32
%shift4 = shl i32 %rhs4, 19
%val4 = sub i32 %shift4, %lhs32
store volatile i32 %val4, ptr @var32
-; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
%lhs4a = load volatile i32, ptr @var32
%shift4a = shl i32 %lhs4a, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, ptr @var32
-; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
%rhs5 = load volatile i64, ptr @var64
%shift5 = shl i64 %rhs5, 18
%val5 = add i64 %lhs64, %shift5
store volatile i64 %val5, ptr @var64
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
%rhs6 = load volatile i64, ptr @var64
%shift6 = shl i64 %rhs6, 31
%val6 = add i64 %shift6, %lhs64
store volatile i64 %val6, ptr @var64
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
%rhs7 = load volatile i64, ptr @var64
%shift7 = shl i64 %rhs7, 5
%val7 = sub i64 %lhs64, %shift7
store volatile i64 %val7, ptr @var64
-; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
; Subtraction is not commutative!
%rhs8 = load volatile i64, ptr @var64
%shift8 = shl i64 %rhs8, 19
%val8 = sub i64 %shift8, %lhs64
store volatile i64 %val8, ptr @var64
-; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
%lhs8a = load volatile i64, ptr @var64
%shift8a = shl i64 %lhs8a, 60
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, ptr @var64
-; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
ret void
-; CHECK: ret
}
define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
-; CHECK-LABEL: test_lsr_arith:
+; FALLBACK-LABEL: test_lsr_arith:
+; FALLBACK: ; %bb.0:
+; FALLBACK-NEXT: add w8, w0, w1, lsr #18
+; FALLBACK-NEXT: Lloh2:
+; FALLBACK-NEXT: adrp x9, _var32@PAGE
+; FALLBACK-NEXT: add w10, w0, w1, lsr #31
+; FALLBACK-NEXT: lsr w11, w1, #19
+; FALLBACK-NEXT: str w8, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: sub w8, w0, w1, lsr #5
+; FALLBACK-NEXT: str w10, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: neg w10, w0, lsr #15
+; FALLBACK-NEXT: str w8, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: sub w8, w11, w0
+; FALLBACK-NEXT: lsr x11, x3, #19
+; FALLBACK-NEXT: str w8, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: add x8, x2, x3, lsr #18
+; FALLBACK-NEXT: str w10, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: Lloh3:
+; FALLBACK-NEXT: adrp x9, _var64@PAGE
+; FALLBACK-NEXT: add x10, x2, x3, lsr #31
+; FALLBACK-NEXT: str x8, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: sub x8, x2, x3, lsr #5
+; FALLBACK-NEXT: str x10, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: neg x10, x2, lsr #45
+; FALLBACK-NEXT: str x8, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: sub x8, x11, x2
+; FALLBACK-NEXT: str x8, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: str x10, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: ret
+; FALLBACK-NEXT: .loh AdrpAdrp Lloh2, Lloh3
%shift1 = lshr i32 %rhs32, 18
%val1 = add i32 %lhs32, %shift1
store volatile i32 %val1, ptr @var32
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18
%shift2 = lshr i32 %rhs32, 31
%val2 = add i32 %shift2, %lhs32
store volatile i32 %val2, ptr @var32
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31
%shift3 = lshr i32 %rhs32, 5
%val3 = sub i32 %lhs32, %shift3
store volatile i32 %val3, ptr @var32
-; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5
; Subtraction is not commutative!
%shift4 = lshr i32 %rhs32, 19
%val4 = sub i32 %shift4, %lhs32
store volatile i32 %val4, ptr @var32
-; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19
%shift4a = lshr i32 %lhs32, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, ptr @var32
-; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
%shift5 = lshr i64 %rhs64, 18
%val5 = add i64 %lhs64, %shift5
store volatile i64 %val5, ptr @var64
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18
%shift6 = lshr i64 %rhs64, 31
%val6 = add i64 %shift6, %lhs64
store volatile i64 %val6, ptr @var64
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31
%shift7 = lshr i64 %rhs64, 5
%val7 = sub i64 %lhs64, %shift7
store volatile i64 %val7, ptr @var64
-; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5
; Subtraction is not commutative!
%shift8 = lshr i64 %rhs64, 19
%val8 = sub i64 %shift8, %lhs64
store volatile i64 %val8, ptr @var64
-; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19
%shift8a = lshr i64 %lhs64, 45
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, ptr @var64
-; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
ret void
-; CHECK: ret
}
define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
-; CHECK-LABEL: test_asr_arith:
+; FALLBACK-LABEL: test_asr_arith:
+; FALLBACK: ; %bb.0:
+; FALLBACK-NEXT: add w8, w0, w1, asr #18
+; FALLBACK-NEXT: Lloh4:
+; FALLBACK-NEXT: adrp x9, _var32@PAGE
+; FALLBACK-NEXT: add w10, w0, w1, asr #31
+; FALLBACK-NEXT: asr w11, w1, #19
+; FALLBACK-NEXT: str w8, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: sub w8, w0, w1, asr #5
+; FALLBACK-NEXT: str w10, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: neg w10, w0, asr #15
+; FALLBACK-NEXT: str w8, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: sub w8, w11, w0
+; FALLBACK-NEXT: asr x11, x3, #19
+; FALLBACK-NEXT: str w8, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: add x8, x2, x3, asr #18
+; FALLBACK-NEXT: str w10, [x9, _var32@PAGEOFF]
+; FALLBACK-NEXT: Lloh5:
+; FALLBACK-NEXT: adrp x9, _var64@PAGE
+; FALLBACK-NEXT: add x10, x2, x3, asr #31
+; FALLBACK-NEXT: str x8, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: sub x8, x2, x3, asr #5
+; FALLBACK-NEXT: str x10, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: neg x10, x2, asr #45
+; FALLBACK-NEXT: str x8, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: sub x8, x11, x2
+; FALLBACK-NEXT: str x8, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: str x10, [x9, _var64@PAGEOFF]
+; FALLBACK-NEXT: ret
+; FALLBACK-NEXT: .loh AdrpAdrp Lloh4, Lloh5
%shift1 = ashr i32 %rhs32, 18
%val1 = add i32 %lhs32, %shift1
store volatile i32 %val1, ptr @var32
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18
%shift2 = ashr i32 %rhs32, 31
%val2 = add i32 %shift2, %lhs32
store volatile i32 %val2, ptr @var32
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31
%shift3 = ashr i32 %rhs32, 5
%val3 = sub i32 %lhs32, %shift3
store volatile i32 %val3, ptr @var32
-; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5
; Subtraction is not commutative!
%shift4 = ashr i32 %rhs32, 19
%val4 = sub i32 %shift4, %lhs32
store volatile i32 %val4, ptr @var32
-; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19
%shift4a = ashr i32 %lhs32, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, ptr @var32
-; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
%shift5 = ashr i64 %rhs64, 18
%val5 = add i64 %lhs64, %shift5
store volatile i64 %val5, ptr @var64
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18
%shift6 = ashr i64 %rhs64, 31
%val6 = add i64 %shift6, %lhs64
store volatile i64 %val6, ptr @var64
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31
%shift7 = ashr i64 %rhs64, 5
%val7 = sub i64 %lhs64, %shift7
store volatile i64 %val7, ptr @var64
-; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5
; Subtraction is not commutative!
%shift8 = ashr i64 %rhs64, 19
%val8 = sub i64 %shift8, %lhs64
store volatile i64 %val8, ptr @var64
-; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19
%shift8a = ashr i64 %lhs64, 45
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, ptr @var64
-; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
ret void
-; CHECK: ret
}
define void @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64, i32 %v) {
; CHECK-LABEL: test_cmp:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: cmp w0, w1, lsl #13
+; CHECK-NEXT: b.lo LBB3_7
+; CHECK-NEXT: ; %bb.1: ; %t2
+; CHECK-NEXT: adrp x8, _var32@PAGE
+; CHECK-NEXT: cmp w0, w1, lsr #20
+; CHECK-NEXT: str w4, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: b.eq LBB3_7
+; CHECK-NEXT: ; %bb.2: ; %t3
+; CHECK-NEXT: cmp w0, w1, asr #9
+; CHECK-NEXT: str w4, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: b.eq LBB3_7
+; CHECK-NEXT: ; %bb.3: ; %t4
+; CHECK-NEXT: cmp x2, x3, lsl #43
+; CHECK-NEXT: str w4, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: b.lo LBB3_7
+; CHECK-NEXT: ; %bb.4: ; %t5
+; CHECK-NEXT: cmp x2, x3, lsr #20
+; CHECK-NEXT: str w4, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: b.eq LBB3_7
+; CHECK-NEXT: ; %bb.5: ; %t6
+; CHECK-NEXT: cmp x2, x3, asr #59
+; CHECK-NEXT: str w4, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: b.eq LBB3_7
+; CHECK-NEXT: ; %bb.6: ; %t7
+; CHECK-NEXT: str w4, [x8, _var32@PAGEOFF]
+; CHECK-NEXT: LBB3_7: ; %end
+; CHECK-NEXT: ret
%shift1 = shl i32 %rhs32, 13
%tst1 = icmp uge i32 %lhs32, %shift1
br i1 %tst1, label %t2, label %end
-; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13
t2:
store volatile i32 %v, ptr @var32
%shift2 = lshr i32 %rhs32, 20
%tst2 = icmp ne i32 %lhs32, %shift2
br i1 %tst2, label %t3, label %end
-; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
t3:
store volatile i32 %v, ptr @var32
%shift3 = ashr i32 %rhs32, 9
%tst3 = icmp ne i32 %lhs32, %shift3
br i1 %tst3, label %t4, label %end
-; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9
t4:
store volatile i32 %v, ptr @var32
%shift4 = shl i64 %rhs64, 43
%tst4 = icmp uge i64 %lhs64, %shift4
br i1 %tst4, label %t5, label %end
-; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43
t5:
store volatile i32 %v, ptr @var32
%shift5 = lshr i64 %rhs64, 20
%tst5 = icmp ne i64 %lhs64, %shift5
br i1 %tst5, label %t6, label %end
-; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
t6:
store volatile i32 %v, ptr @var32
%shift6 = ashr i64 %rhs64, 59
%tst6 = icmp ne i64 %lhs64, %shift6
br i1 %tst6, label %t7, label %end
-; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59
t7:
store volatile i32 %v, ptr @var32
@@ -242,11 +329,35 @@ t7:
end:
ret void
-; CHECK: ret
}
define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
-; CHECK-LABEL: test_cmn:
+; FALLBACK-LABEL: test_cmn:
+; FALLBACK: ; %bb.0:
+; FALLBACK-NEXT: neg w8, w1, lsl #13
+; FALLBACK-NEXT: cmp w0, w8
+; FALLBACK-NEXT: b.lo LBB4_6
+; FALLBACK-NEXT: ; %bb.1: ; %t2
+; FALLBACK-NEXT: cmn w0, w1, lsr #20
+; FALLBACK-NEXT: b.eq LBB4_6
+; FALLBACK-NEXT: ; %bb.2: ; %t3
+; FALLBACK-NEXT: cmn w0, w1, asr #9
+; FALLBACK-NEXT: b.ne LBB4_6
+; FALLBACK-NEXT: ; %bb.3: ; %t4
+; FALLBACK-NEXT: neg x8, x3, lsl #43
+; FALLBACK-NEXT: cmp x2, x8
+; FALLBACK-NEXT: b.ge LBB4_6
+; FALLBACK-NEXT: ; %bb.4: ; %t5
+; FALLBACK-NEXT: cmn x2, x3, lsr #20
+; FALLBACK-NEXT: b.eq LBB4_6
+; FALLBACK-NEXT: ; %bb.5: ; %t6
+; FALLBACK-NEXT: cmn x2, x3, asr #59
+; FALLBACK-NEXT: mov w0, #1 ; =0x1
+; FALLBACK-NEXT: b.ne LBB4_7
+; FALLBACK-NEXT: LBB4_6: ; %end
+; FALLBACK-NEXT: mov w0, wzr
+; FALLBACK-NEXT: LBB4_7: ; %common.ret
+; FALLBACK-NEXT: ret
%shift1 = shl i32 %rhs32, 13
%val1 = sub i32 0, %shift1
@@ -254,22 +365,18 @@ define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
br i1 %tst1, label %t2, label %end
; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
; 0 then the results will differ.
-; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
-; CHECK: cmp {{w[0-9]+}}, [[RHS]]
t2:
%shift2 = lshr i32 %rhs32, 20
%val2 = sub i32 0, %shift2
%tst2 = icmp ne i32 %lhs32, %val2
br i1 %tst2, label %t3, label %end
-; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
t3:
%shift3 = ashr i32 %rhs32, 9
%val3 = sub i32 0, %shift3
%tst3 = icmp eq i32 %lhs32, %val3
br i1 %tst3, label %t4, label %end
-; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9
t4:
%shift4 = shl i64 %rhs64, 43
@@ -277,27 +384,22 @@ t4:
%tst4 = icmp slt i64 %lhs64, %val4
br i1 %tst4, label %t5, label %end
; Again, it's important that cmn isn't used here in case %rhs64 == 0.
-; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
-; CHECK: cmp {{x[0-9]+}}, [[RHS]]
t5:
%shift5 = lshr i64 %rhs64, 20
%val5 = sub i64 0, %shift5
%tst5 = icmp ne i64 %lhs64, %val5
br i1 %tst5, label %t6, label %end
-; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
t6:
%shift6 = ashr i64 %rhs64, 59
%val6 = sub i64 0, %shift6
%tst6 = icmp ne i64 %lhs64, %val6
br i1 %tst6, label %t7, label %end
-; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59
t7:
ret i32 1
end:
ret i32 0
-; CHECK: ret
}
diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index d51c9c946f467..4ea515911b0c5 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -263,8 +263,7 @@ define i32 @neg_range_int_cmn(i32 %a, i32 %b, i32 %c) {
; SDISEL-LABEL: neg_range_int_cmn:
; SDISEL: // %bb.0:
; SDISEL-NEXT: orr w8, w2, #0x1
-; SDISEL-NEXT: neg w8, w8
-; SDISEL-NEXT: cmp w8, w0
+; SDISEL-NEXT: cmn w0, w8
; SDISEL-NEXT: ccmn w1, #3, #0, le
; SDISEL-NEXT: csel w0, w1, w0, gt
; SDISEL-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index ca20a7a435a64..37821d2993eb9 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -266,9 +266,8 @@ define i32 @or_neg(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub nsw i32 0, %3
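Note that the condition flipped from gt to lt because the operands changed sides. Here is a quick standalone check (a reviewer's sketch, not part of the patch) that the new `cmn w1, w8` + `lt` combination really computes the original predicate `-(x|1) > y`:

#include <cassert>
#include <cstdint>

// Model `cmn y, v` (an ADDS of y + v) and evaluate the AArch64 "lt"
// condition, N != V; with v odd (never 0, never INT32_MIN) this matches
// the original predicate -v > y, i.e. y < -v.
static bool cmn_lt(int32_t y, int32_t v) {
  uint32_t r = (uint32_t)y + (uint32_t)v;
  bool n = (int32_t)r < 0;
  bool ov = (~(y ^ v) & (y ^ (int32_t)r)) < 0; // signed-overflow flag V
  return n != ov;
}

int main() {
  for (int32_t x = -1000; x <= 1000; ++x)
    for (int32_t y = -1000; y <= 1000; ++y) {
      int32_t v = x | 1; // odd by construction
      assert(cmn_lt(y, v) == (y < -v));
    }
}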
diff --git a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
index 6da98142573f6..1cc194e77b94b 100644
--- a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
@@ -1,12 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "arm64"
define i1 @test_EQ_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IllEbT
-; CHECK: cmn x1, x0
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IllEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn x0, x1
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp eq i64 %add, %a
@@ -14,10 +16,11 @@ entry:
}
define i1 @test_EQ_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IliEbT
-; CHECK: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IliEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn x0, w1, sxtw
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -26,10 +29,12 @@ entry:
}
define i1 @test_EQ_IlsEbT(i64 %a, i16 %b) {
-; CHECK-LABEL: test_EQ_IlsEbT
-; CHECK: cmn x0, w1, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IlsEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: cmn x0, w1, sxth
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %b to i64
%add = sub i64 0, %a
@@ -38,10 +43,12 @@ entry:
}
define i1 @test_EQ_IlcEbT(i64 %a, i8 %b) {
-; CHECK-LABEL: test_EQ_IlcEbT
-; CHECK: cmn x0, w1, uxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IlcEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: cmn x0, w1, uxtb
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %b to i64
%add = sub i64 0, %a
@@ -50,10 +57,11 @@ entry:
}
define i1 @test_EQ_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IilEbT
-; CHECK: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IilEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn x1, w0, sxtw
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -62,10 +70,11 @@ entry:
}
define i1 @test_EQ_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IiiEbT
-; CHECK: cmn w1, w0
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IiiEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w0, w1
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp eq i32 %add, %a
@@ -73,10 +82,11 @@ entry:
}
define i1 @test_EQ_IisEbT(i32 %a, i16 %b) {
-; CHECK-LABEL: test_EQ_IisEbT
-; CHECK: cmn w0, w1, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IisEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w0, w1, sxth
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %b to i32
%add = sub i32 0, %a
@@ -85,10 +95,11 @@ entry:
}
define i1 @test_EQ_IicEbT(i32 %a, i8 %b) {
-; CHECK-LABEL: test_EQ_IicEbT
-; CHECK: cmn w0, w1, uxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IicEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w0, w1, uxtb
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %b to i32
%add = sub i32 0, %a
@@ -97,10 +108,12 @@ entry:
}
define i1 @test_EQ_IslEbT(i16 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IslEbT
-; CHECK: cmn x1, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IslEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: cmn x1, w0, sxth
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %a to i64
%add = sub i64 0, %b
@@ -109,10 +122,11 @@ entry:
}
define i1 @test_EQ_IsiEbT(i16 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IsiEbT
-; CHECK: cmn w1, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IsiEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w1, w0, sxth
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %a to i32
%add = sub i32 0, %b
@@ -121,11 +135,12 @@ entry:
}
define i1 @test_EQ_IssEbT(i16 %a, i16 %b) {
-; CHECK-LABEL: test_EQ_IssEbT
-; CHECK: sxth w8, w1
-; CHECK-NEXT: cmn w8, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IssEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sxth w8, w0
+; CHECK-NEXT: cmn w8, w1, sxth
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %a to i32
%conv1 = sext i16 %b to i32
@@ -135,11 +150,12 @@ entry:
}
define i1 @test_EQ_IscEbT(i16 %a, i8 %b) {
-; CHECK-LABEL: test_EQ_IscEbT
-; CHECK: and w8, w1, #0xff
-; CHECK-NEXT: cmn w8, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IscEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sxth w8, w0
+; CHECK-NEXT: cmn w8, w1, uxtb
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %a to i32
%conv1 = zext i8 %b to i32
@@ -149,10 +165,12 @@ entry:
}
define i1 @test_EQ_IclEbT(i8 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IclEbT
-; CHECK: cmn x1, w0, uxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IclEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: cmn x1, w0, uxtb
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %a to i64
%add = sub i64 0, %b
@@ -161,10 +179,11 @@ entry:
}
define i1 @test_EQ_IciEbT(i8 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IciEbT
-; CHECK: cmn w1, w0, uxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IciEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w1, w0, uxtb
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %a to i32
%add = sub i32 0, %b
@@ -173,11 +192,12 @@ entry:
}
define i1 @test_EQ_IcsEbT(i8 %a, i16 %b) {
-; CHECK-LABEL: test_EQ_IcsEbT
-; CHECK: sxth w8, w1
-; CHECK-NEXT: cmn w8, w0, uxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IcsEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and w8, w0, #0xff
+; CHECK-NEXT: cmn w8, w1, sxth
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %a to i32
%conv1 = sext i16 %b to i32
@@ -187,11 +207,12 @@ entry:
}
define i1 @test_EQ_IccEbT(i8 %a, i8 %b) {
-; CHECK-LABEL: test_EQ_IccEbT
-; CHECK: and w8, w1, #0xff
-; CHECK-NEXT: cmn w8, w0, uxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_EQ_IccEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and w8, w0, #0xff
+; CHECK-NEXT: cmn w8, w1, uxtb
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %a to i32
%conv1 = zext i8 %b to i32
@@ -201,10 +222,11 @@ entry:
}
define i1 @test_NE_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IllEbT
-; CHECK: cmn x1, x0
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IllEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn x0, x1
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp ne i64 %add, %a
@@ -212,10 +234,11 @@ entry:
}
define i1 @test_NE_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IliEbT
-; CHECK: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IliEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn x0, w1, sxtw
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -224,10 +247,12 @@ entry:
}
define i1 @test_NE_IlsEbT(i64 %a, i16 %b) {
-; CHECK-LABEL: test_NE_IlsEbT
-; CHECK: cmn x0, w1, sxth
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IlsEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: cmn x0, w1, sxth
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %b to i64
%add = sub i64 0, %a
@@ -236,10 +261,12 @@ entry:
}
define i1 @test_NE_IlcEbT(i64 %a, i8 %b) {
-; CHECK-LABEL: test_NE_IlcEbT
-; CHECK: cmn x0, w1, uxtb
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IlcEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: cmn x0, w1, uxtb
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %b to i64
%add = sub i64 0, %a
@@ -248,10 +275,11 @@ entry:
}
define i1 @test_NE_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IilEbT
-; CHECK: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IilEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn x1, w0, sxtw
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -260,10 +288,11 @@ entry:
}
define i1 @test_NE_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IiiEbT
-; CHECK: cmn w1, w0
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IiiEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w0, w1
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp ne i32 %add, %a
@@ -271,10 +300,11 @@ entry:
}
define i1 @test_NE_IisEbT(i32 %a, i16 %b) {
-; CHECK-LABEL: test_NE_IisEbT
-; CHECK: cmn w0, w1, sxth
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IisEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w0, w1, sxth
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %b to i32
%add = sub i32 0, %a
@@ -283,10 +313,11 @@ entry:
}
define i1 @test_NE_IicEbT(i32 %a, i8 %b) {
-; CHECK-LABEL: test_NE_IicEbT
-; CHECK: cmn w0, w1, uxtb
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IicEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w0, w1, uxtb
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %b to i32
%add = sub i32 0, %a
@@ -295,10 +326,12 @@ entry:
}
define i1 @test_NE_IslEbT(i16 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IslEbT
-; CHECK: cmn x1, w0, sxth
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IslEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: cmn x1, w0, sxth
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %a to i64
%add = sub i64 0, %b
@@ -307,10 +340,11 @@ entry:
}
define i1 @test_NE_IsiEbT(i16 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IsiEbT
-; CHECK: cmn w1, w0, sxth
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IsiEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w1, w0, sxth
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %a to i32
%add = sub i32 0, %b
@@ -319,11 +353,12 @@ entry:
}
define i1 @test_NE_IssEbT(i16 %a, i16 %b) {
-; CHECK-LABEL:test_NE_IssEbT
-; CHECK: sxth w8, w1
-; CHECK-NEXT: cmn w8, w0, sxth
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IssEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sxth w8, w0
+; CHECK-NEXT: cmn w8, w1, sxth
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %a to i32
%conv1 = sext i16 %b to i32
@@ -333,11 +368,12 @@ entry:
}
define i1 @test_NE_IscEbT(i16 %a, i8 %b) {
-; CHECK-LABEL:test_NE_IscEbT
-; CHECK: and w8, w1, #0xff
-; CHECK-NEXT: cmn w8, w0, sxth
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IscEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sxth w8, w0
+; CHECK-NEXT: cmn w8, w1, uxtb
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = sext i16 %a to i32
%conv1 = zext i8 %b to i32
@@ -347,10 +383,12 @@ entry:
}
define i1 @test_NE_IclEbT(i8 %a, i64 %b) {
-; CHECK-LABEL:test_NE_IclEbT
-; CHECK: cmn x1, w0, uxtb
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IclEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: cmn x1, w0, uxtb
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %a to i64
%add = sub i64 0, %b
@@ -359,10 +397,11 @@ entry:
}
define i1 @test_NE_IciEbT(i8 %a, i32 %b) {
-; CHECK-LABEL:test_NE_IciEbT
-; CHECK: cmn w1, w0, uxtb
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IciEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: cmn w1, w0, uxtb
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %a to i32
%add = sub i32 0, %b
@@ -371,11 +410,12 @@ entry:
}
define i1 @test_NE_IcsEbT(i8 %a, i16 %b) {
-; CHECK-LABEL:test_NE_IcsEbT
-; CHECK: sxth w8, w1
-; CHECK-NEXT: cmn w8, w0, uxtb
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IcsEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and w8, w0, #0xff
+; CHECK-NEXT: cmn w8, w1, sxth
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %a to i32
%conv1 = sext i16 %b to i32
@@ -385,11 +425,12 @@ entry:
}
define i1 @test_NE_IccEbT(i8 %a, i8 %b) {
-; CHECK-LABEL:test_NE_IccEbT
-; CHECK: and w8, w1, #0xff
-; CHECK-NEXT: cmn w8, w0, uxtb
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_NE_IccEbT:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and w8, w0, #0xff
+; CHECK-NEXT: cmn w8, w1, uxtb
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
entry:
%conv = zext i8 %a to i32
%conv1 = zext i8 %b to i32
diff --git a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
index 39edc03ced442..2451ea478ed71 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
@@ -107,11 +107,11 @@ define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
; CHECK-LABEL: overflow_add_positive_const_limit:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: mov w9, #8 // =0x8
-; CHECK-NEXT: cmp w8, w0, sxtb
+; CHECK-NEXT: sxtb w9, w0
; CHECK-NEXT: mov w8, #16 // =0x10
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: cmn w9, #1
+; CHECK-NEXT: mov w9, #8 // =0x8
+; CHECK-NEXT: csel w0, w9, w8, lt
; CHECK-NEXT: ret
%cmp = icmp slt i8 %a, -1
%res = select i1 %cmp, i32 8, i32 16
@@ -162,11 +162,11 @@ define i32 @safe_add_underflow_neg(i8 zeroext %a) {
define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
; CHECK-LABEL: overflow_sub_negative_const_limit:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: mov w9, #8 // =0x8
-; CHECK-NEXT: cmp w8, w0, sxtb
+; CHECK-NEXT: sxtb w9, w0
; CHECK-NEXT: mov w8, #16 // =0x10
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: cmn w9, #1
+; CHECK-NEXT: mov w9, #8 // =0x8
+; CHECK-NEXT: csel w0, w9, w8, lt
; CHECK-NEXT: ret
%cmp = icmp slt i8 %a, -1
%res = select i1 %cmp, i32 8, i32 16
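Viewed at the source level, both functions in this file reduce to the same comparison against -1, roughly as follows (a hand-written equivalent of the IR, not taken from the patch). The improvement is that the constant is no longer materialized in a register: `cmn w9, #1` encodes the -1 directly.

// Rough C++ equivalent of overflow_add_positive_const_limit above.
// `cmn w9, #1` sets the flags for (sext a) - (-1), so the following
// `csel ..., lt` selects 8 exactly when a < -1.
int overflow_add_positive_const_limit(signed char a) {
  return a < -1 ? 8 : 16;
}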