[llvm] 31009f0 - [CodeGen][AArch64] Ensure isSExtCheaperThanZExt returns true for negative constants
David Sherwood via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 13 01:43:28 PST 2022
Author: David Sherwood
Date: 2022-01-13T09:43:07Z
New Revision: 31009f0b5afb504fc1f30769c038e1b7be6ea45b
URL: https://github.com/llvm/llvm-project/commit/31009f0b5afb504fc1f30769c038e1b7be6ea45b
DIFF: https://github.com/llvm/llvm-project/commit/31009f0b5afb504fc1f30769c038e1b7be6ea45b.diff
LOG: [CodeGen][AArch64] Ensure isSExtCheaperThanZExt returns true for negative constants
When we know the value we're extending is a negative constant, it
makes sense to use SIGN_EXTEND because this can improve code quality in
some cases, particularly when doing a constant splat of an unpacked vector
type. For example, for SVE when splatting the value -1 into all elements
of a vector of type <vscale x 2 x i32> the element type will get promoted
from i32 -> i64. In this case we want the splat value to sign-extend from
(i32 -1) -> (i64 -1), whereas currently it zero-extends from
(i32 -1) -> (i64 0xFFFFFFFF). Sign-extending the constant means we can use
a single mov immediate instruction.
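To illustrate the difference, here is a minimal standalone C++ sketch (not
part of this patch) that promotes the splat value both ways; the
sign-extended form is all ones and fits a single mov immediate, whereas the
zero-extended form does not:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t SplatVal = -1; // the i32 element value being splatted

  // Promote i32 -> i64 the two possible ways.
  uint64_t SExt = (uint64_t)(int64_t)SplatVal;  // sign-extend: 0xFFFFFFFFFFFFFFFF
  uint64_t ZExt = (uint64_t)(uint32_t)SplatVal; // zero-extend: 0x00000000FFFFFFFF

  // The all-ones pattern can be materialised with "mov z0.d, #-1", whereas the
  // zero-extended pattern previously needed "mov w8, #-1" + "mov z0.d, x8".
  std::printf("sext: 0x%016llx\n", (unsigned long long)SExt);
  std::printf("zext: 0x%016llx\n", (unsigned long long)ZExt);
  return 0;
}
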
New tests added here:
CodeGen/AArch64/sve-vector-splat.ll
I believe we see some code quality improvements in these existing
tests too:
CodeGen/AArch64/dag-numsignbits.ll
CodeGen/AArch64/reduce-and.ll
CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
The apparent regressions in CodeGen/AArch64/fast-isel-cmp-vec.ll only
occur because the test disables codegen prepare and branch folding.
Differential Revision: https://reviews.llvm.org/D114357
Added:
Modified:
llvm/include/llvm/CodeGen/TargetLowering.h
llvm/lib/CodeGen/CodeGenPrepare.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.h
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.h
llvm/test/CodeGen/AArch64/arm64-vshuffle.ll
llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
llvm/test/CodeGen/AArch64/dag-numsignbits.ll
llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
llvm/test/CodeGen/AArch64/funnel-shift.ll
llvm/test/CodeGen/AArch64/reduce-and.ll
llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll
llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
llvm/test/CodeGen/AArch64/sve-vector-splat.ll
llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index dfad3b3a65937..e9b2df85e7f57 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2645,9 +2645,9 @@ class TargetLoweringBase {
getApproximateEVTForLLT(ToTy, DL, Ctx));
}
- /// Return true if sign-extension from FromTy to ToTy is cheaper than
- /// zero-extension.
- virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
+ /// Return true if sign-extension of value \p V from FromTy to ToTy is
+ /// cheaper than zero-extension, where \p V can be SDValue() if unknown.
+ virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy, SDValue V) const {
return false;
}
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 747f4e4fdecca..8053f1d7e5d57 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -7004,7 +7004,7 @@ bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
// matching the argument extension instead.
Instruction::CastOps ExtType = Instruction::ZExt;
// Some targets prefer SExt over ZExt.
- if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
+ if (TLI->isSExtCheaperThanZExt(OldVT, RegType, SDValue()))
ExtType = Instruction::SExt;
if (auto *Arg = dyn_cast<Argument>(Cond)) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 8c7b90b6cd336..da1dd65f02511 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1704,7 +1704,7 @@ void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &LHS, SDValue &RHS,
SDValue OpL = GetPromotedInteger(LHS);
SDValue OpR = GetPromotedInteger(RHS);
- if (TLI.isSExtCheaperThanZExt(LHS.getValueType(), OpL.getValueType())) {
+ if (TLI.isSExtCheaperThanZExt(LHS.getValueType(), OpL.getValueType(), LHS)) {
// The target would prefer to promote the comparison operand with sign
// extension. Honor that unless the promoted values are already zero
// extended.
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index f0f32118540eb..50f96c3f8e84e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -283,7 +283,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
EVT OldVT = Op.getValueType();
SDLoc DL(Op);
Op = GetPromotedInteger(Op);
- if (TLI.isSExtCheaperThanZExt(OldVT, Op.getValueType()))
+ if (TLI.isSExtCheaperThanZExt(OldVT, Op.getValueType(), Op))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), Op,
DAG.getValueType(OldVT));
return DAG.getZeroExtendInReg(Op, DL, OldVT);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 5168869cb574d..20374ea1469cb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4767,7 +4767,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
C->isTargetOpcode(), C->isOpaque());
case ISD::ANY_EXTEND:
// Some targets like RISCV prefer to sign extend some types.
- if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT))
+ if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT, Operand))
return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
C->isTargetOpcode(), C->isOpaque());
return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 40b422fd740dc..2bb30469fc505 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3844,7 +3844,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
} else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
(Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
!isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(),
- OpVT)) {
+ OpVT, N0.getOperand(1))) {
EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
EVT ExtDstTy = N0.getValueType();
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 9841a4c048632..e6e25da7cd888 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1138,6 +1138,14 @@ class AArch64TargetLowering : public TargetLowering {
bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
LLT Ty2) const override;
+
+ bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT, SDValue V) const override {
+ if (!V)
+ return false;
+ if (ConstantSDNode *C = isConstOrConstSplat(V))
+ return C->getAPIntValue().isNegative();
+ return false;
+ }
};
namespace AArch64 {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a9d52d19d045e..db0afdbb27b8b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1198,7 +1198,8 @@ bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
return TargetLowering::isZExtFree(Val, VT2);
}
-bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
+bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT,
+ SDValue V) const {
return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 199485ee6b8d6..b8ff9522bd062 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -326,7 +326,7 @@ class RISCVTargetLowering : public TargetLowering {
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
bool isZExtFree(SDValue Val, EVT VT2) const override;
- bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
+ bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT, SDValue V) const override;
bool isCheapToSpeculateCttz() const override;
bool isCheapToSpeculateCtlz() const override;
bool hasAndNotCompare(SDValue Y) const override;
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshuffle.ll b/llvm/test/CodeGen/AArch64/arm64-vshuffle.ll
index fdd7cad78536b..db1920e5cc9c7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshuffle.ll
@@ -14,18 +14,8 @@ entry:
ret <8 x i1> %Shuff
}
-; CHECK: lCPI1_0:
-; CHECK: .byte 0 ; 0x0
-; CHECK: .byte 0 ; 0x0
-; CHECK: .byte 0 ; 0x0
-; CHECK: .byte 0 ; 0x0
-; CHECK: .byte 1 ; 0x1
-; CHECK: .byte 0 ; 0x0
-; CHECK: .byte 0 ; 0x0
-; CHECK: .byte 0 ; 0x0
; CHECK: test2
-; CHECK: adrp x[[REG2:[0-9]+]], lCPI1_0@PAGE
-; CHECK: ldr d[[REG1:[0-9]+]], [x[[REG2]], lCPI1_0@PAGEOFF]
+; CHECK: movi d{{[0-9]+}}, #0x0000ff00000000
define <8 x i1>@test2() {
bb:
%Shuff = shufflevector <8 x i1> zeroinitializer,
@@ -36,7 +26,7 @@ bb:
}
; CHECK: test3
-; CHECK: movi.4s v{{[0-9]+}}, #1
+; CHECK: movi.2d v{{[0-9]+}}, #0x0000ff000000ff
define <16 x i1> @test3(i1* %ptr, i32 %v) {
bb:
%Shuff = shufflevector <16 x i1> <i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 0, i1 0>, <16 x i1> undef,
@@ -45,11 +35,13 @@ bb:
i32 14, i32 0>
ret <16 x i1> %Shuff
}
+
+
; CHECK: lCPI3_0:
; CHECK: .byte 0 ; 0x0
; CHECK: .byte 0 ; 0x0
; CHECK: .byte 0 ; 0x0
-; CHECK: .byte 1 ; 0x1
+; CHECK: .byte 255 ; 0xff
; CHECK: .byte 0 ; 0x0
; CHECK: .byte 0 ; 0x0
; CHECK: .byte 0 ; 0x0
diff --git a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
index 5e16a9a31ea65..8f4b2d42d767f 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
@@ -249,7 +249,7 @@ define {i8*, i1} @test_cmpxchg_ptr(i8** %addr, i8* %cmp, i8* %new) {
; CHECK: stlxr [[SUCCESS:w[0-9]+]], w2, [x0]
; CHECK: cbnz [[SUCCESS]], [[LOOP]]
-; CHECK: mov w1, #1
+; CHECK: mov w1, #-1
; CHECK: mov w0, [[OLD]]
; CHECK: ret
diff --git a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
index 7232e7cb72416..33be7add3c59e 100644
--- a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
+++ b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
@@ -15,7 +15,7 @@ define i32 @test_return(i32* %p, i32 %oldval, i32 %newval) {
; CHECK-NEXT: stlxr w8, w2, [x0]
; CHECK-NEXT: cbnz w8, LBB0_1
; CHECK-NEXT: ; %bb.3:
-; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: mov w0, #-1
; CHECK-NEXT: ret
; CHECK-NEXT: LBB0_4: ; %cmpxchg.nostore
; CHECK-NEXT: mov w0, wzr
@@ -64,7 +64,7 @@ define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {
; CHECK-NEXT: stlxrb w9, w2, [x0]
; CHECK-NEXT: cbnz w9, LBB1_1
; CHECK-NEXT: ; %bb.3:
-; CHECK-NEXT: mov w8, #1
+; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: eor w0, w8, #0x1
; CHECK-NEXT: ret
; CHECK-NEXT: LBB1_4: ; %cmpxchg.nostore
@@ -188,7 +188,7 @@ define i1 @test_conditional2(i32 %a, i32 %b, i32* %c) {
; CHECK-NEXT: stlxr w8, w20, [x19]
; CHECK-NEXT: cbnz w8, LBB3_1
; CHECK-NEXT: ; %bb.3:
-; CHECK-NEXT: mov w8, #1
+; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: b LBB3_5
; CHECK-NEXT: LBB3_4: ; %cmpxchg.nostore
; CHECK-NEXT: mov w8, wzr
diff --git a/llvm/test/CodeGen/AArch64/dag-numsignbits.ll b/llvm/test/CodeGen/AArch64/dag-numsignbits.ll
index e4f13f5c98a17..802df9d116049 100644
--- a/llvm/test/CodeGen/AArch64/dag-numsignbits.ll
+++ b/llvm/test/CodeGen/AArch64/dag-numsignbits.ll
@@ -8,18 +8,13 @@ define void @signbits_vXi1(<4 x i16> %a1) {
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI0_0
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov w1, wzr
+; CHECK-NEXT: movi v2.4h, #1
; CHECK-NEXT: dup v0.4h, v0.h[0]
+; CHECK-NEXT: mov w1, wzr
; CHECK-NEXT: mov w2, wzr
; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_0]
-; CHECK-NEXT: adrp x8, .LCPI0_1
; CHECK-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: movi v1.4h, #1
-; CHECK-NEXT: cmgt v0.4h, v1.4h, v0.4h
-; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_1]
-; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: shl v0.4h, v0.4h, #15
-; CHECK-NEXT: cmlt v0.4h, v0.4h, #0
+; CHECK-NEXT: cmgt v0.4h, v2.4h, v0.4h
; CHECK-NEXT: umov w0, v0.h[0]
; CHECK-NEXT: umov w3, v0.h[3]
; CHECK-NEXT: b foo
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll b/llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
index 644a0ef512613..671ed1f7a4cbe 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
@@ -25,8 +25,10 @@ bb2:
define <2 x i32> @icmp_constfold_v2i32(<2 x i32> %a) {
; CHECK-LABEL: icmp_constfold_v2i32:
; CHECK: ; %bb.0:
-; CHECK-NEXT: movi.2s v0, #1
-; CHECK-NEXT: and.8b v0, v0, v0
+; CHECK-NEXT: movi.2d v0, #0xffffffffffffffff
+; CHECK-NEXT: ; %bb.1: ; %bb2
+; CHECK-NEXT: movi.2s v1, #1
+; CHECK-NEXT: and.8b v0, v0, v1
; CHECK-NEXT: ret
%1 = icmp eq <2 x i32> %a, %a
br label %bb2
@@ -55,9 +57,10 @@ bb2:
define <4 x i32> @icmp_constfold_v4i32(<4 x i32> %a) {
; CHECK-LABEL: icmp_constfold_v4i32:
; CHECK: ; %bb.0:
-; CHECK-NEXT: movi.4h v0, #1
+; CHECK-NEXT: movi.2d v0, #0xffffffffffffffff
; CHECK-NEXT: ; %bb.1: ; %bb2
-; CHECK-NEXT: and.8b v0, v0, v0
+; CHECK-NEXT: movi.4h v1, #1
+; CHECK-NEXT: and.8b v0, v0, v1
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: ret
%1 = icmp eq <4 x i32> %a, %a
@@ -85,8 +88,10 @@ bb2:
define <16 x i8> @icmp_constfold_v16i8(<16 x i8> %a) {
; CHECK-LABEL: icmp_constfold_v16i8:
; CHECK: ; %bb.0:
-; CHECK-NEXT: movi.16b v0, #1
-; CHECK-NEXT: and.16b v0, v0, v0
+; CHECK-NEXT: movi.2d v0, #0xffffffffffffffff
+; CHECK-NEXT: ; %bb.1: ; %bb2
+; CHECK-NEXT: movi.16b v1, #1
+; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: ret
%1 = icmp eq <16 x i8> %a, %a
br label %bb2
diff --git a/llvm/test/CodeGen/AArch64/funnel-shift.ll b/llvm/test/CodeGen/AArch64/funnel-shift.ll
index 51dc7ce2d061d..da1b107e65d4b 100644
--- a/llvm/test/CodeGen/AArch64/funnel-shift.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift.ll
@@ -93,7 +93,7 @@ declare i7 @llvm.fshl.i7(i7, i7, i7)
define i7 @fshl_i7_const_fold() {
; CHECK-LABEL: fshl_i7_const_fold:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, #67
+; CHECK-NEXT: mov w0, #-61
; CHECK-NEXT: ret
%f = call i7 @llvm.fshl.i7(i7 112, i7 127, i7 2)
ret i7 %f
@@ -102,7 +102,7 @@ define i7 @fshl_i7_const_fold() {
define i8 @fshl_i8_const_fold_overshift_1() {
; CHECK-LABEL: fshl_i8_const_fold_overshift_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, #128
+; CHECK-NEXT: mov w0, #-128
; CHECK-NEXT: ret
%f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 15)
ret i8 %f
@@ -164,7 +164,7 @@ define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) {
define i8 @fshl_i8_const_fold() {
; CHECK-LABEL: fshl_i8_const_fold:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, #128
+; CHECK-NEXT: mov w0, #-128
; CHECK-NEXT: ret
%f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 7)
ret i8 %f
@@ -241,7 +241,7 @@ define i7 @fshr_i7_const_fold() {
define i8 @fshr_i8_const_fold_overshift_1() {
; CHECK-LABEL: fshr_i8_const_fold_overshift_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, #254
+; CHECK-NEXT: mov w0, #-2
; CHECK-NEXT: ret
%f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 15)
ret i8 %f
@@ -250,7 +250,7 @@ define i8 @fshr_i8_const_fold_overshift_1() {
define i8 @fshr_i8_const_fold_overshift_2() {
; CHECK-LABEL: fshr_i8_const_fold_overshift_2:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, #225
+; CHECK-NEXT: mov w0, #-31
; CHECK-NEXT: ret
%f = call i8 @llvm.fshr.i8(i8 15, i8 15, i8 11)
ret i8 %f
@@ -259,7 +259,7 @@ define i8 @fshr_i8_const_fold_overshift_2() {
define i8 @fshr_i8_const_fold_overshift_3() {
; CHECK-LABEL: fshr_i8_const_fold_overshift_3:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, #255
+; CHECK-NEXT: mov w0, #-1
; CHECK-NEXT: ret
%f = call i8 @llvm.fshr.i8(i8 0, i8 255, i8 8)
ret i8 %f
@@ -303,7 +303,7 @@ define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) {
define i8 @fshr_i8_const_fold() {
; CHECK-LABEL: fshr_i8_const_fold:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, #254
+; CHECK-NEXT: mov w0, #-2
; CHECK-NEXT: ret
%f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 7)
ret i8 %f
diff --git a/llvm/test/CodeGen/AArch64/reduce-and.ll b/llvm/test/CodeGen/AArch64/reduce-and.ll
index 60546cf128af4..a4d2f6576f0af 100644
--- a/llvm/test/CodeGen/AArch64/reduce-and.ll
+++ b/llvm/test/CodeGen/AArch64/reduce-and.ll
@@ -223,8 +223,7 @@ define i8 @test_redand_v3i8(<3 x i8> %a) {
; CHECK-LABEL: test_redand_v3i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, w1
-; CHECK-NEXT: and w8, w8, w2
-; CHECK-NEXT: and w0, w8, #0xff
+; CHECK-NEXT: and w0, w8, w2
; CHECK-NEXT: ret
;
; GISEL-LABEL: test_redand_v3i8:
diff --git a/llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll b/llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll
index ed34cbd2fa0b6..bccb5ba58206f 100644
--- a/llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll
+++ b/llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll
@@ -9,7 +9,7 @@ declare i8* @bar()
; CHECK-LABEL: foo:
; CHECK: tbz
-; CHECK: mov{{.*}}, #1
+; CHECK: mov{{.*}}, #-1
; CHECK: ret
; CHECK: bl bar
; CHECK: cbnz
diff --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
index 65dc44618d5cd..d38548084d3c4 100644
--- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
@@ -177,7 +177,7 @@ define i1 @test_cross_bb(i32 addrspace(1)* %a, i1 %external_cond) gc "statepoint
; CHECK-NEXT: bl consume
; CHECK-NEXT: b .LBB8_3
; CHECK-NEXT: .LBB8_2:
-; CHECK-NEXT: mov w19, #1
+; CHECK-NEXT: mov w19, #-1
; CHECK-NEXT: .LBB8_3: // %common.ret
; CHECK-NEXT: and w0, w19, #0x1
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
index 58821738f3a3a..7f9307d37f6d1 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
@@ -119,8 +119,7 @@ define <vscale x 8 x i8> @sve_splat_8xi8(i8 %val) {
define <vscale x 8 x i8> @sve_splat_8xi8_imm() {
; CHECK-LABEL: sve_splat_8xi8_imm:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #255
-; CHECK-NEXT: mov z0.h, w8
+; CHECK-NEXT: mov z0.h, #-1 // =0xffffffffffffffff
; CHECK-NEXT: ret
%ins = insertelement <vscale x 8 x i8> undef, i8 -1, i32 0
%splat = shufflevector <vscale x 8 x i8> %ins, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
@@ -151,8 +150,7 @@ define <vscale x 4 x i16> @sve_splat_4xi16(i16 %val) {
define <vscale x 4 x i16> @sve_splat_4xi16_imm() {
; CHECK-LABEL: sve_splat_4xi16_imm:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z0.s, w8
+; CHECK-NEXT: mov z0.s, #-1 // =0xffffffffffffffff
; CHECK-NEXT: ret
%ins = insertelement <vscale x 4 x i16> undef, i16 -1, i32 0
%splat = shufflevector <vscale x 4 x i16> %ins, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -173,8 +171,7 @@ define <vscale x 2 x i32> @sve_splat_2xi32(i32 %val) {
define <vscale x 2 x i32> @sve_splat_2xi32_imm() {
; CHECK-LABEL: sve_splat_2xi32_imm:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov z0.d, x8
+; CHECK-NEXT: mov z0.d, #-1 // =0xffffffffffffffff
; CHECK-NEXT: ret
%ins = insertelement <vscale x 2 x i32> undef, i32 -1, i32 0
%splat = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll b/llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
index 607f5dd3dc772..0c85c14badfb8 100644
--- a/llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
+++ b/llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
@@ -29,11 +29,7 @@ define <1 x i8> @out_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind {
define <2 x i8> @out_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind {
; CHECK-LABEL: out_v2i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi d3, #0x0000ff000000ff
-; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT: eor v2.8b, v2.8b, v3.8b
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b
; CHECK-NEXT: ret
%mx = and <2 x i8> %x, %mask
%notmask = xor <2 x i8> %mask, <i8 -1, i8 -1>
@@ -61,11 +57,7 @@ define <1 x i16> @out_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwin
define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
; CHECK-LABEL: out_v4i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi d3, #0xff00ff00ff00ff
-; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT: eor v2.8b, v2.8b, v3.8b
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b
; CHECK-NEXT: ret
%mx = and <4 x i8> %x, %mask
%notmask = xor <4 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1>
@@ -77,11 +69,7 @@ define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
; CHECK-LABEL: out_v4i8_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi d3, #0xff00ff00ff00ff
-; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT: eor v2.8b, v2.8b, v3.8b
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b
; CHECK-NEXT: ret
%mx = and <4 x i8> %x, %mask
%notmask = xor <4 x i8> %mask, <i8 -1, i8 -1, i8 undef, i8 -1>
@@ -93,11 +81,7 @@ define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwi
define <2 x i16> @out_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind {
; CHECK-LABEL: out_v2i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi d3, #0x00ffff0000ffff
-; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT: eor v2.8b, v2.8b, v3.8b
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b
; CHECK-NEXT: ret
%mx = and <2 x i16> %x, %mask
%notmask = xor <2 x i16> %mask, <i16 -1, i16 -1>
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
index fc0f3a10f5b16..03f0f04062aad 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
@@ -86,8 +86,7 @@ define i8 @test_v3i8(<3 x i8> %a) nounwind {
; CHECK-LABEL: test_v3i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, w1
-; CHECK-NEXT: and w8, w8, w2
-; CHECK-NEXT: and w0, w8, #0xff
+; CHECK-NEXT: and w0, w8, w2
; CHECK-NEXT: ret
%b = call i8 @llvm.vector.reduce.and.v3i8(<3 x i8> %a)
ret i8 %b