[llvm] 41fef10 - [GlobalISel] Combine G_SHL, G_ASHR, G_LSHR shifts by undef amounts to undef.
Amara Emerson via llvm-commits
llvm-commits at lists.llvm.org
Fri May 13 12:20:39 PDT 2022
Author: Amara Emerson
Date: 2022-05-13T12:20:34-07:00
New Revision: 41fef1044956a2aa9824d3284d363603d6f23537
URL: https://github.com/llvm/llvm-project/commit/41fef1044956a2aa9824d3284d363603d6f23537
DIFF: https://github.com/llvm/llvm-project/commit/41fef1044956a2aa9824d3284d363603d6f23537.diff
LOG: [GlobalISel] Combine G_SHL, G_ASHR, G_LSHR shifts by undef amounts to undef.
Differential Revision: https://reviews.llvm.org/D125041
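
For reference, here is a minimal C++ sketch (an illustration only, not the code TableGen generates for this rule) of what the new binop_right_undef_to_undef combine does: if the shift-amount operand (operand 2) of a G_SHL, G_ASHR, or G_LSHR is defined by G_IMPLICIT_DEF, the whole shift is replaced with an implicit def. matchOperandIsUndef and replaceInstWithUndef are the existing CombinerHelper entry points referenced by the rule; the wrapper function name below is made up for illustration.

    #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetOpcodes.h"
    using namespace llvm;

    // Hypothetical standalone wrapper; the real combiner invokes the rule
    // through the generated combiner match code.
    static bool combineShiftByUndefAmount(MachineInstr &MI,
                                          CombinerHelper &Helper) {
      switch (MI.getOpcode()) {
      case TargetOpcode::G_SHL:
      case TargetOpcode::G_ASHR:
      case TargetOpcode::G_LSHR: {
        // Operand 0 is the result, operand 1 the shifted value, operand 2
        // the shift amount. A shift by an undef amount folds to undef.
        if (Helper.matchOperandIsUndef(MI, 2)) {
          Helper.replaceInstWithUndef(MI);
          return true;
        }
        return false;
      }
      default:
        return false;
      }
    }

This mirrors the match/apply pair in the Combine.td rule below; the shl_undef_rhs, lshr_undef_rhs, and ashr_undef_rhs tests added to prelegalizercombiner-undef.mir exercise exactly this fold.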
Added:
Modified:
llvm/include/llvm/Target/GlobalISel/Combine.td
llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir
Removed:
################################################################################
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 66f7463445300..16580a432d638 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -235,6 +235,12 @@ def binop_left_undef_to_zero: GICombineRule<
[{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
(apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
+def binop_right_undef_to_undef: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
+ [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
+ (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
@@ -889,6 +895,7 @@ def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
undef_to_negative_one,
binop_left_undef_to_zero,
+ binop_right_undef_to_undef,
propagate_undef_any_op,
propagate_undef_all_ops,
propagate_undef_shuffle_mask,
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir
index 9cef073aec326..7db4526ea0702 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir
@@ -226,3 +226,96 @@ body: |
%0:_(<2 x s32>) = G_SHUFFLE_VECTOR %1(<2 x s32>), %2(<2 x s32>), shufflemask(0, 1)
$d0 = COPY %0(<2 x s32>)
RET_ReallyLR implicit $d0
+
+...
+---
+name: shl_undef_rhs
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: shl_undef_rhs
+ ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY [[DEF]](s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %0:_(s64) = G_CONSTANT i64 10
+ %1:_(s64) = G_IMPLICIT_DEF
+ %2:_(s64) = G_SHL %0, %1
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: lshr_undef_rhs
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: lshr_undef_rhs
+ ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY [[DEF]](s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %0:_(s64) = G_CONSTANT i64 10
+ %1:_(s64) = G_IMPLICIT_DEF
+ %2:_(s64) = G_LSHR %0, %1
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: ashr_undef_rhs
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: ashr_undef_rhs
+ ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY [[DEF]](s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %0:_(s64) = G_CONSTANT i64 10
+ %1:_(s64) = G_IMPLICIT_DEF
+ %2:_(s64) = G_ASHR %0, %1
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: lshr_undef_lhs
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; Optimize these to zero?
+ ; CHECK-LABEL: name: lshr_undef_lhs
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[DEF]], [[C]](s64)
+ ; CHECK-NEXT: $x0 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %0:_(s64) = G_CONSTANT i64 10
+ %1:_(s64) = G_IMPLICIT_DEF
+ %2:_(s64) = G_LSHR %1, %0
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: ashr_undef_lhs
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; Optimize these to zero?
+ ; CHECK-LABEL: name: ashr_undef_lhs
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[DEF]], [[C]](s64)
+ ; CHECK-NEXT: $x0 = COPY [[ASHR]](s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %0:_(s64) = G_CONSTANT i64 10
+ %1:_(s64) = G_IMPLICIT_DEF
+ %2:_(s64) = G_ASHR %1, %0
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...