[llvm] [AArch64] Combine `and` and `lsl` into `ubfiz` (PR #118974)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 6 06:02:47 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: Cullen Rhodes (c-rhodes)
Changes:
Fixes #118132.
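For context on the combine: a zero-extend of an `i8` or `i16` followed by a left shift previously lowered to a mask plus a shift (two instructions); the new patterns select a single `ubfiz` (unsigned bitfield insert in zeros) instead. A minimal IR sketch of the shape being matched; the function name is ours, and the before/after assembly is indicative rather than copied from the patch:

```llvm
; During type legalization the zext becomes (and (anyext %b), 0xff),
; which is exactly the dag the new TableGen patterns match.
define i64 @scale_byte(i8 %b) {
  %ext = zext i8 %b to i64
  %shl = shl i64 %ext, 1
  ret i64 %shl
}
; before: and   x8, x0, #0xff
;         lsl   x0, x8, #1
; after:  ubfiz x0, x0, #1, #8
```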
---
Full diff: https://github.com/llvm/llvm-project/pull/118974.diff
3 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.td (+9)
- (modified) llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll (+4-6)
- (modified) llvm/test/CodeGen/AArch64/xbfiz.ll (+16)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 7614f6215b803c..9f980615caff5a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -8989,6 +8989,15 @@ def : Pat<(shl (i64 (zext GPR32:$Rn)), (i64 imm0_63:$imm)),
(i64 (i64shift_a imm0_63:$imm)),
(i64 (i64shift_sext_i32 imm0_63:$imm)))>;
+def : Pat<(shl (i64 (and (i64 (anyext GPR32:$Rn)), 0xff)), (i64 imm0_63:$imm)),
+ (UBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
+ (i64 (i64shift_a imm0_63:$imm)),
+ (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
+def : Pat<(shl (i64 (and (i64 (anyext GPR32:$Rn)), 0xffff)), (i64 imm0_63:$imm)),
+ (UBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
+ (i64 (i64shift_a imm0_63:$imm)),
+ (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
+
// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
diff --git a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
index 63dcafed2320a0..abc5c0876e80b7 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
@@ -13,11 +13,10 @@ define i16 @halfword(ptr %ctx, i32 %xor72) nounwind {
; CHECK0-SDAG-LABEL: halfword:
; CHECK0-SDAG: // %bb.0:
; CHECK0-SDAG-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK0-SDAG-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK0-SDAG-NEXT: ubfx x8, x1, #9, #8
+; CHECK0-SDAG-NEXT: lsr w8, w1, #9
; CHECK0-SDAG-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
; CHECK0-SDAG-NEXT: mov x19, x0
-; CHECK0-SDAG-NEXT: lsl x21, x8, #1
+; CHECK0-SDAG-NEXT: ubfiz x21, x8, #1, #8
; CHECK0-SDAG-NEXT: ldrh w20, [x0, x21]
; CHECK0-SDAG-NEXT: bl foo
; CHECK0-SDAG-NEXT: mov w0, w20
@@ -231,10 +230,9 @@ define i16 @multi_use_half_word(ptr %ctx, i32 %xor72) {
; CHECK0-SDAG-NEXT: .cfi_offset w21, -24
; CHECK0-SDAG-NEXT: .cfi_offset w22, -32
; CHECK0-SDAG-NEXT: .cfi_offset w30, -48
-; CHECK0-SDAG-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK0-SDAG-NEXT: ubfx x8, x1, #9, #8
+; CHECK0-SDAG-NEXT: lsr w8, w1, #9
; CHECK0-SDAG-NEXT: mov x19, x0
-; CHECK0-SDAG-NEXT: lsl x21, x8, #1
+; CHECK0-SDAG-NEXT: ubfiz x21, x8, #1, #8
; CHECK0-SDAG-NEXT: ldrh w20, [x0, x21]
; CHECK0-SDAG-NEXT: add w22, w20, #1
; CHECK0-SDAG-NEXT: bl foo
diff --git a/llvm/test/CodeGen/AArch64/xbfiz.ll b/llvm/test/CodeGen/AArch64/xbfiz.ll
index b777ddcb7efcc4..05567e34258402 100644
--- a/llvm/test/CodeGen/AArch64/xbfiz.ll
+++ b/llvm/test/CodeGen/AArch64/xbfiz.ll
@@ -69,3 +69,19 @@ define i64 @lsl32_not_ubfiz64(i64 %v) {
%and = and i64 %shl, 4294967295
ret i64 %and
}
+
+define i64 @lsl_zext_i8_i64(i8 %b) {
+; CHECK-LABEL: lsl_zext_i8_i64:
+; CHECK: ubfiz x0, x0, #1, #8
+ %1 = zext i8 %b to i64
+ %2 = shl i64 %1, 1
+ ret i64 %2
+}
+
+define i64 @lsl_zext_i16_i64(i16 %b) {
+; CHECK-LABEL: lsl_zext_i16_i64:
+; CHECK: ubfiz x0, x0, #1, #16
+ %1 = zext i16 %b to i64
+ %2 = shl i64 %1, 1
+ ret i64 %2
+}
``````````
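For reference, `ubfiz xd, xn, #lsb, #width` takes the low `width` bits of `xn`, shifts them up to bit `lsb`, and zeroes all other bits, which is precisely the `and` + `lsl` pair the patterns replace; the `i64shift_*` transforms compute the corresponding UBFM immediates. A small equivalence sketch in IR (function name ours):

```llvm
; Computes the same value as "ubfiz x0, x0, #1, #8":
define i64 @ubfiz_1_8(i64 %xn) {
  %lo = and i64 %xn, 255   ; keep the low 8 bits  (width = 8)
  %r  = shl i64 %lo, 1     ; place them at bit 1  (lsb = 1)
  ret i64 %r
}
```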
https://github.com/llvm/llvm-project/pull/118974