[llvm] b4edd82 - [AArch64] Remove redundant FMOV for zero-extended i32/i16 loads to f64 (#146920)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 23 23:47:36 PDT 2025


Author: Amina Chabane
Date: 2025-07-24T07:47:32+01:00
New Revision: b4edd827e4f71c2a0fcb13f79de7eae4545f0aea

URL: https://github.com/llvm/llvm-project/commit/b4edd827e4f71c2a0fcb13f79de7eae4545f0aea
DIFF: https://github.com/llvm/llvm-project/commit/b4edd827e4f71c2a0fcb13f79de7eae4545f0aea.diff

LOG: [AArch64] Remove redundant FMOV for zero-extended i32/i16 loads to f64 (#146920)

Previously, separate load, zext, and FMOV instructions were emitted. This
patch adds new TableGen patterns to avoid the unnecessary FMOV. Tests are
included in test/CodeGen/AArch64/load-zext-bitcast.ll
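
As a sketch (register choices assumed, not taken from the commit), the
i32-to-f64 case previously lowered to a GPR load plus an FMOV, and now
selects a single FP load:

  Before:
    ldr  w8, [x0]     // 32-bit load; writing w8 implicitly zeroes the top half of x8
    fmov d0, x8       // redundant GPR-to-FPR move

  After:
    ldr  s0, [x0]     // FP load; the rest of the register is zeroed, so d0 holds the value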

Added: 
    llvm/test/CodeGen/AArch64/load-zext-bitcast.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 9f8a2571b076e..6ef28f052e836 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -3934,6 +3934,26 @@ defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
 def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
       (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
 
+// load zero-extended i32, bitcast to f64
+def : Pat<(f64 (bitconvert (i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
+          (SUBREG_TO_REG (i64 0), (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
+
+// load zero-extended i16, bitcast to f64
+def : Pat<(f64 (bitconvert (i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
+          (SUBREG_TO_REG (i64 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
+
+// load zero-extended i8, bitcast to f64
+def : Pat<(f64 (bitconvert (i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
+          (SUBREG_TO_REG (i64 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
+
+// load zero-extended i16, bitcast to f32
+def : Pat<(f32 (bitconvert (i32 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
+          (SUBREG_TO_REG (i32 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
+
+// load zero-extended i8, bitcast to f32
+def : Pat<(f32 (bitconvert (i32 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
+          (SUBREG_TO_REG (i32 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
+
 // Pre-fetch.
 def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                         [(AArch64Prefetch timm:$Rt,

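A note on correctness: on AArch64, a scalar load into an S, H, or B register
zeroes all remaining bits of the destination vector register, so the
SUBREG_TO_REG node with a zero immediate faithfully models the zero-extended
integer value. For example, the i16-to-f64 pattern selects code like the
following (offset value hypothetical):

    ldr h0, [x0, #2]   // 16-bit FP load; bits [63:16] of d0 are zeroed
                       // d0 now equals bitcast(zext i16 to i64) as an f64, no FMOV needed
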
diff --git a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
new file mode 100644
index 0000000000000..1a83930dfafa2
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s
+
+; load zero-extended i32, bitcast to f64
+define double @_Z9load_u64_from_u32_testPj(ptr %n){
+; CHECK-LABEL: _Z9load_u64_from_u32_testPj:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr %n, align 4
+  %conv = zext i32 %0 to i64
+  %1 = bitcast i64 %conv to double
+  ret double %1
+}
+
+; load zero-extended i16, bitcast to f64
+define double @_Z9load_u64_from_u16_testPj(ptr %n){
+; CHECK-LABEL: _Z9load_u64_from_u16_testPj:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i16, ptr %n, align 2
+  %conv = zext i16 %0 to i64
+  %1 = bitcast i64 %conv to double
+  ret double %1
+}
+
+; load zero-extended i8, bitcast to f64
+define double @_Z16load_u64_from_u8Ph(ptr %n){
+; CHECK-LABEL: _Z16load_u64_from_u8Ph:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr b0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i8, ptr %n, align 1
+  %conv = zext i8 %0 to i64
+  %1 = bitcast i64 %conv to double
+  ret double %1
+}
+
+; load zero-extended i16, bitcast to f32
+define float @_Z17load_u32_from_u16Pt(ptr %n){
+; CHECK-LABEL: _Z17load_u32_from_u16Pt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i16, ptr %n, align 2
+  %conv = zext i16 %0 to i32
+  %1 = bitcast i32 %conv to float
+  ret float %1
+}
+
+; load zero-extended i8, bitcast to f32
+define float @_Z16load_u32_from_u8Ph(ptr %n){
+; CHECK-LABEL: _Z16load_u32_from_u8Ph:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr b0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i8, ptr %n, align 1
+  %conv = zext i8 %0 to i32
+  %1 = bitcast i32 %conv to float
+  ret float %1
+}
+
+; load zero-extended i8, bitcast to f16
+define half @_Z16load_u16_from_u8Ph(ptr %n){
+; CHECK-LABEL: _Z16load_u16_from_u8Ph:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr b0, [x0]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i8, ptr %n, align 1
+  %conv = zext i8 %0 to i16
+  %1 = bitcast i16 %conv to half
+  ret half %1
+}
+
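
For reference, C++ along these lines produces the IR in the first test case;
this is a hypothetical reconstruction from the mangled names in the test, not
source shipped with the commit:

    #include <cstring>

    // 32-bit load, zero-extended to 64 bits, then bit-cast to double.
    double load_u64_from_u32_test(unsigned *n) {
      unsigned long long v = *n;        // zext i32 -> i64
      double d;
      std::memcpy(&d, &v, sizeof d);    // bitcast i64 -> f64
      return d;
    }

With this patch, compiling such code for aarch64 at -O1 or above should emit
the single "ldr s0, [x0]" seen in the CHECK lines, rather than a
"ldr w8" / "fmov d0, x8" pair.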
