[llvm] 6b09f73 - [RISCV][test] Precommit test case showing opportunity to narrow loads for some shift then mask cases

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Sat Dec 20 21:52:55 PST 2025


Author: Alex Bradbury
Date: 2025-12-21T05:51:48Z
New Revision: 6b09f739c4d085dc39eb9ff220c786bc3aa8c7fb

URL: https://github.com/llvm/llvm-project/commit/6b09f739c4d085dc39eb9ff220c786bc3aa8c7fb
DIFF: https://github.com/llvm/llvm-project/commit/6b09f739c4d085dc39eb9ff220c786bc3aa8c7fb.diff

LOG: [RISCV][test] Precommit test case showing opportunity to narrow loads for some shift then mask cases

Reviewed as part of #170483

Added: 
    llvm/test/CodeGen/RISCV/load-narrow-shift-mask-combine.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/load-narrow-shift-mask-combine.ll b/llvm/test/CodeGen/RISCV/load-narrow-shift-mask-combine.ll
new file mode 100644
index 0000000000000..901bce594f913
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/load-narrow-shift-mask-combine.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+
+; For each of these examples, it takes fewer instructions to narrow the load
+; and then shift (an opportunity exposed by performing the mask before the
+; shift).
+; TODO: Implement this optimisation.
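+;
+; As a sketch (assuming such a combine fires; this is not current llc
+; output), the RV32I code for narrow_to_lbu below could become:
+;   lbu a0, 0(a0)
+;   slli a0, a0, 4
+;   add a0, a1, a0
+;   ret
+; because the mask leaves only the low byte of the load live and lbu
+; already zero-extends, folding the mask into the narrowed load.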
+
+define ptr @narrow_to_lbu(ptr %a, ptr %b) {
+; RV32I-LABEL: narrow_to_lbu:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srli a0, a0, 20
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: narrow_to_lbu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lh a0, 0(a0)
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srli a0, a0, 52
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    ret
+  %1 = load i16, ptr %a, align 2
+  %2 = shl i16 %1, 1
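+  ; 510 = 0x1FE, and (x << 1) & 0x1FE == (x & 0xFF) << 1, so only the low
+  ; byte of the loaded i16 is live and an lbu would suffice.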
+  %3 = and i16 %2, 510
+  %4 = zext nneg i16 %3 to i64
+  %5 = getelementptr inbounds double, ptr %b, i64 %4
+  ret ptr %5
+}
+
+define ptr @narrow_to_lhu(ptr %a, ptr %b) {
+; RV32I-LABEL: narrow_to_lhu:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a0, 0(a0)
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 12
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: narrow_to_lhu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lw a0, 0(a0)
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 44
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    ret
+  %1 = load i32, ptr %a, align 4
+  %2 = shl i32 %1, 1
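+  ; 131070 = 0x1FFFE, and (x << 1) & 0x1FFFE == (x & 0xFFFF) << 1, so only
+  ; the low half of the loaded i32 is live and an lhu would suffice.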
+  %3 = and i32 %2, 131070
+  %4 = zext nneg i32 %3 to i64
+  %5 = getelementptr inbounds double, ptr %b, i64 %4
+  ret ptr %5
+}
+
+define ptr @narrow_to_lwu(ptr %a, ptr %b) {
+; RV32I-LABEL: narrow_to_lwu:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a0, 0(a0)
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: narrow_to_lwu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ld a0, 0(a0)
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 28
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    ret
+  %1 = load i64, ptr %a, align 8
+  %2 = shl i64 %1, 1
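+  ; 8589934590 = 0x1FFFFFFFE, and (x << 1) & 0x1FFFFFFFE == (x & 0xFFFFFFFF) << 1,
+  ; so only the low 32 bits of the loaded i64 are live and an lwu would
+  ; suffice on RV64 (the RV32 output already uses a single lw).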
+  %3 = and i64 %2, 8589934590
+  %4 = getelementptr inbounds double, ptr %b, i64 %3
+  ret ptr %4
+}

More information about the llvm-commits mailing list