[llvm] 0947a8c - [RISCV][NFC] Add tests for folds of ADDIs into load/stores

Luís Marques via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 23 15:00:43 PDT 2020


Author: Luís Marques
Date: 2020-06-23T22:59:54+01:00
New Revision: 0947a8ca9824e15780075dc3525e30bada4bccdd

URL: https://github.com/llvm/llvm-project/commit/0947a8ca9824e15780075dc3525e30bada4bccdd
DIFF: https://github.com/llvm/llvm-project/commit/0947a8ca9824e15780075dc3525e30bada4bccdd.diff

LOG: [RISCV][NFC] Add tests for folds of ADDIs into load/stores

This patch adds tests for folds of ADDIs into load/stores, focusing on
load/stores with nonzero offsets. When the offset is nonzero, we currently
don't do the fold; a follow-up patch will improve on that.
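
For context, here is a minimal sketch of the fold these tests target
(illustrative only, not part of this commit; the symbol g and the register
choices are made up):

  # Before: the address of g is materialized with lui+addi, and the access
  # to the high word of the i64 then uses a separate nonzero offset.
  lui   a0, %hi(g)
  addi  a0, a0, %lo(g)
  lw    a1, 4(a0)

  # After the fold, (load (addi base, off1), off2) becomes
  # (load base, off1+off2) and the ADDI disappears:
  lui   a0, %hi(g+4)
  lw    a1, %lo(g+4)(a0)

The fold is only safe when %lo(g)+4 still fits the signed 12-bit immediate,
which is where the variable's alignment comes in: for an 8-byte-aligned
global, %lo(g) is a multiple of 8 and at most 2040, so adding 4 cannot carry
into %hi(g); an align-1 global gives no such guarantee.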

Differential Revision: https://reviews.llvm.org/D79689

Added: 
    llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
new file mode 100644
index 000000000000..4c98bafdfb8a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+; We can often fold an ADDI into the offset of load/store instructions:
+;   (load (addi base, off1), off2) -> (load base, off1+off2)
+;   (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
+; This is possible when off1+off2 still fits in the signed 12-bit immediate.
+; Check whether we do the fold under various conditions. If off1 is (the low
+; part of) an address, the fold's safety depends on the variable's alignment.
+
+@g_0 = global i64 0
+@g_1 = global i64 0, align 1
+@g_2 = global i64 0, align 2
+@g_4 = global i64 0, align 4
+@g_8 = global i64 0, align 8
+@g_16 = global i64 0, align 16
+
+define i64 @load_g_0() nounwind {
+; RV32I-LABEL: load_g_0:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a1, %hi(g_0)
+; RV32I-NEXT:    lw a0, %lo(g_0)(a1)
+; RV32I-NEXT:    addi a1, a1, %lo(g_0)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_g_0:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(g_0)
+; RV64I-NEXT:    ld a0, %lo(g_0)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* @g_0
+  ret i64 %0
+}
+
+define i64 @load_g_1() nounwind {
+; RV32I-LABEL: load_g_1:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a1, %hi(g_1)
+; RV32I-NEXT:    lw a0, %lo(g_1)(a1)
+; RV32I-NEXT:    addi a1, a1, %lo(g_1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_g_1:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(g_1)
+; RV64I-NEXT:    ld a0, %lo(g_1)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* @g_1
+  ret i64 %0
+}
+
+define i64 @load_g_2() nounwind {
+; RV32I-LABEL: load_g_2:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a1, %hi(g_2)
+; RV32I-NEXT:    lw a0, %lo(g_2)(a1)
+; RV32I-NEXT:    addi a1, a1, %lo(g_2)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_g_2:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(g_2)
+; RV64I-NEXT:    ld a0, %lo(g_2)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* @g_2
+  ret i64 %0
+}
+
+define i64 @load_g_4() nounwind {
+; RV32I-LABEL: load_g_4:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a1, %hi(g_4)
+; RV32I-NEXT:    lw a0, %lo(g_4)(a1)
+; RV32I-NEXT:    addi a1, a1, %lo(g_4)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_g_4:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(g_4)
+; RV64I-NEXT:    ld a0, %lo(g_4)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* @g_4
+  ret i64 %0
+}
+
+define i64 @load_g_8() nounwind {
+; RV32I-LABEL: load_g_8:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a1, %hi(g_8)
+; RV32I-NEXT:    lw a0, %lo(g_8)(a1)
+; RV32I-NEXT:    addi a1, a1, %lo(g_8)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_g_8:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(g_8)
+; RV64I-NEXT:    ld a0, %lo(g_8)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* @g_8
+  ret i64 %0
+}
+
+define i64 @load_g_16() nounwind {
+; RV32I-LABEL: load_g_16:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a1, %hi(g_16)
+; RV32I-NEXT:    lw a0, %lo(g_16)(a1)
+; RV32I-NEXT:    addi a1, a1, %lo(g_16)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_g_16:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(g_16)
+; RV64I-NEXT:    ld a0, %lo(g_16)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* @g_16
+  ret i64 %0
+}
+
+define void @store_g_4() nounwind {
+; RV32I-LABEL: store_g_4:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a0, %hi(g_4)
+; RV32I-NEXT:    sw zero, %lo(g_4)(a0)
+; RV32I-NEXT:    addi a0, a0, %lo(g_4)
+; RV32I-NEXT:    sw zero, 4(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: store_g_4:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(g_4)
+; RV64I-NEXT:    sd zero, %lo(g_4)(a0)
+; RV64I-NEXT:    ret
+entry:
+   store i64 0, i64* @g_4
+   ret void
+}
+
+define void @store_g_8() nounwind {
+; RV32I-LABEL: store_g_8:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a0, %hi(g_8)
+; RV32I-NEXT:    sw zero, %lo(g_8)(a0)
+; RV32I-NEXT:    addi a0, a0, %lo(g_8)
+; RV32I-NEXT:    sw zero, 4(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: store_g_8:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(g_8)
+; RV64I-NEXT:    sd zero, %lo(g_8)(a0)
+; RV64I-NEXT:    ret
+entry:
+   store i64 0, i64* @g_8
+   ret void
+}
+
+; Check for folds in accesses to the second element of an i64 array.
+
+@ga_8 = dso_local local_unnamed_addr global [2 x i64] zeroinitializer, align 8
+@ga_16 = dso_local local_unnamed_addr global [2 x i64] zeroinitializer, align 16
+
+define i64 @load_ga_8() nounwind {
+; RV32I-LABEL: load_ga_8:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a0, %hi(ga_8)
+; RV32I-NEXT:    addi a1, a0, %lo(ga_8)
+; RV32I-NEXT:    lw a0, 8(a1)
+; RV32I-NEXT:    lw a1, 12(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_ga_8:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(ga_8+8)
+; RV64I-NEXT:    ld a0, %lo(ga_8+8)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @ga_8, i32 0, i32 1)
+  ret i64 %0
+}
+
+define i64 @load_ga_16() nounwind {
+; RV32I-LABEL: load_ga_16:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a0, %hi(ga_16)
+; RV32I-NEXT:    addi a1, a0, %lo(ga_16)
+; RV32I-NEXT:    lw a0, 8(a1)
+; RV32I-NEXT:    lw a1, 12(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_ga_16:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %hi(ga_16+8)
+; RV64I-NEXT:    ld a0, %lo(ga_16+8)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @ga_16, i32 0, i32 1)
+  ret i64 %0
+}
+
+; Check for folds in accesses to thread-local variables.
+
+@tl_4 = thread_local global i64 0, align 4
+@tl_8 = thread_local global i64 0, align 8
+
+define i64 @load_tl_4() nounwind {
+; RV32I-LABEL: load_tl_4:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a0, %tprel_hi(tl_4)
+; RV32I-NEXT:    add a1, a0, tp, %tprel_add(tl_4)
+; RV32I-NEXT:    lw a0, %tprel_lo(tl_4)(a1)
+; RV32I-NEXT:    addi a1, a1, %tprel_lo(tl_4)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_tl_4:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %tprel_hi(tl_4)
+; RV64I-NEXT:    add a0, a0, tp, %tprel_add(tl_4)
+; RV64I-NEXT:    ld a0, %tprel_lo(tl_4)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* @tl_4
+  ret i64 %0
+}
+
+define i64 @load_tl_8() nounwind {
+; RV32I-LABEL: load_tl_8:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a0, %tprel_hi(tl_8)
+; RV32I-NEXT:    add a1, a0, tp, %tprel_add(tl_8)
+; RV32I-NEXT:    lw a0, %tprel_lo(tl_8)(a1)
+; RV32I-NEXT:    addi a1, a1, %tprel_lo(tl_8)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_tl_8:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a0, %tprel_hi(tl_8)
+; RV64I-NEXT:    add a0, a0, tp, %tprel_add(tl_8)
+; RV64I-NEXT:    ld a0, %tprel_lo(tl_8)(a0)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* @tl_8
+  ret i64 %0
+}
+
+define i64 @load_const_ok() nounwind {
+; RV32I-LABEL: load_const_ok:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lw a0, 2040(zero)
+; RV32I-NEXT:    lw a1, 2044(zero)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_const_ok:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    ld a0, 2040(zero)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* inttoptr (i32 2040 to i64*)
+  ret i64 %0
+}
+
+define i64 @load_const_overflow() nounwind {
+; RV32I-LABEL: load_const_overflow:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a0, 1
+; RV32I-NEXT:    lw a1, -2048(a0)
+; RV32I-NEXT:    lw a0, 2044(zero)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: load_const_overflow:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    ld a0, 2044(zero)
+; RV64I-NEXT:    ret
+entry:
+  %0 = load i64, i64* inttoptr (i64 2044 to i64*)
+  ret i64 %0
+}

More information about the llvm-commits mailing list