[llvm] 653d823 - [LoongArch] Merge the 12bit constant address into the offset field of the instruction
via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 8 00:58:39 PST 2023
Author: gonglingqin
Date: 2023-02-08T16:57:11+08:00
New Revision: 653d823ad22b83fea3019e94e02f8ad99a33b4c0
URL: https://github.com/llvm/llvm-project/commit/653d823ad22b83fea3019e94e02f8ad99a33b4c0
DIFF: https://github.com/llvm/llvm-project/commit/653d823ad22b83fea3019e94e02f8ad99a33b4c0.diff
LOG: [LoongArch] Merge the 12bit constant address into the offset field of the instruction
There are 12-bit offset fields in the ld.[b/h/w/d] and st.[b/h/w/d]
instructions. When the constant address fits in a signed 12-bit
immediate, the address calculation is folded into the offset field of
the instruction, using R0 (zero) as the base register.
Differential Revision: https://reviews.llvm.org/D143470
Added:
Modified:
llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
llvm/test/CodeGen/LoongArch/load-store-offset.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
index 9fe7d94acc7e0..ae7167cb5ce72 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
@@ -138,6 +138,25 @@ bool LoongArchDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
return true;
}
+// Fold constant addresses.
+bool LoongArchDAGToDAGISel::SelectAddrConstant(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ SDLoc DL(Addr);
+ MVT VT = Addr.getSimpleValueType();
+
+ if (!isa<ConstantSDNode>(Addr))
+ return false;
+
+ // If the constant is a simm12, we can fold the whole constant and use R0 as
+ // the base.
+ int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
+ if (!isInt<12>(CVal))
+ return false;
+ Base = CurDAG->getRegister(LoongArch::R0, VT);
+ Offset = CurDAG->getTargetConstant(SignExtend64<12>(CVal), DL, VT);
+ return true;
+}
+
bool LoongArchDAGToDAGISel::selectNonFIBaseAddr(SDValue Addr, SDValue &Base) {
// If this is FrameIndex, don't select it.
if (isa<FrameIndexSDNode>(Addr))
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
index 230151b5340e7..3099407aea3ee 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
@@ -42,6 +42,7 @@ class LoongArchDAGToDAGISel : public SelectionDAGISel {
std::vector<SDValue> &OutOps) override;
bool SelectBaseAddr(SDValue Addr, SDValue &Base);
+ bool SelectAddrConstant(SDValue Addr, SDValue &Base, SDValue &Offset);
bool selectNonFIBaseAddr(SDValue Addr, SDValue &Base);
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt);
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 75b2adc729d0e..0e968e040ce23 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -364,6 +364,7 @@ def ImmSubFrom32 : SDNodeXForm<imm, [{
}]>;
def BaseAddr : ComplexPattern<iPTR, 1, "SelectBaseAddr">;
+def AddrConstant : ComplexPattern<iPTR, 2, "SelectAddrConstant">;
def NonFIBaseAddr : ComplexPattern<iPTR, 1, "selectNonFIBaseAddr">;
def fma_nsz : PatFrag<(ops node:$fj, node:$fk, node:$fa),
@@ -1219,6 +1220,8 @@ def : Pat<(bitreverse (bswap GPR:$rj)), (BITREV_8B GPR:$rj)>;
multiclass LdPat<PatFrag LoadOp, LAInst Inst, ValueType vt = GRLenVT> {
def : Pat<(vt (LoadOp BaseAddr:$rj)), (Inst BaseAddr:$rj, 0)>;
+ def : Pat<(vt (LoadOp (AddrConstant GPR:$rj, simm12:$imm12))),
+ (Inst GPR:$rj, simm12:$imm12)>;
def : Pat<(vt (LoadOp (AddLike BaseAddr:$rj, simm12:$imm12))),
(Inst BaseAddr:$rj, simm12:$imm12)>;
}
@@ -1261,6 +1264,8 @@ multiclass StPat<PatFrag StoreOp, LAInst Inst, RegisterClass StTy,
ValueType vt> {
def : Pat<(StoreOp (vt StTy:$rd), BaseAddr:$rj),
(Inst StTy:$rd, BaseAddr:$rj, 0)>;
+ def : Pat<(StoreOp (vt StTy:$rs2), (AddrConstant GPR:$rj, simm12:$imm12)),
+ (Inst StTy:$rs2, GPR:$rj, simm12:$imm12)>;
def : Pat<(StoreOp (vt StTy:$rd), (AddLike BaseAddr:$rj, simm12:$imm12)),
(Inst StTy:$rd, BaseAddr:$rj, simm12:$imm12)>;
}
diff --git a/llvm/test/CodeGen/LoongArch/load-store-offset.ll b/llvm/test/CodeGen/LoongArch/load-store-offset.ll
index c744b94146cbb..68777dfe0c2a2 100644
--- a/llvm/test/CodeGen/LoongArch/load-store-offset.ll
+++ b/llvm/test/CodeGen/LoongArch/load-store-offset.ll
@@ -2,185 +2,158 @@
; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
-;; TODO: When the offset of the address is less than 12bit, merge the offset
-;; of address calculation into the offset field of the instruction.
-
define i8 @load_i8() nounwind {
; LA32-LABEL: load_i8:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 40
-; LA32-NEXT: ld.b $a0, $a0, 0
+; LA32-NEXT: ld.b $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: load_i8:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 40
-; LA64-NEXT: ld.b $a0, $a0, 0
+; LA64-NEXT: ld.b $a0, $zero, 40
; LA64-NEXT: ret
- %a = load i8, i8* inttoptr (i64 40 to i8*), align 8
+ %a = load i8, ptr inttoptr (i64 40 to ptr), align 8
ret i8 %a
}
define signext i8 @load_i8_sext() nounwind {
; LA32-LABEL: load_i8_sext:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 40
-; LA32-NEXT: ld.b $a0, $a0, 0
+; LA32-NEXT: ld.b $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: load_i8_sext:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 40
-; LA64-NEXT: ld.b $a0, $a0, 0
+; LA64-NEXT: ld.b $a0, $zero, 40
; LA64-NEXT: ret
- %a = load i8, i8* inttoptr (i64 40 to i8*), align 8
+ %a = load i8, ptr inttoptr (i64 40 to ptr), align 8
ret i8 %a
}
define i16 @load_i16() nounwind {
; LA32-LABEL: load_i16:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 40
-; LA32-NEXT: ld.h $a0, $a0, 0
+; LA32-NEXT: ld.h $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: load_i16:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 40
-; LA64-NEXT: ld.h $a0, $a0, 0
+; LA64-NEXT: ld.h $a0, $zero, 40
; LA64-NEXT: ret
- %a = load i16, i16* inttoptr (i64 40 to i16*), align 8
+ %a = load i16, ptr inttoptr (i64 40 to ptr), align 8
ret i16 %a
}
define signext i16 @load_i16_sext() nounwind {
; LA32-LABEL: load_i16_sext:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 40
-; LA32-NEXT: ld.h $a0, $a0, 0
+; LA32-NEXT: ld.h $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: load_i16_sext:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 40
-; LA64-NEXT: ld.h $a0, $a0, 0
+; LA64-NEXT: ld.h $a0, $zero, 40
; LA64-NEXT: ret
- %a = load i16, i16* inttoptr (i64 40 to i16*), align 8
+ %a = load i16, ptr inttoptr (i64 40 to ptr), align 8
ret i16 %a
}
define i32 @load_i32() nounwind {
; LA32-LABEL: load_i32:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 40
-; LA32-NEXT: ld.w $a0, $a0, 0
+; LA32-NEXT: ld.w $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: load_i32:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 40
-; LA64-NEXT: ld.w $a0, $a0, 0
+; LA64-NEXT: ld.w $a0, $zero, 40
; LA64-NEXT: ret
- %a = load i32, i32* inttoptr (i64 40 to i32*), align 8
+ %a = load i32, ptr inttoptr (i64 40 to ptr), align 8
ret i32 %a
}
define signext i32 @load_i32_sext() nounwind {
; LA32-LABEL: load_i32_sext:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 40
-; LA32-NEXT: ld.w $a0, $a0, 0
+; LA32-NEXT: ld.w $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: load_i32_sext:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 40
-; LA64-NEXT: ld.w $a0, $a0, 0
+; LA64-NEXT: ld.w $a0, $zero, 40
; LA64-NEXT: ret
- %a = load i32, i32* inttoptr (i64 40 to i32*), align 8
+ %a = load i32, ptr inttoptr (i64 40 to ptr), align 8
ret i32 %a
}
define i64 @load_i64() nounwind {
; LA32-LABEL: load_i64:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 40
-; LA32-NEXT: ld.w $a0, $a0, 0
-; LA32-NEXT: ori $a1, $zero, 44
-; LA32-NEXT: ld.w $a1, $a1, 0
+; LA32-NEXT: ld.w $a0, $zero, 40
+; LA32-NEXT: ld.w $a1, $zero, 44
; LA32-NEXT: ret
;
; LA64-LABEL: load_i64:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 40
-; LA64-NEXT: ld.d $a0, $a0, 0
+; LA64-NEXT: ld.d $a0, $zero, 40
; LA64-NEXT: ret
- %a = load i64, i64* inttoptr (i64 40 to i64*), align 8
+ %a = load i64, ptr inttoptr (i64 40 to ptr), align 8
ret i64 %a
}
define void @store_i8(i8 %v) nounwind {
; LA32-LABEL: store_i8:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a1, $zero, 40
-; LA32-NEXT: st.b $a0, $a1, 0
+; LA32-NEXT: st.b $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: store_i8:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 40
-; LA64-NEXT: st.b $a0, $a1, 0
+; LA64-NEXT: st.b $a0, $zero, 40
; LA64-NEXT: ret
- store i8 %v, i8* inttoptr (i64 40 to i8*), align 8
+ store i8 %v, ptr inttoptr (i64 40 to ptr), align 8
ret void
}
define void @store_i16(i16 %v) nounwind {
; LA32-LABEL: store_i16:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a1, $zero, 40
-; LA32-NEXT: st.h $a0, $a1, 0
+; LA32-NEXT: st.h $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: store_i16:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 40
-; LA64-NEXT: st.h $a0, $a1, 0
+; LA64-NEXT: st.h $a0, $zero, 40
; LA64-NEXT: ret
- store i16 %v, i16* inttoptr (i64 40 to i16*), align 8
+ store i16 %v, ptr inttoptr (i64 40 to ptr), align 8
ret void
}
define void @store_i32(i32 %v) nounwind {
; LA32-LABEL: store_i32:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a1, $zero, 40
-; LA32-NEXT: st.w $a0, $a1, 0
+; LA32-NEXT: st.w $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: store_i32:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 40
-; LA64-NEXT: st.w $a0, $a1, 0
+; LA64-NEXT: st.w $a0, $zero, 40
; LA64-NEXT: ret
- store i32 %v, i32* inttoptr (i64 40 to i32*), align 8
+ store i32 %v, ptr inttoptr (i64 40 to ptr), align 8
ret void
}
define void @store_i64(i64 %v) nounwind {
; LA32-LABEL: store_i64:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a2, $zero, 44
-; LA32-NEXT: st.w $a1, $a2, 0
-; LA32-NEXT: ori $a1, $zero, 40
-; LA32-NEXT: st.w $a0, $a1, 0
+; LA32-NEXT: st.w $a1, $zero, 44
+; LA32-NEXT: st.w $a0, $zero, 40
; LA32-NEXT: ret
;
; LA64-LABEL: store_i64:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 40
-; LA64-NEXT: st.d $a0, $a1, 0
+; LA64-NEXT: st.d $a0, $zero, 40
; LA64-NEXT: ret
- store i64 %v, i64* inttoptr (i64 40 to i64*), align 8
+ store i64 %v, ptr inttoptr (i64 40 to ptr), align 8
ret void
}
More information about the llvm-commits
mailing list