[llvm] b940fe6 - [LoongArch] Add codegen support for conditional branches
Weining Lu via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 4 21:01:49 PDT 2022
Author: wanglei
Date: 2022-07-05T11:59:58+08:00
New Revision: b940fe6fe2af2297c71c322297cfab326363534e
URL: https://github.com/llvm/llvm-project/commit/b940fe6fe2af2297c71c322297cfab326363534e
DIFF: https://github.com/llvm/llvm-project/commit/b940fe6fe2af2297c71c322297cfab326363534e.diff
LOG: [LoongArch] Add codegen support for conditional branches
Setting ISD::BR_CC to Expand makes things much easier to deal with:
we only need to match the expanded form.
Differential Revision: https://reviews.llvm.org/D128428
Added:
llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll
Modified:
llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
index 7e5aa49f227ca..b51c191880519 100644
--- a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
+++ b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
@@ -39,6 +39,10 @@ class LLVM_LIBRARY_VISIBILITY LoongArchAsmPrinter : public AsmPrinter {
// tblgen'erated function.
bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
const MachineInstr *MI);
+ // Wrapper needed for tblgenned pseudo lowering.
+ bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
+ return lowerLoongArchMachineOperandToMCOperand(MO, MCOp, *this);
+ }
};
} // end namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 1814b8ad2baa6..b3548a7d2e5e9 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -66,6 +66,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
}
+ setOperationAction(ISD::BR_CC, GRLenVT, Expand);
setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 5e2231b783d7b..09425c523c52f 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -106,7 +106,14 @@ def simm16 : Operand<GRLenVT> {
let DecoderMethod = "decodeSImmOperand<16>";
}
-def simm16_lsl2 : Operand<GRLenVT> {
+def simm16_lsl2 : Operand<GRLenVT>,
+ ImmLeaf<GRLenVT, [{return isInt<16>(Imm>>2);}]> {
+ let ParserMatchClass = SImmAsmOperand<16, "lsl2">;
+ let EncoderMethod = "getImmOpValueAsr2";
+ let DecoderMethod = "decodeSImmOperand<16, 2>";
+}
+
+def simm16_lsl2_br : Operand<OtherVT> {
let ParserMatchClass = SImmAsmOperand<16, "lsl2">;
let EncoderMethod = "getImmOpValueAsr2";
let DecoderMethod = "decodeSImmOperand<16, 2>";
@@ -117,13 +124,13 @@ def simm20 : Operand<GRLenVT> {
let DecoderMethod = "decodeSImmOperand<20>";
}
-def simm21_lsl2 : Operand<GRLenVT> {
+def simm21_lsl2 : Operand<OtherVT> {
let ParserMatchClass = SImmAsmOperand<21, "lsl2">;
let EncoderMethod = "getImmOpValueAsr2";
let DecoderMethod = "decodeSImmOperand<21, 2>";
}
-def simm26_lsl2 : Operand<GRLenVT> {
+def simm26_lsl2 : Operand<OtherVT> {
let ParserMatchClass = SImmAsmOperand<26, "lsl2">;
let EncoderMethod = "getImmOpValueAsr2";
let DecoderMethod = "decodeSImmOperand<26, 2>";
@@ -185,7 +192,7 @@ class RDTIME_2R<bits<22> op, string opstr>
: Fmt2R<op, (outs GPR:$rd, GPR:$rj), (ins), opstr, "$rd, $rj">;
class BrCC_2RI16<bits<6> op, string opstr>
- : Fmt2RI16<op, (outs), (ins GPR:$rj, GPR:$rd, simm16_lsl2:$imm16), opstr,
+ : Fmt2RI16<op, (outs), (ins GPR:$rj, GPR:$rd, simm16_lsl2_br:$imm16), opstr,
"$rj, $rd, $imm16"> {
let isBranch = 1;
let isTerminator = 1;
@@ -649,6 +656,44 @@ def : Pat<(select GPR:$cond, GPR:$t, GPR:$f),
/// Branches and jumps
+class BccPat<PatFrag CondOp, LAInst Inst>
+ : Pat<(brcond (GRLenVT (CondOp GPR:$rj, GPR:$rd)), bb:$imm16),
+ (Inst GPR:$rj, GPR:$rd, bb:$imm16)>;
+
+def : BccPat<seteq, BEQ>;
+def : BccPat<setne, BNE>;
+def : BccPat<setlt, BLT>;
+def : BccPat<setge, BGE>;
+def : BccPat<setult, BLTU>;
+def : BccPat<setuge, BGEU>;
+
+class BccSwapPat<PatFrag CondOp, LAInst InstBcc>
+ : Pat<(brcond (GRLenVT (CondOp GPR:$rd, GPR:$rj)), bb:$imm16),
+ (InstBcc GPR:$rj, GPR:$rd, bb:$imm16)>;
+
+// Condition codes that don't have matching LoongArch branch instructions, but
+// are trivially supported by swapping the two input operands.
+def : BccSwapPat<setgt, BLT>;
+def : BccSwapPat<setle, BGE>;
+def : BccSwapPat<setugt, BLTU>;
+def : BccSwapPat<setule, BGEU>;
+
+// An extra pattern is needed for a brcond without a setcc (i.e. where the
+// condition was calculated elsewhere).
+def : Pat<(brcond GPR:$rj, bb:$imm21), (BNEZ GPR:$rj, bb:$imm21)>;
+
+let isBarrier = 1, isBranch = 1, isTerminator = 1 in
+def PseudoBR : Pseudo<(outs), (ins simm26_lsl2:$imm26), [(br bb:$imm26)]>,
+ PseudoInstExpansion<(B simm26_lsl2:$imm26)>;
+
+let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
+def PseudoBRIND : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16), []>,
+ PseudoInstExpansion<(JIRL R0, GPR:$rj, simm16_lsl2:$imm16)>;
+
+def : Pat<(brind GPR:$rj), (PseudoBRIND GPR:$rj, 0)>;
+def : Pat<(brind (add GPR:$rj, simm16_lsl2:$imm16)),
+ (PseudoBRIND GPR:$rj, simm16_lsl2:$imm16)>;
+
let isBarrier = 1, isReturn = 1, isTerminator = 1 in
def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>,
PseudoInstExpansion<(JIRL R0, R1, 0)>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp b/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
index 5277cd7f42428..5c33044788615 100644
--- a/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
@@ -60,8 +60,10 @@ bool llvm::lowerLoongArchMachineOperandToMCOperand(const MachineOperand &MO,
case MachineOperand::MO_GlobalAddress:
MCOp = lowerSymbolOperand(MO, AP.getSymbolPreferLocal(*MO.getGlobal()), AP);
break;
- // TODO: lower special operands
case MachineOperand::MO_MachineBasicBlock:
+ MCOp = lowerSymbolOperand(MO, MO.getMBB()->getSymbol(), AP);
+ break;
+ // TODO: lower special operands
case MachineOperand::MO_BlockAddress:
case MachineOperand::MO_ExternalSymbol:
case MachineOperand::MO_ConstantPoolIndex:
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
new file mode 100644
index 0000000000000..f46eca268aae8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
@@ -0,0 +1,358 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefixes=ALL,LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefixes=ALL,LA64
+
+define void @foo() noreturn nounwind {
+; ALL-LABEL: foo:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: .LBB0_1: # %loop
+; ALL-NEXT: # =>This Inner Loop Header: Depth=1
+; ALL-NEXT: b .LBB0_1
+entry:
+ br label %loop
+loop:
+ br label %loop
+}
+
+define void @foo_br_eq(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_eq:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: beq $a2, $a0, .LBB1_2
+; LA32-NEXT: b .LBB1_1
+; LA32-NEXT: .LBB1_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB1_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_eq:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.wu $a2, $a1, 0
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: beq $a2, $a0, .LBB1_2
+; LA64-NEXT: b .LBB1_1
+; LA64-NEXT: .LBB1_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB1_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp eq i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+define void @foo_br_ne(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_ne:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: bne $a2, $a0, .LBB2_2
+; LA32-NEXT: b .LBB2_1
+; LA32-NEXT: .LBB2_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB2_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_ne:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.wu $a2, $a1, 0
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: bne $a2, $a0, .LBB2_2
+; LA64-NEXT: b .LBB2_1
+; LA64-NEXT: .LBB2_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB2_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp ne i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+define void @foo_br_slt(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_slt:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: blt $a2, $a0, .LBB3_2
+; LA32-NEXT: b .LBB3_1
+; LA32-NEXT: .LBB3_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB3_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_slt:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a2, $a1, 0
+; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: blt $a2, $a0, .LBB3_2
+; LA64-NEXT: b .LBB3_1
+; LA64-NEXT: .LBB3_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB3_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp slt i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+define void @foo_br_sge(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_sge:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: bge $a2, $a0, .LBB4_2
+; LA32-NEXT: b .LBB4_1
+; LA32-NEXT: .LBB4_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB4_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_sge:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a2, $a1, 0
+; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: bge $a2, $a0, .LBB4_2
+; LA64-NEXT: b .LBB4_1
+; LA64-NEXT: .LBB4_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB4_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp sge i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+define void @foo_br_ult(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_ult:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: bltu $a2, $a0, .LBB5_2
+; LA32-NEXT: b .LBB5_1
+; LA32-NEXT: .LBB5_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB5_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_ult:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.wu $a2, $a1, 0
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: bltu $a2, $a0, .LBB5_2
+; LA64-NEXT: b .LBB5_1
+; LA64-NEXT: .LBB5_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB5_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp ult i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+define void @foo_br_uge(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_uge:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: bgeu $a2, $a0, .LBB6_2
+; LA32-NEXT: b .LBB6_1
+; LA32-NEXT: .LBB6_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB6_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_uge:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.wu $a2, $a1, 0
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: bgeu $a2, $a0, .LBB6_2
+; LA64-NEXT: b .LBB6_1
+; LA64-NEXT: .LBB6_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB6_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp uge i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+;; Check for condition codes that don't have a matching instruction.
+define void @foo_br_sgt(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_sgt:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: blt $a0, $a2, .LBB7_2
+; LA32-NEXT: b .LBB7_1
+; LA32-NEXT: .LBB7_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB7_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_sgt:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a2, $a1, 0
+; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: blt $a0, $a2, .LBB7_2
+; LA64-NEXT: b .LBB7_1
+; LA64-NEXT: .LBB7_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB7_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp sgt i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+define void @foo_br_sle(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_sle:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: bge $a0, $a2, .LBB8_2
+; LA32-NEXT: b .LBB8_1
+; LA32-NEXT: .LBB8_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB8_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_sle:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a2, $a1, 0
+; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: bge $a0, $a2, .LBB8_2
+; LA64-NEXT: b .LBB8_1
+; LA64-NEXT: .LBB8_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB8_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp sle i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+define void @foo_br_ugt(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_ugt:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: bltu $a0, $a2, .LBB9_2
+; LA32-NEXT: b .LBB9_1
+; LA32-NEXT: .LBB9_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB9_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_ugt:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.wu $a2, $a1, 0
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: bltu $a0, $a2, .LBB9_2
+; LA64-NEXT: b .LBB9_1
+; LA64-NEXT: .LBB9_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB9_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp ugt i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+define void @foo_br_ule(i32 %a, ptr %b) nounwind {
+; LA32-LABEL: foo_br_ule:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: bgeu $a0, $a2, .LBB10_2
+; LA32-NEXT: b .LBB10_1
+; LA32-NEXT: .LBB10_1: # %test
+; LA32-NEXT: ld.w $a0, $a1, 0
+; LA32-NEXT: .LBB10_2: # %end
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: foo_br_ule:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.wu $a2, $a1, 0
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: bgeu $a0, $a2, .LBB10_2
+; LA64-NEXT: b .LBB10_1
+; LA64-NEXT: .LBB10_1: # %test
+; LA64-NEXT: ld.w $a0, $a1, 0
+; LA64-NEXT: .LBB10_2: # %end
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %b
+ %cc = icmp ule i32 %val, %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %b
+ br label %end
+
+end:
+ ret void
+}
+
+;; Check the case of a branch where the condition was generated in another
+;; function.
+define void @foo_br_cc(ptr %a, i1 %cc) nounwind {
+; ALL-LABEL: foo_br_cc:
+; ALL: # %bb.0:
+; ALL-NEXT: ld.w $a2, $a0, 0
+; ALL-NEXT: andi $a1, $a1, 1
+; ALL-NEXT: bnez $a1, .LBB11_2
+; ALL-NEXT: b .LBB11_1
+; ALL-NEXT: .LBB11_1: # %test
+; ALL-NEXT: ld.w $a0, $a0, 0
+; ALL-NEXT: .LBB11_2: # %end
+; ALL-NEXT: jirl $zero, $ra, 0
+ %val = load volatile i32, ptr %a
+ br i1 %cc, label %end, label %test
+test:
+ %tmp = load volatile i32, ptr %a
+ br label %end
+
+end:
+ ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll
new file mode 100644
index 0000000000000..abbd700f44f7d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll
@@ -0,0 +1,30 @@
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s
+
+define i32 @indirectbr(ptr %target) nounwind {
+; CHECK-LABEL: indirectbr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: jirl $zero, $a0, 0
+; CHECK-NEXT: .LBB0_1: # %test_label
+; CHECK-NEXT: move $a0, $zero
+; CHECK-NEXT: jirl $zero, $ra, 0
+ indirectbr ptr %target, [label %test_label]
+test_label:
+ br label %ret
+ret:
+ ret i32 0
+}
+
+define i32 @indirectbr_with_offset(ptr %a) nounwind {
+; CHECK-LABEL: indirectbr_with_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: jirl $zero, $a0, 1380
+; CHECK-NEXT: .LBB1_1: # %test_label
+; CHECK-NEXT: move $a0, $zero
+; CHECK-NEXT: jirl $zero, $ra, 0
+ %target = getelementptr inbounds i8, ptr %a, i32 1380
+ indirectbr ptr %target, [label %test_label]
+test_label:
+ br label %ret
+ret:
+ ret i32 0
+}
More information about the llvm-commits
mailing list