[llvm] 36038b5 - [LoongArch] Support brcond with 21-bit offsets
via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 18 00:57:17 PDT 2022
Author: gonglingqin
Date: 2022-08-18T15:55:50+08:00
New Revision: 36038b5cb6ed52e0baab096c733f9140737ad209
URL: https://github.com/llvm/llvm-project/commit/36038b5cb6ed52e0baab096c733f9140737ad209
DIFF: https://github.com/llvm/llvm-project/commit/36038b5cb6ed52e0baab096c733f9140737ad209.diff
LOG: [LoongArch] Support brcond with 21-bit offsets
Differential Revision: https://reviews.llvm.org/D132006
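Without these patterns, a conditional branch whose condition is an integer compare against zero is selected through the generic `(brcond GPR:$rj, bb:$imm21)` pattern that is already present, so the compare result is typically materialized in a register first (e.g. via an sltui-style setcc expansion) before the bnez. The two new patterns fold `seteq`/`setne` against zero directly into BEQZ/BNEZ, which carry the 21-bit branch offset named in the title. A minimal sketch of IR that exercises the new patterns (illustrative only; `@bar` and the function name mirror the added test rather than anything in-tree):

declare void @bar()

define void @call_if_zero(i64 %v) nounwind {
start:
  %z = icmp eq i64 %v, 0
  br i1 %z, label %t, label %f

t:
  tail call void @bar()
  br label %f

f:
  ret void
}

Running this through `llc --mtriple=loongarch64` should now produce a single beqz/bnez on $a0 (the exact opcode depends on whether the condition is inverted during block layout), with no intermediate compare instruction, as checked in the added llvm/test/CodeGen/LoongArch/bnez-beqz.ll.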
Added:
llvm/test/CodeGen/LoongArch/bnez-beqz.ll
Modified:
llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index ae4e73211ec7f..fa73ea2fbc64b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -821,6 +821,11 @@ def : BccSwapPat<setule, BGEU>;
// condition was calculated elsewhere).
def : Pat<(brcond GPR:$rj, bb:$imm21), (BNEZ GPR:$rj, bb:$imm21)>;
+def : Pat<(brcond (GRLenVT (seteq GPR:$rj, 0)), bb:$imm21),
+ (BEQZ GPR:$rj, bb:$imm21)>;
+def : Pat<(brcond (GRLenVT (setne GPR:$rj, 0)), bb:$imm21),
+ (BNEZ GPR:$rj, bb:$imm21)>;
+
let isBarrier = 1, isBranch = 1, isTerminator = 1 in
def PseudoBR : Pseudo<(outs), (ins simm26_lsl2:$imm26), [(br bb:$imm26)]>,
PseudoInstExpansion<(B simm26_lsl2:$imm26)>;
diff --git a/llvm/test/CodeGen/LoongArch/bnez-beqz.ll b/llvm/test/CodeGen/LoongArch/bnez-beqz.ll
new file mode 100644
index 0000000000000..782dd121e666a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/bnez-beqz.ll
@@ -0,0 +1,151 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+declare void @bar()
+
+define void @bnez_i32(i32 signext %0) nounwind {
+; LA32-LABEL: bnez_i32:
+; LA32: # %bb.0: # %start
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: bnez $a0, .LBB0_2
+; LA32-NEXT: # %bb.1: # %t
+; LA32-NEXT: bl bar
+; LA32-NEXT: .LBB0_2: # %f
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bnez_i32:
+; LA64: # %bb.0: # %start
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: bnez $a0, .LBB0_2
+; LA64-NEXT: # %bb.1: # %t
+; LA64-NEXT: bl bar
+; LA64-NEXT: .LBB0_2: # %f
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+start:
+ %1 = icmp eq i32 %0, 0
+ br i1 %1, label %t, label %f
+
+t:
+ tail call void @bar()
+ br label %f
+
+f:
+ ret void
+}
+
+define void @beqz_i32(i32 signext %0) nounwind {
+; LA32-LABEL: beqz_i32:
+; LA32: # %bb.0: # %start
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: beqz $a0, .LBB1_2
+; LA32-NEXT: # %bb.1: # %t
+; LA32-NEXT: bl bar
+; LA32-NEXT: .LBB1_2: # %f
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: beqz_i32:
+; LA64: # %bb.0: # %start
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: beqz $a0, .LBB1_2
+; LA64-NEXT: # %bb.1: # %t
+; LA64-NEXT: bl bar
+; LA64-NEXT: .LBB1_2: # %f
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+start:
+ %1 = icmp ne i32 %0, 0
+ br i1 %1, label %t, label %f
+
+t:
+ tail call void @bar()
+ br label %f
+
+f:
+ ret void
+}
+
+define void @bnez_i64(i64 %0) nounwind {
+; LA32-LABEL: bnez_i64:
+; LA32: # %bb.0: # %start
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: bnez $a0, .LBB2_2
+; LA32-NEXT: # %bb.1: # %t
+; LA32-NEXT: bl bar
+; LA32-NEXT: .LBB2_2: # %f
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bnez_i64:
+; LA64: # %bb.0: # %start
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: bnez $a0, .LBB2_2
+; LA64-NEXT: # %bb.1: # %t
+; LA64-NEXT: bl bar
+; LA64-NEXT: .LBB2_2: # %f
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+start:
+ %1 = icmp eq i64 %0, 0
+ br i1 %1, label %t, label %f
+
+t:
+ tail call void @bar()
+ br label %f
+
+f:
+ ret void
+}
+
+define void @beqz_i64(i64 %0) nounwind {
+; LA32-LABEL: beqz_i64:
+; LA32: # %bb.0: # %start
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beqz $a0, .LBB3_2
+; LA32-NEXT: # %bb.1: # %t
+; LA32-NEXT: bl bar
+; LA32-NEXT: .LBB3_2: # %f
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: beqz_i64:
+; LA64: # %bb.0: # %start
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: beqz $a0, .LBB3_2
+; LA64-NEXT: # %bb.1: # %t
+; LA64-NEXT: bl bar
+; LA64-NEXT: .LBB3_2: # %f
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+start:
+ %1 = icmp ne i64 %0, 0
+ br i1 %1, label %t, label %f
+
+t:
+ tail call void @bar()
+ br label %f
+
+f:
+ ret void
+}