[llvm] d3580c2 - [LoongArch] Add codegen support for not

Weining Lu via llvm-commits <llvm-commits at lists.llvm.org>
Mon Aug 8 23:19:45 PDT 2022


Author: gonglingqin
Date: 2022-08-09T14:05:09+08:00
New Revision: d3580c2eb6ec95256a65ce7f66346351e9852eab

URL: https://github.com/llvm/llvm-project/commit/d3580c2eb6ec95256a65ce7f66346351e9852eab
DIFF: https://github.com/llvm/llvm-project/commit/d3580c2eb6ec95256a65ce7f66346351e9852eab.diff

LOG: [LoongArch] Add codegen support for not

Differential Revision: https://reviews.llvm.org/D131384
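
For context: LLVM IR has no dedicated "not" instruction; it is written as
an xor with -1 (all ones). The patterns added here match that idiom, alone
and in combination with or/and, onto LoongArch's NOR, ORN and ANDN
instructions. A minimal sketch of the plain case (function name
hypothetical):

    ; Selected as a single "nor $a0, $a0, $zero" on both loongarch32 and
    ; loongarch64, as the nor_zero_* tests below check.
    define i32 @demo_not(i32 %a) {
      %neg = xor i32 %a, -1
      ret i32 %neg
    }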

Added: 
    llvm/test/CodeGen/LoongArch/not.ll

Modified: 
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 41df630b6b33..de9108608066 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -661,6 +661,10 @@ def : PatGprGpr<or, OR>;
 def : PatGprImm<or, ORI, uimm12>;
 def : PatGprGpr<xor, XOR>;
 def : PatGprImm<xor, XORI, uimm12>;
+def : Pat<(not GPR:$rj), (NOR GPR:$rj, R0)>;
+def : Pat<(not (or GPR:$rj, GPR:$rk)), (NOR GPR:$rj, GPR:$rk)>;
+def : Pat<(or GPR:$rj, (not GPR:$rk)), (ORN GPR:$rj, GPR:$rk)>;
+def : Pat<(and GPR:$rj, (not GPR:$rk)), (ANDN GPR:$rj, GPR:$rk)>;
 
 /// FrameIndex calculations
 let Predicates = [IsLA32] in {

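Each of the four new patterns folds one "not" idiom into a single
instruction:

    not x           -> NOR  x, r0      (i.e. xor x, -1)
    not (or x, y)   -> NOR  x, y
    or  x, (not y)  -> ORN  x, y
    and x, (not y)  -> ANDN x, y

NOR computes ~(rj | rk), so NOR against the zero register r0 yields a
plain bitwise not. On loongarch32, the i64 cases simply apply the same
instruction to each 32-bit register half, as the i64 tests in not.ll
below show.
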
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
index 9082143ff7a7..2bcf45d4f0eb 100644
--- a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
@@ -277,11 +277,9 @@ define i64 @caller_small_scalar_ret() nounwind {
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; CHECK-NEXT:    bl callee_small_scalar_ret
-; CHECK-NEXT:    addi.w $a2, $zero, -1
-; CHECK-NEXT:    xor $a1, $a1, $a2
 ; CHECK-NEXT:    addi.w $a2, $zero, -2
 ; CHECK-NEXT:    xor $a0, $a0, $a2
-; CHECK-NEXT:    or $a0, $a0, $a1
+; CHECK-NEXT:    orn $a0, $a0, $a1
 ; CHECK-NEXT:    sltui $a0, $a0, 1
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16

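With the new ORN pattern, the sequence above that materialized -1 into
$a2, xor'ed it into $a1, and then or'ed the result is gone: the "or of a
not" now folds into a single orn, two instructions shorter.
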
diff --git a/llvm/test/CodeGen/LoongArch/not.ll b/llvm/test/CodeGen/LoongArch/not.ll
new file mode 100644
index 000000000000..932ec39e5199
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/not.ll
@@ -0,0 +1,243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+define i8 @nor_i8(i8 %a, i8 %b) nounwind {
+; LA32-LABEL: nor_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    nor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: nor_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    nor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %or = or i8 %a, %b
+  %neg = xor i8 %or, -1
+  ret i8 %neg
+}
+
+define i16 @nor_i16(i16 %a, i16 %b) nounwind {
+; LA32-LABEL: nor_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    nor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: nor_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    nor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %or = or i16 %a, %b
+  %neg = xor i16 %or, -1
+  ret i16 %neg
+}
+
+define i32 @nor_i32(i32 %a, i32 %b) nounwind {
+; LA32-LABEL: nor_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    nor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: nor_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    nor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %or = or i32 %a, %b
+  %neg = xor i32 %or, -1
+  ret i32 %neg
+}
+
+define i64 @nor_i64(i64 %a, i64 %b) nounwind {
+; LA32-LABEL: nor_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    nor $a0, $a0, $a2
+; LA32-NEXT:    nor $a1, $a1, $a3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: nor_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    nor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %or = or i64 %a, %b
+  %neg = xor i64 %or, -1
+  ret i64 %neg
+}
+
+define i8 @nor_zero_i8(i8 %a) nounwind {
+; LA32-LABEL: nor_zero_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    nor $a0, $a0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: nor_zero_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    nor $a0, $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i8 %a, -1
+  ret i8 %neg
+}
+
+define i16 @nor_zero_i16(i16 %a) nounwind {
+; LA32-LABEL: nor_zero_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    nor $a0, $a0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: nor_zero_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    nor $a0, $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i16 %a, -1
+  ret i16 %neg
+}
+
+define i32 @nor_zero_i32(i32 %a) nounwind {
+; LA32-LABEL: nor_zero_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    nor $a0, $a0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: nor_zero_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    nor $a0, $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i32 %a, -1
+  ret i32 %neg
+}
+
+define i64 @nor_zero_i64(i64 %a) nounwind {
+; LA32-LABEL: nor_zero_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    nor $a0, $a0, $zero
+; LA32-NEXT:    nor $a1, $a1, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: nor_zero_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    nor $a0, $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i64 %a, -1
+  ret i64 %neg
+}
+
+define i8 @orn_i8(i8 %a, i8 %b) nounwind {
+; LA32-LABEL: orn_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    orn $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: orn_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    orn $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i8 %b, -1
+  %or = or i8 %neg, %a
+  ret i8 %or
+}
+
+define i16 @orn_i16(i16 %a, i16 %b) nounwind {
+; LA32-LABEL: orn_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    orn $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: orn_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    orn $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i16 %b, -1
+  %or = or i16 %neg, %a
+  ret i16 %or
+}
+
+define i32 @orn_i32(i32 %a, i32 %b) nounwind {
+; LA32-LABEL: orn_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    orn $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: orn_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    orn $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i32 %b, -1
+  %or = or i32 %neg, %a
+  ret i32 %or
+}
+
+define i64 @orn_i64(i64 %a, i64 %b) nounwind {
+; LA32-LABEL: orn_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    orn $a0, $a0, $a2
+; LA32-NEXT:    orn $a1, $a1, $a3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: orn_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    orn $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i64 %b, -1
+  %or = or i64 %neg, %a
+  ret i64 %or
+}
+
+define i8 @andn_i8(i8 %a, i8 %b) nounwind {
+; LA32-LABEL: andn_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andn $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: andn_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andn $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i8 %b, -1
+  %and = and i8 %neg, %a
+  ret i8 %and
+}
+
+define i16 @andn_i16(i16 %a, i16 %b) nounwind {
+; LA32-LABEL: andn_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andn $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: andn_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andn $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i16 %b, -1
+  %and = and i16 %neg, %a
+  ret i16 %and
+}
+
+define i32 @andn_i32(i32 %a, i32 %b) nounwind {
+; LA32-LABEL: andn_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andn $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: andn_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andn $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i32 %b, -1
+  %and = and i32 %neg, %a
+  ret i32 %and
+}
+
+define i64 @andn_i64(i64 %a, i64 %b) nounwind {
+; LA32-LABEL: andn_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andn $a0, $a0, $a2
+; LA32-NEXT:    andn $a1, $a1, $a3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: andn_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andn $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %neg = xor i64 %b, -1
+  %and = and i64 %neg, %a
+  ret i64 %and
+}