[llvm] a992115 - [LoongArch] Add codegen support for the binary operations

Weining Lu via llvm-commits <llvm-commits at lists.llvm.org>
Sun Jun 19 18:44:12 PDT 2022


Author: Weining Lu
Date: 2022-06-20T09:43:36+08:00
New Revision: a9921155450ab1bb2ff920bf2dee4fec8d7a8652

URL: https://github.com/llvm/llvm-project/commit/a9921155450ab1bb2ff920bf2dee4fec8d7a8652
DIFF: https://github.com/llvm/llvm-project/commit/a9921155450ab1bb2ff920bf2dee4fec8d7a8652.diff

LOG: [LoongArch] Add codegen support for the binary operations

The newly supported binary operations are sub/fadd/fsub/fmul/fdiv. Other
ops like mul/udiv/sdiv/urem/srem will be added later since they depend on
`shift` and `truncate`, which are not yet supported.

Note that `add` was added in a previous patch.

Reference:
https://llvm.org/docs/LangRef.html#binary-operations

Differential Revision: https://reviews.llvm.org/D127199
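
As a quick way to exercise the new lowering, here is a minimal sketch
(the file and function names are hypothetical; the llc invocation and
the expected instructions mirror the tests added below):

  ; demo.ll -- hypothetical example, compiled with:
  ;   llc --mtriple=loongarch64 --mattr=+d demo.ll -o -
  define i32 @demo_sub(i32 %x, i32 %y) {
    %r = sub i32 %x, %y        ; selects sub.d on loongarch64
    ret i32 %r
  }

  define double @demo_fdiv(double %x, double %y) {
    %r = fdiv double %x, %y    ; selects fdiv.d via the new FP patterns
    ret double %r
  }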

Added: 
    llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll

Modified: 
    llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/test/CodeGen/LoongArch/ir-instruction/add.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
index f47d51b857c10..d7b8e7f54a83e 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -106,3 +106,23 @@ def FSTGT_S : FP_STORE_3R<0b00111000011101100, "fstgt.s", FPR32>;
 def FSTLE_S : FP_STORE_3R<0b00111000011101110, "fstle.s", FPR32>;
 
 } // Predicates = [HasBasicF]
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+/// Generic pattern classes
+
+class PatFprFpr<SDPatternOperator OpNode, LAInst Inst, RegisterClass RegTy>
+    : Pat<(OpNode RegTy:$fj, RegTy:$fk), (Inst $fj, $fk)>;
+
+let Predicates = [HasBasicF] in {
+
+/// Float arithmetic operations
+
+def : PatFprFpr<fadd, FADD_S, FPR32>;
+def : PatFprFpr<fsub, FSUB_S, FPR32>;
+def : PatFprFpr<fmul, FMUL_S, FPR32>;
+def : PatFprFpr<fdiv, FDIV_S, FPR32>;
+
+} // Predicates = [HasBasicF]
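
Each PatFprFpr instantiation maps one two-operand floating-point
SelectionDAG node to a single machine instruction. As a minimal sketch
(mirroring the fadd.ll test added below), with these patterns in place
instruction selection turns

  define float @demo(float %x, float %y) {
    %add = fadd float %x, %y   ; matched by PatFprFpr<fadd, FADD_S, FPR32>
    ret float %add
  }

into a single `fadd.s $fa0, $fa0, $fa1`.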

diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
index 09b8347f47c57..4ad3f6e35ee77 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -131,3 +131,17 @@ def MOVGR2FR_D  : FP_MOV<0b0000000100010100101010, "movgr2fr.d", FPR64, GPR>;
 def MOVFR2GR_D  : FP_MOV<0b0000000100010100101110, "movfr2gr.d", GPR, FPR64>;
 } // Predicates = [HasBasicD, IsLA64]
 
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasBasicD] in {
+
+/// Float arithmetic operations
+
+def : PatFprFpr<fadd, FADD_D, FPR64>;
+def : PatFprFpr<fsub, FSUB_D, FPR64>;
+def : PatFprFpr<fmul, FMUL_D, FPR64>;
+def : PatFprFpr<fdiv, FDIV_D, FPR64>;
+
+} // Predicates = [HasBasicD]

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 5e0b77af03c64..714042137137a 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -31,6 +31,10 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   MVT GRLenVT = Subtarget.getGRLenVT();
   // Set up the register classes.
   addRegisterClass(GRLenVT, &LoongArch::GPRRegClass);
+  if (Subtarget.hasBasicF())
+    addRegisterClass(MVT::f32, &LoongArch::FPR32RegClass);
+  if (Subtarget.hasBasicD())
+    addRegisterClass(MVT::f64, &LoongArch::FPR64RegClass);
 
   // TODO: add necessary setOperationAction calls later.
 
@@ -39,6 +43,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
 
   setStackPointerRegisterToSaveRestore(LoongArch::R3);
 
+  setBooleanContents(ZeroOrOneBooleanContent);
+
   // Function alignments.
   const Align FunctionAlignment(4);
   setMinFunctionAlignment(FunctionAlignment);
@@ -64,16 +70,29 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
 //                     Calling Convention Implementation
 //===----------------------------------------------------------------------===//
 // FIXME: Now, we only support CallingConv::C with fixed arguments which are
-// passed with integer registers.
+// passed with integer or floating-point registers.
 const MCPhysReg ArgGPRs[] = {LoongArch::R4,  LoongArch::R5, LoongArch::R6,
                              LoongArch::R7,  LoongArch::R8, LoongArch::R9,
                              LoongArch::R10, LoongArch::R11};
+const MCPhysReg ArgFPR32s[] = {LoongArch::F0, LoongArch::F1, LoongArch::F2,
+                               LoongArch::F3, LoongArch::F4, LoongArch::F5,
+                               LoongArch::F6, LoongArch::F7};
+const MCPhysReg ArgFPR64s[] = {
+    LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
+    LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
 
 // Implements the LoongArch calling convention. Returns true upon failure.
 static bool CC_LoongArch(unsigned ValNo, MVT ValVT,
                          CCValAssign::LocInfo LocInfo, CCState &State) {
   // Allocate to a register if possible.
-  Register Reg = State.AllocateReg(ArgGPRs);
+  Register Reg;
+
+  if (ValVT == MVT::f32)
+    Reg = State.AllocateReg(ArgFPR32s);
+  else if (ValVT == MVT::f64)
+    Reg = State.AllocateReg(ArgFPR64s);
+  else
+    Reg = State.AllocateReg(ArgGPRs);
   if (Reg) {
     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, ValVT, LocInfo));
     return false;
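
With this change, fixed f32/f64 arguments are assigned to the FP
argument registers ($fa0-$fa7, i.e. F0-F7) while integer arguments keep
using the GPRs ($a0-$a7). A minimal sketch of the resulting assignment
(the mixed-argument function is hypothetical; the register choices are
inferred from the allocation order above):

  ; loongarch64 with +d; integer and FP argument registers are
  ; allocated independently, so the expected assignment is:
  ;   %a -> $a0, %x -> $fa0, %b -> $a1, %y -> $fa1
  define double @demo(i64 %a, double %x, i64 %b, double %y) {
    %s = fadd double %x, %y
    ret double %s
  }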

diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 3d47b0e24decc..265651cdc3f93 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -509,35 +509,43 @@ class PatGprImm_32<SDPatternOperator OpNode, LAInst Inst, Operand ImmOpnd>
     : Pat<(sext_inreg (OpNode GPR:$rj, ImmOpnd:$imm), i32),
           (Inst GPR:$rj, ImmOpnd:$imm)>;
 
-def : PatGprGpr<and, AND>;
-def : PatGprGpr<or, OR>;
-def : PatGprGpr<xor, XOR>;
-
-/// Branches and jumps
-
-let isBarrier = 1, isReturn = 1, isTerminator = 1 in
-def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>,
-                PseudoInstExpansion<(JIRL R0, R1, 0)>;
-
-/// LA32 patterns
+/// Simple arithmetic operations
 
 let Predicates = [IsLA32] in {
 def : PatGprGpr<add, ADD_W>;
 def : PatGprImm<add, ADDI_W, simm12>;
+def : PatGprGpr<sub, SUB_W>;
 } // Predicates = [IsLA32]
 
-/// LA64 patterns
-
 let Predicates = [IsLA64] in {
-def : Pat<(sext_inreg GPR:$rj, i32), (ADDI_W GPR:$rj, 0)>;
-
 def : PatGprGpr<add, ADD_D>;
 def : PatGprGpr_32<add, ADD_W>;
-
 def : PatGprImm<add, ADDI_D, simm12>;
 def : PatGprImm_32<add, ADDI_W, simm12>;
+def : PatGprGpr<sub, SUB_D>;
+def : PatGprGpr_32<sub, SUB_W>;
 } // Predicates = [IsLA64]
 
+def : PatGprGpr<and, AND>;
+def : PatGprGpr<or, OR>;
+def : PatGprGpr<xor, XOR>;
+
+/// sext and zext
+
+let Predicates = [IsLA64] in {
+def : Pat<(sext_inreg GPR:$rj, i32), (ADDI_W GPR:$rj, 0)>;
+} // Predicates = [IsLA64]
+
+/// Setcc
+
+def : PatGprGpr<setult, SLTU>;
+
+/// Branches and jumps
+
+let isBarrier = 1, isReturn = 1, isTerminator = 1 in
+def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>,
+                PseudoInstExpansion<(JIRL R0, R1, 0)>;
+
 //===----------------------------------------------------------------------===//
 // Assembler Pseudo Instructions
 //===----------------------------------------------------------------------===//
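
The new setult pattern matters for LA32, where 64-bit add/sub is
expanded into word-sized operations plus a compare: the carry (or
borrow) out of the low word is materialized with sltu. A minimal
sketch (the add_i64 and sub_i64 tests below check the exact sequences):

  ; On loongarch32 this i64 add is split into two add.w instructions
  ; plus an sltu that computes the carry out of the low word.
  define i64 @demo_carry(i64 %x, i64 %y) {
    %s = add i64 %x, %y
    ret i64 %s
  }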

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
index b8a2de2678a06..bfa1a59756b81 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
@@ -1,17 +1,183 @@
-; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=CHECK32
-; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=CHECK64
-
-define i32 @addRR(i32 %x, i32 %y) {
-; CHECK32-LABEL: addRR:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    add.w $a0, $a1, $a0
-; CHECK32-NEXT:    jirl $zero, $ra, 0
-;
-; CHECK64-LABEL: addRR:
-; CHECK64:       # %bb.0: # %entry
-; CHECK64-NEXT:    add.d $a0, $a1, $a0
-; CHECK64-NEXT:    jirl $zero, $ra, 0
-entry:
-  %add = add nsw i32 %y, %x
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'add' LLVM IR: https://llvm.org/docs/LangRef.html#add-instruction
+
+define i1 @add_i1(i1 %x, i1 %y) {
+; LA32-LABEL: add_i1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i1 %x, %y
+  ret i1 %add
+}
+
+define i8 @add_i8(i8 %x, i8 %y) {
+; LA32-LABEL: add_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i8 %x, %y
+  ret i8 %add
+}
+
+define i16 @add_i16(i16 %x, i16 %y) {
+; LA32-LABEL: add_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i16 %x, %y
+  ret i16 %add
+}
+
+define i32 @add_i32(i32 %x, i32 %y) {
+; LA32-LABEL: add_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i32 %x, %y
+  ret i32 %add
+}
+
+;; Match the pattern:
+;; def : PatGprGpr_32<add, ADD_W>;
+define signext i32 @add_i32_sext(i32 %x, i32 %y) {
+; LA32-LABEL: add_i32_sext:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i32_sext:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i32 %x, %y
   ret i32 %add
 }
+
+define i64 @add_i64(i64 %x, i64 %y) {
+; LA32-LABEL: add_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a1, $a1, $a3
+; LA32-NEXT:    add.w $a2, $a0, $a2
+; LA32-NEXT:    sltu $a0, $a2, $a0
+; LA32-NEXT:    add.w $a1, $a1, $a0
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i64 %x, %y
+  ret i64 %add
+}
+
+define i1 @add_i1_3(i1 %x) {
+; LA32-LABEL: add_i1_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i1_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i1 %x, 3
+  ret i1 %add
+}
+
+define i8 @add_i8_3(i8 %x) {
+; LA32-LABEL: add_i8_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i8_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i8 %x, 3
+  ret i8 %add
+}
+
+define i16 @add_i16_3(i16 %x) {
+; LA32-LABEL: add_i16_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i16_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i16 %x, 3
+  ret i16 %add
+}
+
+define i32 @add_i32_3(i32 %x) {
+; LA32-LABEL: add_i32_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i32_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i32 %x, 3
+  ret i32 %add
+}
+
+;; Match the pattern:
+;; def : PatGprImm_32<add, ADDI_W, simm12>;
+define signext i32 @add_i32_3_sext(i32 %x) {
+; LA32-LABEL: add_i32_3_sext:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i32_3_sext:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.w $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i32 %x, 3
+  ret i32 %add
+}
+
+define i64 @add_i64_3(i64 %x) {
+; LA32-LABEL: add_i64_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $a0, 3
+; LA32-NEXT:    sltu $a0, $a2, $a0
+; LA32-NEXT:    add.w $a1, $a1, $a0
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i64_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i64 %x, 3
+  ret i64 %add
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
new file mode 100644
index 0000000000000..15e1118d2e560
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
@@ -0,0 +1,32 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'fadd' LLVM IR: https://llvm.org/docs/LangRef.html#fadd-instruction
+
+define float @fadd_s(float %x, float %y) {
+; LA32-LABEL: fadd_s:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fadd_s:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = fadd float %x, %y
+  ret float %add
+}
+
+define double @fadd_d(double %x, double %y) {
+; LA32-LABEL: fadd_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fadd.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fadd_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fadd.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = fadd double %x, %y
+  ret double %add
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
new file mode 100644
index 0000000000000..9c3f85950d5d4
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
@@ -0,0 +1,32 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'fdiv' LLVM IR: https://llvm.org/docs/LangRef.html#fdiv-instruction
+
+define float @fdiv_s(float %x, float %y) {
+; LA32-LABEL: fdiv_s:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fdiv.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fdiv_s:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fdiv.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %div = fdiv float %x, %y
+  ret float %div
+}
+
+define double @fdiv_d(double %x, double %y) {
+; LA32-LABEL: fdiv_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fdiv.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fdiv_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fdiv.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %div = fdiv double %x, %y
+  ret double %div
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
new file mode 100644
index 0000000000000..78ee031c13015
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
@@ -0,0 +1,32 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'fmul' LLVM IR: https://llvm.org/docs/LangRef.html#fmul-instruction
+
+define float @fmul_s(float %x, float %y) {
+; LA32-LABEL: fmul_s:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fmul.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fmul_s:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fmul.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %mul = fmul float %x, %y
+  ret float %mul
+}
+
+define double @fmul_d(double %x, double %y) {
+; LA32-LABEL: fmul_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fmul.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fmul_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fmul.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %mul = fmul double %x, %y
+  ret double %mul
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
new file mode 100644
index 0000000000000..9e7d7964ef05d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
@@ -0,0 +1,32 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'fsub' LLVM IR: https://llvm.org/docs/LangRef.html#fsub-instruction
+
+define float @fsub_s(float %x, float %y) {
+; LA32-LABEL: fsub_s:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fsub.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fsub_s:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fsub.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = fsub float %x, %y
+  ret float %sub
+}
+
+define double @fsub_d(double %x, double %y) {
+; LA32-LABEL: fsub_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fsub.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fsub_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fsub.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = fsub double %x, %y
+  ret double %sub
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll
new file mode 100644
index 0000000000000..dfa55c29ebaed
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll
@@ -0,0 +1,93 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'sub' LLVM IR: https://llvm.org/docs/LangRef.html#sub-instruction
+
+define i1 @sub_i1(i1 %x, i1 %y) {
+; LA32-LABEL: sub_i1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i1 %x, %y
+  ret i1 %sub
+}
+
+define i8 @sub_i8(i8 %x, i8 %y) {
+; LA32-LABEL: sub_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i8 %x, %y
+  ret i8 %sub
+}
+
+define i16 @sub_i16(i16 %x, i16 %y) {
+; LA32-LABEL: sub_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i16 %x, %y
+  ret i16 %sub
+}
+
+define i32 @sub_i32(i32 %x, i32 %y) {
+; LA32-LABEL: sub_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i32 %x, %y
+  ret i32 %sub
+}
+
+;; Match the pattern:
+;; def : PatGprGpr_32<sub, SUB_W>;
+define signext i32 @sub_i32_sext(i32 %x, i32 %y) {
+; LA32-LABEL: sub_i32_sext:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i32_sext:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i32 %x, %y
+  ret i32 %sub
+}
+
+define i64 @sub_i64(i64 %x, i64 %y) {
+; LA32-LABEL: sub_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a1, $a1, $a3
+; LA32-NEXT:    sltu $a3, $a0, $a2
+; LA32-NEXT:    sub.w $a1, $a1, $a3
+; LA32-NEXT:    sub.w $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i64 %x, %y
+  ret i64 %sub
+}


        

