[llvm] 42b7079 - Reland "[Clang][LoongArch] Add inline asm support for constraints k/m/ZB/ZC"

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 11 05:22:00 PDT 2022


Author: Weining Lu
Date: 2022-10-11T19:51:48+08:00
New Revision: 42b70793a1df473be9c78b4141d3f3cedcbac988

URL: https://github.com/llvm/llvm-project/commit/42b70793a1df473be9c78b4141d3f3cedcbac988
DIFF: https://github.com/llvm/llvm-project/commit/42b70793a1df473be9c78b4141d3f3cedcbac988.diff

LOG: Reland "[Clang][LoongArch] Add inline asm support for constraints k/m/ZB/ZC"

Reference: https://gcc.gnu.org/onlinedocs/gccint/Machine-Constraints.html

k: A memory operand whose address is formed by a base register and
(optionally scaled) index register.

m: A memory operand whose address is formed by a base register and
offset that is suitable for use in instructions with the same
addressing mode as st.w and ld.w.

ZB: An address that is held in a general-purpose register. The offset
is zero.

ZC: A memory operand whose address is formed by a base register and
offset that is suitable for use in instructions with the same
addressing mode as ll.w and sc.w.

Note:
The INLINEASM SDNode flags in the tests below are updated because the newly
introduced enum value `Constraint_k` is inserted before `Constraint_m`, which
bumps the memory-constraint ID encoded in the flag word (see the sketch after
this file list).
  llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
  llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
  llvm/test/CodeGen/X86/callbr-asm-kill.mir
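
A rough breakdown of one of the updated values, assuming the flag-word layout
in llvm/include/llvm/IR/InlineAsm.h (operand kind and count in the low bits,
memory constraint ID at bit 16 and above); this decomposition is illustrative
and not part of the patch:

  196622 == (3 << 16) | 14   // before: Constraint_m == 3
  262158 == (4 << 16) | 14   // after:  Constraint_k == 3, Constraint_m == 4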

This patch passes `ninja check-all` on an X86 machine with all official
targets and the LoongArch target enabled.

Differential Revision: https://reviews.llvm.org/D134638

Added: 
    llvm/test/CodeGen/LoongArch/inline-asm-constraint-ZB.ll
    llvm/test/CodeGen/LoongArch/inline-asm-constraint-ZC.ll
    llvm/test/CodeGen/LoongArch/inline-asm-constraint-k.ll
    llvm/test/CodeGen/LoongArch/inline-asm-constraint-m.ll

Modified: 
    clang/lib/Basic/Targets/LoongArch.cpp
    clang/lib/Basic/Targets/LoongArch.h
    clang/test/CodeGen/LoongArch/inline-asm-constraints.c
    llvm/include/llvm/IR/InlineAsm.h
    llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp
    llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
    llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
    llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchISelLowering.h
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
    llvm/test/CodeGen/X86/callbr-asm-kill.mir

Removed: 
    


################################################################################
diff  --git a/clang/lib/Basic/Targets/LoongArch.cpp b/clang/lib/Basic/Targets/LoongArch.cpp
index cc93206dad686..1ad9c0fb8b495 100644
--- a/clang/lib/Basic/Targets/LoongArch.cpp
+++ b/clang/lib/Basic/Targets/LoongArch.cpp
@@ -67,14 +67,19 @@ bool LoongArchTargetInfo::validateAsmConstraint(
     const char *&Name, TargetInfo::ConstraintInfo &Info) const {
   // See the GCC definitions here:
   // https://gcc.gnu.org/onlinedocs/gccint/Machine-Constraints.html
+  // Note that the 'm' constraint is handled in TargetInfo.
   switch (*Name) {
-  // TODO: handle 'k', 'm', "ZB", "ZC".
   default:
     return false;
   case 'f':
     // A floating-point register (if available).
     Info.setAllowsRegister();
     return true;
+  case 'k':
+    // A memory operand whose address is formed by a base register and
+    // (optionally scaled) index register.
+    Info.setAllowsMemory();
+    return true;
   case 'l':
     // A signed 16-bit constant.
     Info.setRequiresImmediate(-32768, 32767);
@@ -87,7 +92,36 @@ bool LoongArchTargetInfo::validateAsmConstraint(
     // An unsigned 12-bit constant (for logic instructions).
     Info.setRequiresImmediate(0, 4095);
     return true;
+  case 'Z':
+    // ZB: An address that is held in a general-purpose register. The offset is
+    //     zero.
+    // ZC: A memory operand whose address is formed by a base register
+    //     and offset that is suitable for use in instructions with the same
+    //     addressing mode as ll.w and sc.w.
+    if (Name[1] == 'C' || Name[1] == 'B') {
+      Info.setAllowsMemory();
+      ++Name; // Skip over 'Z'.
+      return true;
+    }
+    return false;
+  }
+}
+
+std::string
+LoongArchTargetInfo::convertConstraint(const char *&Constraint) const {
+  std::string R;
+  switch (*Constraint) {
+  case 'Z':
+    // "ZC"/"ZB" are two-character constraints; add "^" hint for later
+    // parsing.
+    R = "^" + std::string(Constraint, 2);
+    ++Constraint;
+    break;
+  default:
+    R = TargetInfo::convertConstraint(Constraint);
+    break;
   }
+  return R;
 }
 
 void LoongArchTargetInfo::getTargetDefines(const LangOptions &Opts,

diff  --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index 5d711c6b1db4c..47ee2fc737752 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -55,6 +55,7 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
 
   bool validateAsmConstraint(const char *&Name,
                              TargetInfo::ConstraintInfo &Info) const override;
+  std::string convertConstraint(const char *&Constraint) const override;
 
   bool hasBitIntType() const override { return true; }
 };

diff  --git a/clang/test/CodeGen/LoongArch/inline-asm-constraints.c b/clang/test/CodeGen/LoongArch/inline-asm-constraints.c
index 9c9c60e484930..d7d425ea0766f 100644
--- a/clang/test/CodeGen/LoongArch/inline-asm-constraints.c
+++ b/clang/test/CodeGen/LoongArch/inline-asm-constraints.c
@@ -15,6 +15,12 @@ void test_f(void) {
   asm volatile ("" :: "f"(d));
 }
 
+void test_k(int *p, int idx) {
+// CHECK-LABEL: define{{.*}} void @test_k(ptr noundef %p, i32 noundef{{.*}} %idx)
+// CHECK: call void asm sideeffect "", "*k"(ptr elementtype(i32) %{{.*}})
+  asm volatile("" :: "k"(*(p+idx)));
+}
+
 void test_l(void) {
 // CHECK-LABEL: define{{.*}} void @test_l()
 // CHECK: call void asm sideeffect "", "l"(i32 32767)
@@ -23,6 +29,12 @@ void test_l(void) {
   asm volatile ("" :: "l"(-32768));
 }
 
+void test_m(int *p) {
+// CHECK-LABEL: define{{.*}} void @test_m(ptr noundef %p)
+// CHECK: call void asm sideeffect "", "*m"(ptr nonnull elementtype(i32) %{{.*}})
+  asm volatile("" :: "m"(*(p+4)));
+}
+
 void test_I(void) {
 // CHECK-LABEL: define{{.*}} void @test_I()
 // CHECK: call void asm sideeffect "", "I"(i32 2047)
@@ -38,3 +50,15 @@ void test_K(void) {
 // CHECK: call void asm sideeffect "", "K"(i32 0)
   asm volatile ("" :: "K"(0));
 }
+
+void test_ZB(int *p) {
+// CHECK-LABEL: define{{.*}} void @test_ZB(ptr noundef %p)
+// CHECK: call void asm sideeffect "", "*^ZB"(ptr elementtype(i32) %p)
+  asm volatile ("" :: "ZB"(*p));
+}
+
+void test_ZC(int *p) {
+// CHECK-LABEL: define{{.*}} void @test_ZC(ptr noundef %p)
+// CHECK: call void asm sideeffect "", "*^ZC"(ptr elementtype(i32) %p)
+  asm volatile ("" :: "ZC"(*p));
+}

diff  --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h
index 0a8d27aad58a2..65e5335168d69 100644
--- a/llvm/include/llvm/IR/InlineAsm.h
+++ b/llvm/include/llvm/IR/InlineAsm.h
@@ -252,6 +252,7 @@ class InlineAsm final : public Value {
     Constraint_Unknown = 0,
     Constraint_es,
     Constraint_i,
+    Constraint_k,
     Constraint_m,
     Constraint_o,
     Constraint_v,
@@ -269,6 +270,7 @@ class InlineAsm final : public Value {
     Constraint_Uy,
     Constraint_X,
     Constraint_Z,
+    Constraint_ZB,
     Constraint_ZC,
     Constraint_Zy,
 
@@ -428,6 +430,8 @@ class InlineAsm final : public Value {
       return "es";
     case InlineAsm::Constraint_i:
       return "i";
+    case InlineAsm::Constraint_k:
+      return "k";
     case InlineAsm::Constraint_m:
       return "m";
     case InlineAsm::Constraint_o:
@@ -460,6 +464,8 @@ class InlineAsm final : public Value {
       return "X";
     case InlineAsm::Constraint_Z:
       return "Z";
+    case InlineAsm::Constraint_ZB:
+      return "ZB";
     case InlineAsm::Constraint_ZC:
       return "ZC";
     case InlineAsm::Constraint_Zy:

diff  --git a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp
index 9a7fb73b736c8..9d69a26e18b13 100644
--- a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp
@@ -68,6 +68,33 @@ bool LoongArchAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
   return true;
 }
 
+bool LoongArchAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+                                                unsigned OpNo,
+                                                const char *ExtraCode,
+                                                raw_ostream &OS) {
+  // TODO: handle extra code.
+  if (ExtraCode)
+    return true;
+
+  const MachineOperand &BaseMO = MI->getOperand(OpNo);
+  // Base address must be a register.
+  if (!BaseMO.isReg())
+    return true;
+  // Print the base address register.
+  OS << "$" << LoongArchInstPrinter::getRegisterName(BaseMO.getReg());
+  // Print the offset register or immediate, if present.
+  if (OpNo + 1 < MI->getNumOperands()) {
+    const MachineOperand &OffsetMO = MI->getOperand(OpNo + 1);
+    if (OffsetMO.isReg())
+      OS << ", $" << LoongArchInstPrinter::getRegisterName(OffsetMO.getReg());
+    else if (OffsetMO.isImm())
+      OS << ", " << OffsetMO.getImm();
+    else
+      return true;
+  }
+  return false;
+}
+
 bool LoongArchAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
   AsmPrinter::runOnMachineFunction(MF);
   return true;

diff  --git a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
index 941c86633fa1b..23e29354743e6 100644
--- a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
+++ b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
@@ -38,6 +38,8 @@ class LLVM_LIBRARY_VISIBILITY LoongArchAsmPrinter : public AsmPrinter {
 
   bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                        const char *ExtraCode, raw_ostream &OS) override;
+  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+                             const char *ExtraCode, raw_ostream &OS) override;
 
   // tblgen'erated function.
   bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
index a741032da3aa7..415ca4d871cda 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
@@ -77,6 +77,59 @@ void LoongArchDAGToDAGISel::Select(SDNode *Node) {
   SelectCode(Node);
 }
 
+bool LoongArchDAGToDAGISel::SelectInlineAsmMemoryOperand(
+    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
+  switch (ConstraintID) {
+  default:
+    llvm_unreachable("unexpected asm memory constraint");
+  // Reg+Reg addressing.
+  case InlineAsm::Constraint_k:
+    OutOps.push_back(Op.getOperand(0));
+    OutOps.push_back(Op.getOperand(1));
+    return false;
+  // Reg+simm12 addressing.
+  case InlineAsm::Constraint_m: {
+    SDValue Base = Op;
+    SDValue Offset =
+        CurDAG->getTargetConstant(0, SDLoc(Op), Subtarget->getGRLenVT());
+    if (CurDAG->isBaseWithConstantOffset(Op)) {
+      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+      if (isIntN(12, CN->getSExtValue())) {
+        Base = Op.getOperand(0);
+        Offset = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Op),
+                                           Op.getValueType());
+      }
+    }
+    OutOps.push_back(Base);
+    OutOps.push_back(Offset);
+    return false;
+  }
+  case InlineAsm::Constraint_ZB:
+    OutOps.push_back(Op);
+    // No offset.
+    return false;
+  // Reg+(simm14<<2) addressing.
+  case InlineAsm::Constraint_ZC: {
+    SDValue Base = Op;
+    SDValue Offset =
+        CurDAG->getTargetConstant(0, SDLoc(Op), Subtarget->getGRLenVT());
+    if (CurDAG->isBaseWithConstantOffset(Op)) {
+      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+      if (isIntN(16, CN->getSExtValue()) &&
+          isAligned(Align(4ULL), CN->getZExtValue())) {
+        Base = Op.getOperand(0);
+        Offset = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Op),
+                                           Op.getValueType());
+      }
+    }
+    OutOps.push_back(Base);
+    OutOps.push_back(Offset);
+    return false;
+  }
+  }
+  return true;
+}
+
 bool LoongArchDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
   // If this is FrameIndex, select it directly. Otherwise just let it get
   // selected to a register independently.

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
index 4164a18fb653e..49843ac610da2 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
@@ -38,6 +38,9 @@ class LoongArchDAGToDAGISel : public SelectionDAGISel {
 
   void Select(SDNode *Node) override;
 
+  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
+                                    std::vector<SDValue> &OutOps) override;
+
   bool SelectBaseAddr(SDValue Addr, SDValue &Base);
   bool selectNonFIBaseAddr(SDValue Addr, SDValue &Base);
 

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index dae4d815a59c2..91d58b28250da 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2114,14 +2114,27 @@ LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
     case 'I':
     case 'K':
       return C_Immediate;
+    case 'k':
+      return C_Memory;
     }
   }
 
-  // TODO: handle 'k", "ZB" and "ZC".
+  if (Constraint == "ZC" || Constraint == "ZB")
+    return C_Memory;
 
+  // 'm' is handled here.
   return TargetLowering::getConstraintType(Constraint);
 }
 
+unsigned LoongArchTargetLowering::getInlineAsmMemConstraint(
+    StringRef ConstraintCode) const {
+  return StringSwitch<unsigned>(ConstraintCode)
+      .Case("k", InlineAsm::Constraint_k)
+      .Case("ZB", InlineAsm::Constraint_ZB)
+      .Case("ZC", InlineAsm::Constraint_ZC)
+      .Default(TargetLowering::getInlineAsmMemConstraint(ConstraintCode));
+}
+
 std::pair<unsigned, const TargetRegisterClass *>
 LoongArchTargetLowering::getRegForInlineAsmConstraint(
     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 37de50f15c03f..3d313c12083a0 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -170,6 +170,8 @@ class LoongArchTargetLowering : public TargetLowering {
 
   ConstraintType getConstraintType(StringRef Constraint) const override;
 
+  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;
+
   std::pair<unsigned, const TargetRegisterClass *>
   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                StringRef Constraint, MVT VT) const override;

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
index 8aedbad871951..9924e0c3e5924 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
@@ -204,7 +204,7 @@ define i32 @test_memory_constraint(i32* %a) nounwind {
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
   ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-  ; CHECK:   INLINEASM &"ldr $0, $1", 8 /* mayload attdialect */, 655370 /* regdef:GPR32common */, def %1, 196622 /* mem:m */, [[COPY]](p0)
+  ; CHECK:   INLINEASM &"ldr $0, $1", 8 /* mayload attdialect */, 655370 /* regdef:GPR32common */, def %1, 262158 /* mem:m */, [[COPY]](p0)
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
   ; CHECK:   $w0 = COPY [[COPY1]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
index c26fba50f6e26..89235e3f05de2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
@@ -203,7 +203,7 @@ define i32 @test_memory_constraint(i32 addrspace(3)* %a) nounwind {
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-  ; CHECK-NEXT:   INLINEASM &"ds_read_b32 $0, $1", 8 /* mayload attdialect */, 1966090 /* regdef:VGPR_32 */, def %1, 196622 /* mem:m */, [[COPY]](p3)
+  ; CHECK-NEXT:   INLINEASM &"ds_read_b32 $0, $1", 8 /* mayload attdialect */, 1966090 /* regdef:VGPR_32 */, def %1, 262158 /* mem:m */, [[COPY]](p3)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
   ; CHECK-NEXT:   $vgpr0 = COPY [[COPY1]](s32)
   ; CHECK-NEXT:   SI_RETURN implicit $vgpr0

diff  --git a/llvm/test/CodeGen/LoongArch/inline-asm-constraint-ZB.ll b/llvm/test/CodeGen/LoongArch/inline-asm-constraint-ZB.ll
new file mode 100644
index 0000000000000..4373855e4c008
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/inline-asm-constraint-ZB.ll
@@ -0,0 +1,49 @@
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=ASM
+; RUN: llc --mtriple=loongarch64 --print-after-isel -o /dev/null 2>&1 < %s \
+; RUN:   | FileCheck %s --check-prefix=MACHINE-INSTR
+
+;; Note amswap.w is not available on loongarch32.
+
+define void @ZB(ptr %p) nounwind {
+; ASM-LABEL: ZB:
+; ASM:       # %bb.0:
+; ASM-NEXT:    #APP
+; ASM-NEXT:    amswap.w $t0, $t1, $a0
+; ASM-NEXT:    #NO_APP
+; ASM-NEXT:    ret
+;; Make sure machine instr with this "ZB" constraint is printed correctly.
+; MACHINE-INSTR: INLINEASM{{.*}}[mem:ZB]
+  call void asm "amswap.w $$r12, $$r13, $0", "*^ZB"(ptr elementtype(i32) %p)
+  ret void
+}
+
+define void @ZB_constant_offset(ptr %p) nounwind {
+; ASM-LABEL: ZB_constant_offset:
+; ASM:       # %bb.0:
+; ASM-NEXT:    addi.d $a0, $a0, 1
+; ASM-NEXT:    #APP
+; ASM-NEXT:    amswap.w $t0, $t1, $a0
+; ASM-NEXT:    #NO_APP
+; ASM-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 1
+;; Make sure machine instr with this "ZB" constraint is printed correctly.
+; MACHINE-INSTR: INLINEASM{{.*}}[mem:ZB]
+  call void asm "amswap.w $$r12, $$r13, $0", "*^ZB"(ptr elementtype(i32) %1)
+  ret void
+}
+
+define void @ZB_variable_offset(ptr %p, i32 signext %idx) nounwind {
+; ASM-LABEL: ZB_variable_offset:
+; ASM:       # %bb.0:
+; ASM-NEXT:    add.d $a0, $a0, $a1
+; ASM-NEXT:    #APP
+; ASM-NEXT:    amswap.w $t0, $t1, $a0
+; ASM-NEXT:    #NO_APP
+; ASM-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 %idx
+;; Make sure machine instr with this "ZB" constraint is printed correctly.
+; MACHINE-INSTR: INLINEASM{{.*}}[mem:ZB]
+  call void asm "amswap.w $$r12, $$r13, $0", "*^ZB"(ptr elementtype(i32) %1)
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/inline-asm-constraint-ZC.ll b/llvm/test/CodeGen/LoongArch/inline-asm-constraint-ZC.ll
new file mode 100644
index 0000000000000..9c053c4d24857
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/inline-asm-constraint-ZC.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64
+
+define i32 @ZC_offset_neg_32769(ptr %p) nounwind {
+; LA32-LABEL: ZC_offset_neg_32769:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, -9
+; LA32-NEXT:    ori $a1, $a1, 4095
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ll.w $a0, $a0, 0
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: ZC_offset_neg_32769:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, -9
+; LA64-NEXT:    ori $a1, $a1, 4095
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ll.w $a0, $a0, 0
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 -32769
+  %2 = call i32 asm "ll.w $0, $1", "=r,*^ZC"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @ZC_offset_neg_32768(ptr %p) nounwind {
+; LA32-LABEL: ZC_offset_neg_32768:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ll.w $a0, $a0, -32768
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: ZC_offset_neg_32768:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ll.w $a0, $a0, -32768
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 -32768
+  %2 = call i32 asm "ll.w $0, $1", "=r,*^ZC"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @ZC_offset_neg_4(ptr %p) nounwind {
+; LA32-LABEL: ZC_offset_neg_4:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ll.w $a0, $a0, -4
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: ZC_offset_neg_4:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ll.w $a0, $a0, -4
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 -4
+  %2 = call i32 asm "ll.w $0, $1", "=r,*^ZC"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @ZC_offset_neg_1(ptr %p) nounwind {
+; LA32-LABEL: ZC_offset_neg_1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, -1
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ll.w $a0, $a0, 0
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: ZC_offset_neg_1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, -1
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ll.w $a0, $a0, 0
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 -1
+  %2 = call i32 asm "ll.w $0, $1", "=r,*^ZC"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @ZC_offset_0(ptr %p) nounwind {
+; LA32-LABEL: ZC_offset_0:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ll.w $a0, $a0, 0
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: ZC_offset_0:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ll.w $a0, $a0, 0
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = call i32 asm "ll.w $0, $1", "=r,*^ZC"(ptr elementtype(i32) %p)
+  ret i32 %1
+}
+
+define i32 @ZC_offset_1(ptr %p) nounwind {
+; LA32-LABEL: ZC_offset_1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 1
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ll.w $a0, $a0, 0
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: ZC_offset_1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 1
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ll.w $a0, $a0, 0
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 1
+  %2 = call i32 asm "ll.w $0, $1", "=r,*^ZC"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @ZC_offset_32764(ptr %p) nounwind {
+; LA32-LABEL: ZC_offset_32764:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ll.w $a0, $a0, 32764
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: ZC_offset_32764:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ll.w $a0, $a0, 32764
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 32764
+  %2 = call i32 asm "ll.w $0, $1", "=r,*^ZC"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @ZC_offset_32767(ptr %p) nounwind {
+; LA32-LABEL: ZC_offset_32767:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, 7
+; LA32-NEXT:    ori $a1, $a1, 4095
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ll.w $a0, $a0, 0
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: ZC_offset_32767:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, 7
+; LA64-NEXT:    ori $a1, $a1, 4095
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ll.w $a0, $a0, 0
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 32767
+  %2 = call i32 asm "ll.w $0, $1", "=r,*^ZC"(ptr elementtype(i32) %1)
+  ret i32 %2
+}

diff  --git a/llvm/test/CodeGen/LoongArch/inline-asm-constraint-k.ll b/llvm/test/CodeGen/LoongArch/inline-asm-constraint-k.ll
new file mode 100644
index 0000000000000..5ffe4b48c3f54
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/inline-asm-constraint-k.ll
@@ -0,0 +1,33 @@
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=ASM
+; RUN: llc --mtriple=loongarch64 --print-after-isel -o /dev/null 2>&1 < %s \
+; RUN:   | FileCheck %s --check-prefix=MACHINE-INSTR
+
+define i64 @k_variable_offset(ptr %p, i64 %idx) nounwind {
+; ASM-LABEL: k_variable_offset:
+; ASM:       # %bb.0:
+; ASM-NEXT:    #APP
+; ASM-NEXT:    ldx.d $a0, $a0, $a1
+; ASM-NEXT:    #NO_APP
+; ASM-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i64 %idx
+;; Make sure machine instr with this 'k' constraint is printed correctly.
+; MACHINE-INSTR: INLINEASM{{.*}}[mem:k]
+  %2 = call i64 asm "ldx.d $0, $1", "=r,*k"(ptr elementtype(i64) %1)
+  ret i64 %2
+}
+
+define i64 @k_constant_offset(ptr %p) nounwind {
+; ASM-LABEL: k_constant_offset:
+; ASM:       # %bb.0:
+; ASM-NEXT:    ori $a1, $zero, 5
+; ASM-NEXT:    #APP
+; ASM-NEXT:    ldx.d $a0, $a0, $a1
+; ASM-NEXT:    #NO_APP
+; ASM-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i64 5
+;; Make sure machine instr with this 'k' constraint is printed correctly.
+; MACHINE-INSTR: INLINEASM{{.*}}[mem:k]
+  %2 = call i64 asm "ldx.d $0, $1", "=r,*k"(ptr elementtype(i64) %1)
+  ret i64 %2
+}

diff  --git a/llvm/test/CodeGen/LoongArch/inline-asm-constraint-m.ll b/llvm/test/CodeGen/LoongArch/inline-asm-constraint-m.ll
new file mode 100644
index 0000000000000..129368aee1d31
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/inline-asm-constraint-m.ll
@@ -0,0 +1,145 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64
+
+define i32 @m_offset_neg_2049(ptr %p) nounwind {
+; LA32-LABEL: m_offset_neg_2049:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, -1
+; LA32-NEXT:    ori $a1, $a1, 2047
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: m_offset_neg_2049:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, -1
+; LA64-NEXT:    ori $a1, $a1, 2047
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 -2049
+  %2 = call i32 asm "ld.w $0, $1", "=r,*m"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @m_offset_neg_2048(ptr %p) nounwind {
+; LA32-LABEL: m_offset_neg_2048:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ld.w $a0, $a0, -2048
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: m_offset_neg_2048:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ld.w $a0, $a0, -2048
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 -2048
+  %2 = call i32 asm "ld.w $0, $1", "=r,*m"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @m_offset_neg_1(ptr %p) nounwind {
+; LA32-LABEL: m_offset_neg_1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ld.w $a0, $a0, -1
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: m_offset_neg_1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ld.w $a0, $a0, -1
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 -1
+  %2 = call i32 asm "ld.w $0, $1", "=r,*m"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @m_offset_0(ptr %p) nounwind {
+; LA32-LABEL: m_offset_0:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: m_offset_0:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = call i32 asm "ld.w $0, $1", "=r,*m"(ptr elementtype(i32) %p)
+  ret i32 %1
+}
+
+define i32 @m_offset_1(ptr %p) nounwind {
+; LA32-LABEL: m_offset_1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ld.w $a0, $a0, 1
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: m_offset_1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ld.w $a0, $a0, 1
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 1
+  %2 = call i32 asm "ld.w $0, $1", "=r,*m"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @m_offset_2047(ptr %p) nounwind {
+; LA32-LABEL: m_offset_2047:
+; LA32:       # %bb.0:
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ld.w $a0, $a0, 2047
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: m_offset_2047:
+; LA64:       # %bb.0:
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ld.w $a0, $a0, 2047
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 2047
+  %2 = call i32 asm "ld.w $0, $1", "=r,*m"(ptr elementtype(i32) %1)
+  ret i32 %2
+}
+
+define i32 @m_offset_2048(ptr %p) nounwind {
+; LA32-LABEL: m_offset_2048:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ori $a1, $zero, 2048
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    #APP
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    #NO_APP
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: m_offset_2048:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ori $a1, $zero, 2048
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    #APP
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    #NO_APP
+; LA64-NEXT:    ret
+  %1 = getelementptr inbounds i8, ptr %p, i32 2048
+  %2 = call i32 asm "ld.w $0, $1", "=r,*m"(ptr elementtype(i32) %1)
+  ret i32 %2
+}

diff  --git a/llvm/test/CodeGen/X86/callbr-asm-kill.mir b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
index bef2c48e2ed16..bd279020b65e6 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-kill.mir
+++ b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
@@ -67,7 +67,7 @@ body:             |
   ; CHECK-NEXT:   CALL64pcrel32 target-flags(x86-plt) @foo, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp
   ; CHECK-NEXT:   ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gr64 = COPY [[MOV64rm]]
-  ; CHECK-NEXT:   INLINEASM_BR &"", 9 /* sideeffect mayload attdialect */, 196654 /* mem:m */, killed [[MOV64rm]], 1, $noreg, 0, $noreg, 13 /* imm */, blockaddress(@test1, %ir-block.loop)
+  ; CHECK-NEXT:   INLINEASM_BR &"", 9 /* sideeffect mayload attdialect */, 262190 /* mem:m */, killed [[MOV64rm]], 1, $noreg, 0, $noreg, 13 /* imm */, blockaddress(@test1, %ir-block.loop)
   ; CHECK-NEXT:   JMP_1 %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.end:
@@ -87,7 +87,7 @@ body:             |
     $rdi = COPY killed %0
     CALL64pcrel32 target-flags(x86-plt) @foo, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp
     ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
-    INLINEASM_BR &"", 9 /* sideeffect mayload attdialect */, 196654 /* mem:m */, %1, 1, $noreg, 0, $noreg, 13 /* imm */, blockaddress(@test1, %ir-block.loop)
+    INLINEASM_BR &"", 9 /* sideeffect mayload attdialect */, 262190 /* mem:m */, %1, 1, $noreg, 0, $noreg, 13 /* imm */, blockaddress(@test1, %ir-block.loop)
     JMP_1 %bb.2
 
   bb.2.end:


        

