[clang] 7d88a05 - [Clang][LoongArch] Implement ABI lowering

Weining Lu via cfe-commits cfe-commits at lists.llvm.org
Sun Sep 18 21:07:59 PDT 2022


Author: Weining Lu
Date: 2022-09-19T12:05:00+08:00
New Revision: 7d88a05cc01c8d8950d88e96fd8516af67b10b8e

URL: https://github.com/llvm/llvm-project/commit/7d88a05cc01c8d8950d88e96fd8516af67b10b8e
DIFF: https://github.com/llvm/llvm-project/commit/7d88a05cc01c8d8950d88e96fd8516af67b10b8e.diff

LOG: [Clang][LoongArch] Implement ABI lowering

Reuse most of RISCV's implementation with several exceptions:

1. Assign the signext/zeroext attribute to arguments passed on the stack.
On RISCV, integer scalars passed in registers have signext/zeroext when
promoted, but are anyext when passed on the stack. This was defined in the
early RISCV ABI specification, but after this change [1], integers should
also be signext/zeroext when passed on the stack. So I think RISCV's ABI
lowering should be updated accordingly [2].

The LoongArch ABI spec, by contrast, states that integer scalars narrower
than GRLEN bits are zero/sign-extended regardless of whether they are
passed in registers or on the stack.
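
For example (an illustrative sketch only; the function below is not part of
this patch, but the f_scalar test in abi-lp64d.c checks the same behavior),
an integer argument that spills to the stack still keeps its extension
attribute on LoongArch:

  /* lp64d sketch: a0-a7 hold the first eight integer arguments; the ninth
     is passed on the stack but still carries signext in the IR, e.g.
     i32 noundef signext %i, as checked by f_scalar in abi-lp64d.c. */
  int callee(int a, int b, int c, int d, int e, int f, int g, int h, int i);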

2. Zero-width bit-fields are ignored.
This matches GCC's behavior, although it hasn't been documented in the ABI
spec. See https://gcc.gnu.org/r12-8294.
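
A minimal illustration (hypothetical struct; the f64x2_zsfs_s test added
below exercises the same rule with more zero-sized members):

  struct two_doubles {
    double a;
    int : 0;   /* zero-width bit-field: ignored for FAR eligibility */
    double b;
  };
  /* sketch: still flattened to fp+fp and passed/returned in two FARs,
     i.e. { double, double } in the IR, matching GCC. */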

3. `char` is signed by default.
Another difference worth mentioning is that `char` is signed by default on
LoongArch while it is unsigned on RISCV.
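
A small sketch of the consequence for extension attributes (the RISCV line
is for comparison only and is not checked by this patch):

  char passthrough(char c) { return c; }
  /* LoongArch lp64d (sketch): define signext i8 @passthrough(i8 signext %c)
     RISCV lp64d (sketch):     zeroext would be used instead, since plain
     char is unsigned there. */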

This patch also adds `_BitInt` type support to LoongArch and handles it
in LoongArchABIInfo::classifyArgumentType.
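
For instance (sketch; the LA64/LA32 lines added to ext-int-cc.c check these
cases):

  void take_bits(_BitInt(31) a, _BitInt(64) b);
  /* LA64 sketch: define void @take_bits(i31 signext %a, i64 %b)
     _BitInt narrower than GRLen is extended, GRLen-sized _BitInt is passed
     directly, and widths above 128 bits are passed indirectly. */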

[1] https://github.com/riscv-non-isa/riscv-elf-psabi-doc/commit/cec39a064ee0e5b0129973fffab7e3ad1710498f
[2] https://github.com/llvm/llvm-project/issues/57261

Differential Revision: https://reviews.llvm.org/D132285

Added: 
    clang/test/CodeGen/LoongArch/abi-lp64d.c
    clang/test/CodeGenCXX/LoongArch/abi-lp64d-struct-inherit.cpp

Modified: 
    clang/lib/Basic/Targets/LoongArch.h
    clang/lib/CodeGen/TargetInfo.cpp
    clang/test/CodeGen/ext-int-cc.c

Removed: 
    


################################################################################
diff --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index 4380f149876c..5d711c6b1db4 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -55,6 +55,8 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
 
   bool validateAsmConstraint(const char *&Name,
                              TargetInfo::ConstraintInfo &Info) const override;
+
+  bool hasBitIntType() const override { return true; }
 };
 
 class LLVM_LIBRARY_VISIBILITY LoongArch32TargetInfo

diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 370614f65e5b..2f9adb83ffa2 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -11598,6 +11598,438 @@ class BPFTargetCodeGenInfo : public TargetCodeGenInfo {
 
 }
 
+// LoongArch ABI Implementation. Documented at
+// https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
+//
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LoongArchABIInfo : public DefaultABIInfo {
+private:
+  // Size of the integer ('r') registers in bits.
+  unsigned GRLen;
+  // Size of the floating point ('f') registers in bits.
+  unsigned FRLen;
+  // Number of general-purpose argument registers.
+  static const int NumGARs = 8;
+  // Number of floating-point argument registers.
+  static const int NumFARs = 8;
+  bool detectFARsEligibleStructHelper(QualType Ty, CharUnits CurOff,
+                                      llvm::Type *&Field1Ty,
+                                      CharUnits &Field1Off,
+                                      llvm::Type *&Field2Ty,
+                                      CharUnits &Field2Off) const;
+
+public:
+  LoongArchABIInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen, unsigned FRLen)
+      : DefaultABIInfo(CGT), GRLen(GRLen), FRLen(FRLen) {}
+
+  void computeInfo(CGFunctionInfo &FI) const override;
+
+  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &GARsLeft,
+                                  int &FARsLeft) const;
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                    QualType Ty) const override;
+
+  ABIArgInfo extendType(QualType Ty) const;
+
+  bool detectFARsEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
+                                CharUnits &Field2Off, int &NeededArgGPRs,
+                                int &NeededArgFPRs) const;
+  ABIArgInfo coerceAndExpandFARsEligibleStruct(llvm::Type *Field1Ty,
+                                               CharUnits Field1Off,
+                                               llvm::Type *Field2Ty,
+                                               CharUnits Field2Off) const;
+};
+} // end anonymous namespace
+
+void LoongArchABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  QualType RetTy = FI.getReturnType();
+  if (!getCXXABI().classifyReturnType(FI))
+    FI.getReturnInfo() = classifyReturnType(RetTy);
+
+  // IsRetIndirect is true if classifyArgumentType indicated the value should
+  // be passed indirect, or if the type size is a scalar greater than 2*GRLen
+  // and not a complex type with elements <= FRLen. e.g. fp128 is passed direct
+  // in LLVM IR, relying on the backend lowering code to rewrite the argument
+  // list and pass indirectly on LA32.
+  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
+  if (!IsRetIndirect && RetTy->isScalarType() &&
+      getContext().getTypeSize(RetTy) > (2 * GRLen)) {
+    if (RetTy->isComplexType() && FRLen) {
+      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
+      IsRetIndirect = getContext().getTypeSize(EltTy) > FRLen;
+    } else {
+      // This is a normal scalar > 2*GRLen, such as fp128 on LA32.
+      IsRetIndirect = true;
+    }
+  }
+
+  // We must track the number of GARs and FARs used in order to conform to the
+  // LoongArch ABI. As GAR usage is different for variadic arguments, we must
+  // also track whether we are examining a vararg or not.
+  int GARsLeft = IsRetIndirect ? NumGARs - 1 : NumGARs;
+  int FARsLeft = FRLen ? NumFARs : 0;
+  int NumFixedArgs = FI.getNumRequiredArgs();
+
+  int ArgNum = 0;
+  for (auto &ArgInfo : FI.arguments()) {
+    ArgInfo.info = classifyArgumentType(
+        ArgInfo.type, /*IsFixed=*/ArgNum < NumFixedArgs, GARsLeft, FARsLeft);
+    ArgNum++;
+  }
+}
+
+// Returns true if the struct is a potential candidate to be passed in FARs (and
+// GARs). If this function returns true, the caller is responsible for checking
+// that if there is only a single field then that field is a float.
+bool LoongArchABIInfo::detectFARsEligibleStructHelper(
+    QualType Ty, CharUnits CurOff, llvm::Type *&Field1Ty, CharUnits &Field1Off,
+    llvm::Type *&Field2Ty, CharUnits &Field2Off) const {
+  bool IsInt = Ty->isIntegralOrEnumerationType();
+  bool IsFloat = Ty->isRealFloatingType();
+
+  if (IsInt || IsFloat) {
+    uint64_t Size = getContext().getTypeSize(Ty);
+    if (IsInt && Size > GRLen)
+      return false;
+    // Can't be eligible if larger than the FP registers. Half precision isn't
+    // currently supported on LoongArch and the ABI hasn't been confirmed, so
+    // default to the integer ABI in that case.
+    if (IsFloat && (Size > FRLen || Size < 32))
+      return false;
+    // Can't be eligible if an integer type was already found (int+int pairs
+    // are not eligible).
+    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
+      return false;
+    if (!Field1Ty) {
+      Field1Ty = CGT.ConvertType(Ty);
+      Field1Off = CurOff;
+      return true;
+    }
+    if (!Field2Ty) {
+      Field2Ty = CGT.ConvertType(Ty);
+      Field2Off = CurOff;
+      return true;
+    }
+    return false;
+  }
+
+  if (auto CTy = Ty->getAs<ComplexType>()) {
+    if (Field1Ty)
+      return false;
+    QualType EltTy = CTy->getElementType();
+    if (getContext().getTypeSize(EltTy) > FRLen)
+      return false;
+    Field1Ty = CGT.ConvertType(EltTy);
+    Field1Off = CurOff;
+    Field2Ty = Field1Ty;
+    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
+    return true;
+  }
+
+  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
+    uint64_t ArraySize = ATy->getSize().getZExtValue();
+    QualType EltTy = ATy->getElementType();
+    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
+    for (uint64_t i = 0; i < ArraySize; ++i) {
+      if (!detectFARsEligibleStructHelper(EltTy, CurOff, Field1Ty, Field1Off,
+                                          Field2Ty, Field2Off))
+        return false;
+      CurOff += EltSize;
+    }
+    return true;
+  }
+
+  if (const auto *RTy = Ty->getAs<RecordType>()) {
+    // Structures with either a non-trivial destructor or a non-trivial
+    // copy constructor are not eligible for the FP calling convention.
+    if (getRecordArgABI(Ty, CGT.getCXXABI()))
+      return false;
+    if (isEmptyRecord(getContext(), Ty, true))
+      return true;
+    const RecordDecl *RD = RTy->getDecl();
+    // Unions aren't eligible unless they're empty (which is caught above).
+    if (RD->isUnion())
+      return false;
+    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+    // If this is a C++ record, check the bases first.
+    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
+        const auto *BDecl =
+            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+        if (!detectFARsEligibleStructHelper(
+                B.getType(), CurOff + Layout.getBaseClassOffset(BDecl),
+                Field1Ty, Field1Off, Field2Ty, Field2Off))
+          return false;
+      }
+    }
+    for (const FieldDecl *FD : RD->fields()) {
+      QualType QTy = FD->getType();
+      if (FD->isBitField()) {
+        unsigned BitWidth = FD->getBitWidthValue(getContext());
+        // Zero-width bitfields are ignored.
+        if (BitWidth == 0)
+          continue;
+        // Allow a bitfield with a type greater than GRLen as long as the
+        // bitwidth is GRLen or less.
+        if (getContext().getTypeSize(QTy) > GRLen && BitWidth <= GRLen) {
+          QTy = getContext().getIntTypeForBitwidth(GRLen, false);
+        }
+      }
+
+      if (!detectFARsEligibleStructHelper(
+              QTy,
+              CurOff + getContext().toCharUnitsFromBits(
+                           Layout.getFieldOffset(FD->getFieldIndex())),
+              Field1Ty, Field1Off, Field2Ty, Field2Off))
+        return false;
+    }
+    return Field1Ty != nullptr;
+  }
+
+  return false;
+}
+
+// Determine if a struct is eligible to be passed in FARs (and GARs) (i.e., when
+// flattened it contains a single fp value, fp+fp, or int+fp of appropriate
+// size). If so, NeededFARs and NeededGARs are incremented appropriately.
+bool LoongArchABIInfo::detectFARsEligibleStruct(
+    QualType Ty, llvm::Type *&Field1Ty, CharUnits &Field1Off,
+    llvm::Type *&Field2Ty, CharUnits &Field2Off, int &NeededGARs,
+    int &NeededFARs) const {
+  Field1Ty = nullptr;
+  Field2Ty = nullptr;
+  NeededGARs = 0;
+  NeededFARs = 0;
+  if (!detectFARsEligibleStructHelper(Ty, CharUnits::Zero(), Field1Ty,
+                                      Field1Off, Field2Ty, Field2Off))
+    return false;
+  // Not really a candidate if we have a single int but no float.
+  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
+    return false;
+  if (Field1Ty && Field1Ty->isFloatingPointTy())
+    NeededFARs++;
+  else if (Field1Ty)
+    NeededGARs++;
+  if (Field2Ty && Field2Ty->isFloatingPointTy())
+    NeededFARs++;
+  else if (Field2Ty)
+    NeededGARs++;
+  return true;
+}
+
+// Call getCoerceAndExpand for the two-element flattened struct described by
+// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
+// appropriate coerceToType and unpaddedCoerceToType.
+ABIArgInfo LoongArchABIInfo::coerceAndExpandFARsEligibleStruct(
+    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
+    CharUnits Field2Off) const {
+  SmallVector<llvm::Type *, 3> CoerceElts;
+  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
+  if (!Field1Off.isZero())
+    CoerceElts.push_back(llvm::ArrayType::get(
+        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
+
+  CoerceElts.push_back(Field1Ty);
+  UnpaddedCoerceElts.push_back(Field1Ty);
+
+  if (!Field2Ty) {
+    return ABIArgInfo::getCoerceAndExpand(
+        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
+        UnpaddedCoerceElts[0]);
+  }
+
+  CharUnits Field2Align =
+      CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
+  CharUnits Field1End =
+      Field1Off +
+      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
+  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
+
+  CharUnits Padding = CharUnits::Zero();
+  if (Field2Off > Field2OffNoPadNoPack)
+    Padding = Field2Off - Field2OffNoPadNoPack;
+  else if (Field2Off != Field2Align && Field2Off > Field1End)
+    Padding = Field2Off - Field1End;
+
+  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
+
+  if (!Padding.isZero())
+    CoerceElts.push_back(llvm::ArrayType::get(
+        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
+
+  CoerceElts.push_back(Field2Ty);
+  UnpaddedCoerceElts.push_back(Field2Ty);
+
+  return ABIArgInfo::getCoerceAndExpand(
+      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked),
+      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked));
+}
+
+ABIArgInfo LoongArchABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
+                                                  int &GARsLeft,
+                                                  int &FARsLeft) const {
+  assert(GARsLeft <= NumGARs && "GAR tracking underflow");
+  Ty = useFirstFieldIfTransparentUnion(Ty);
+
+  // Structures with either a non-trivial destructor or a non-trivial
+  // copy constructor are always passed indirectly.
+  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+    if (GARsLeft)
+      GARsLeft -= 1;
+    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+                                           CGCXXABI::RAA_DirectInMemory);
+  }
+
+  // Ignore empty structs/unions.
+  if (isEmptyRecord(getContext(), Ty, true))
+    return ABIArgInfo::getIgnore();
+
+  uint64_t Size = getContext().getTypeSize(Ty);
+
+  // Pass floating point values via FARs if possible.
+  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
+      FRLen >= Size && FARsLeft) {
+    FARsLeft--;
+    return ABIArgInfo::getDirect();
+  }
+
+  // Complex types for the *f or *d ABI must be passed directly rather than
+  // using CoerceAndExpand.
+  if (IsFixed && Ty->isComplexType() && FRLen && FARsLeft >= 2) {
+    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
+    if (getContext().getTypeSize(EltTy) <= FRLen) {
+      FARsLeft -= 2;
+      return ABIArgInfo::getDirect();
+    }
+  }
+
+  if (IsFixed && FRLen && Ty->isStructureOrClassType()) {
+    llvm::Type *Field1Ty = nullptr;
+    llvm::Type *Field2Ty = nullptr;
+    CharUnits Field1Off = CharUnits::Zero();
+    CharUnits Field2Off = CharUnits::Zero();
+    int NeededGARs = 0;
+    int NeededFARs = 0;
+    bool IsCandidate = detectFARsEligibleStruct(
+        Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, NeededGARs, NeededFARs);
+    if (IsCandidate && NeededGARs <= GARsLeft && NeededFARs <= FARsLeft) {
+      GARsLeft -= NeededGARs;
+      FARsLeft -= NeededFARs;
+      return coerceAndExpandFARsEligibleStruct(Field1Ty, Field1Off, Field2Ty,
+                                               Field2Off);
+    }
+  }
+
+  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
+  // Determine the number of GARs needed to pass the current argument
+  // according to the ABI. 2*GRLen-aligned varargs are passed in "aligned"
+  // register pairs, so may consume 3 registers.
+  int NeededGARs = 1;
+  if (!IsFixed && NeededAlign == 2 * GRLen)
+    NeededGARs = 2 + (GARsLeft % 2);
+  else if (Size > GRLen && Size <= 2 * GRLen)
+    NeededGARs = 2;
+
+  if (NeededGARs > GARsLeft)
+    NeededGARs = GARsLeft;
+
+  GARsLeft -= NeededGARs;
+
+  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    // All integral types are promoted to GRLen width.
+    if (Size < GRLen && Ty->isIntegralOrEnumerationType())
+      return extendType(Ty);
+
+    if (const auto *EIT = Ty->getAs<BitIntType>()) {
+      if (EIT->getNumBits() < GRLen)
+        return extendType(Ty);
+      if (EIT->getNumBits() > 128 ||
+          (!getContext().getTargetInfo().hasInt128Type() &&
+           EIT->getNumBits() > 64))
+        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+    }
+
+    return ABIArgInfo::getDirect();
+  }
+
+  // Aggregates which are <= 2*GRLen will be passed in registers if possible,
+  // so coerce to integers.
+  if (Size <= 2 * GRLen) {
+    // Use a single GRLen int if possible, 2*GRLen if 2*GRLen alignment is
+    // required, and a 2-element GRLen array if only GRLen alignment is
+    // required.
+    if (Size <= GRLen) {
+      return ABIArgInfo::getDirect(
+          llvm::IntegerType::get(getVMContext(), GRLen));
+    }
+    if (getContext().getTypeAlign(Ty) == 2 * GRLen) {
+      return ABIArgInfo::getDirect(
+          llvm::IntegerType::get(getVMContext(), 2 * GRLen));
+    }
+    return ABIArgInfo::getDirect(
+        llvm::ArrayType::get(llvm::IntegerType::get(getVMContext(), GRLen), 2));
+  }
+  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo LoongArchABIInfo::classifyReturnType(QualType RetTy) const {
+  if (RetTy->isVoidType())
+    return ABIArgInfo::getIgnore();
+  // The rules for return and argument types are the same, so defer to
+  // classifyArgumentType.
+  int GARsLeft = 2;
+  int FARsLeft = FRLen ? 2 : 0;
+  return classifyArgumentType(RetTy, /*IsFixed=*/true, GARsLeft, FARsLeft);
+}
+
+Address LoongArchABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                                    QualType Ty) const {
+  CharUnits SlotSize = CharUnits::fromQuantity(GRLen / 8);
+
+  // Empty records are ignored for parameter passing purposes.
+  if (isEmptyRecord(getContext(), Ty, true)) {
+    Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
+                           getVAListElementType(CGF), SlotSize);
+    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+    return Addr;
+  }
+
+  auto TInfo = getContext().getTypeInfoInChars(Ty);
+
+  // Arguments bigger than 2*GRLen bytes are passed indirectly.
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty,
+                          /*IsIndirect=*/TInfo.Width > 2 * SlotSize, TInfo,
+                          SlotSize,
+                          /*AllowHigherAlign=*/true);
+}
+
+ABIArgInfo LoongArchABIInfo::extendType(QualType Ty) const {
+  int TySize = getContext().getTypeSize(Ty);
+  // LA64 ABI requires unsigned 32 bit integers to be sign extended.
+  if (GRLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+    return ABIArgInfo::getSignExtend(Ty);
+  return ABIArgInfo::getExtend(Ty);
+}
+
+namespace {
+class LoongArchTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  LoongArchTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen,
+                             unsigned FRLen)
+      : TargetCodeGenInfo(
+            std::make_unique<LoongArchABIInfo>(CGT, GRLen, FRLen)) {}
+};
+} // namespace
+
 //===----------------------------------------------------------------------===//
 // Driver code
 //===----------------------------------------------------------------------===//
@@ -11829,6 +12261,17 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
   case llvm::Triple::bpfeb:
   case llvm::Triple::bpfel:
     return SetCGInfo(new BPFTargetCodeGenInfo(Types));
+  case llvm::Triple::loongarch32:
+  case llvm::Triple::loongarch64: {
+    StringRef ABIStr = getTarget().getABI();
+    unsigned ABIFRLen = 0;
+    if (ABIStr.endswith("f"))
+      ABIFRLen = 32;
+    else if (ABIStr.endswith("d"))
+      ABIFRLen = 64;
+    return SetCGInfo(new LoongArchTargetCodeGenInfo(
+        Types, getTarget().getPointerWidth(0), ABIFRLen));
+  }
   }
 }
 

diff --git a/clang/test/CodeGen/LoongArch/abi-lp64d.c b/clang/test/CodeGen/LoongArch/abi-lp64d.c
new file mode 100644
index 000000000000..66b480a7f068
--- /dev/null
+++ b/clang/test/CodeGen/LoongArch/abi-lp64d.c
@@ -0,0 +1,488 @@
+// RUN: %clang_cc1 -triple loongarch64 -target-feature +f -target-feature +d -target-abi lp64d \
+// RUN:   -emit-llvm %s -o - | FileCheck %s
+
+/// This test checks the calling convention of the lp64d ABI.
+
+#include <stddef.h>
+#include <stdint.h>
+
+/// Part 0: C Data Types and Alignment.
+
+/// `char` datatype is signed by default.
+/// In most cases, the unsigned integer data types are zero-extended when stored
+/// in general-purpose register, and the signed integer data types are
+/// sign-extended. However, in the LP64D ABI, unsigned 32-bit types, such as
+/// unsigned int, are stored in general-purpose registers as proper sign
+/// extensions of their 32-bit values.
+
+// CHECK-LABEL: define{{.*}} zeroext i1 @check_bool()
+_Bool check_bool() { return 0; }
+
+// CHECK-LABEL: define{{.*}} signext i8 @check_char()
+char check_char() { return 0; }
+
+// CHECK-LABEL: define{{.*}} signext i16 @check_short()
+short check_short() { return 0; }
+
+// CHECK-LABEL: define{{.*}} signext i32 @check_int()
+int check_int() { return 0; }
+
+// CHECK-LABEL: define{{.*}} i64 @check_long()
+long check_long() { return 0; }
+
+// CHECK-LABEL: define{{.*}} i64 @check_longlong()
+long long check_longlong() { return 0; }
+
+// CHECK-LABEL: define{{.*}} zeroext i8 @check_uchar()
+unsigned char check_uchar() { return 0; }
+
+// CHECK-LABEL: define{{.*}} zeroext i16 @check_ushort()
+unsigned short check_ushort() { return 0; }
+
+// CHECK-LABEL: define{{.*}} signext i32 @check_uint()
+unsigned int check_uint() { return 0; }
+
+// CHECK-LABEL: define{{.*}} i64 @check_ulong()
+unsigned long check_ulong() { return 0; }
+
+// CHECK-LABEL: define{{.*}} i64 @check_ulonglong()
+unsigned long long check_ulonglong() { return 0; }
+
+// CHECK-LABEL: define{{.*}} float @check_float()
+float check_float() { return 0; }
+
+// CHECK-LABEL: define{{.*}} double @check_double()
+double check_double() { return 0; }
+
+// CHECK-LABEL: define{{.*}} fp128 @check_longdouble()
+long double check_longdouble() { return 0; }
+
+/// Part 1: Scalar arguments and return value.
+
+/// 1. 1 < WOA <= GRLEN
+/// a. Argument is passed in a single argument register, or on the stack by
+/// value if none is available.
+/// i. If the argument is floating-point type, the argument is passed in FAR. If
+/// no FAR is available, it’s passed in GAR. If no GAR is available, it’s
+/// passed on the stack. When passed in registers or on the stack,
+/// floating-point types narrower than GRLEN bits are widened to GRLEN bits,
+/// with the upper bits undefined.
+/// ii. If the argument is integer or pointer type, the argument is passed in
+/// GAR. If no GAR is available, it’s passed on the stack. When passed in
+/// registers or on the stack, the unsigned integer scalars narrower than GRLEN
+/// bits are zero-extended to GRLEN bits, and the signed integer scalars are
+/// sign-extended.
+/// 2. GRLEN < WOA ≤ 2 × GRLEN
+/// a. The argument is passed in a pair of GAR, with the low-order GRLEN bits in
+/// the lower-numbered register and the high-order GRLEN bits in the
+/// higher-numbered register. If exactly one register is available, the
+/// low-order GRLEN bits are passed in the register and the high-order GRLEN
+/// bits are passed on the stack. If no GAR is available, it’s passed on the
+/// stack.
+
+/// Note that most of these conventions are handled by the backend, so here we
+/// only check the correctness of argument (or return value)'s sign/zero
+/// extension attribute.
+
+// CHECK-LABEL: define{{.*}} signext i32 @f_scalar(i1 noundef zeroext %a, i8 noundef signext %b, i8 noundef zeroext %c, i16 noundef signext %d, i16 noundef zeroext %e, i32 noundef signext %f, i32 noundef signext %g, i64 noundef %h, i1 noundef zeroext %i, i8 noundef signext %j, i8 noundef zeroext %k, i16 noundef signext %l, i16 noundef zeroext %m, i32 noundef signext %n, i32 noundef signext %o, i64 noundef %p)
+int f_scalar(_Bool a, int8_t b, uint8_t c, int16_t d, uint16_t e, int32_t f,
+             uint32_t g, int64_t h, _Bool i, int8_t j, uint8_t k, int16_t l,
+             uint16_t m, int32_t n, uint32_t o, int64_t p) {
+  return 0;
+}
+
+/// Part 2: Structure arguments and return value.
+
+/// Empty structures are ignored by C compilers which support them as a
+/// non-standard extension(same as union arguments and return values). Bits
+/// unused due to padding, and bits past the end of a structure whose size in
+/// bits is not divisible by GRLEN, are undefined. And the layout of the
+/// structure on the stack is consistent with that in memory.
+
+/// Check empty structs are ignored.
+
+struct empty_s {};
+
+// CHECK-LABEL: define{{.*}} void @f_empty_s()
+struct empty_s f_empty_s(struct empty_s x) {
+  return x;
+}
+
+/// 1. 0 < WOA ≤ GRLEN
+/// a. The structure has only fixed-point members. If there is an available GAR,
+/// the structure is passed through the GAR by value passing; If no GAR is
+/// available, it’s passed on the stack.
+
+struct i16x4_s {
+  int16_t a, b, c, d;
+};
+
+// CHECK-LABEL: define{{.*}} i64 @f_i16x4_s(i64 %x.coerce)
+struct i16x4_s f_i16x4_s(struct i16x4_s x) {
+  return x;
+}
+
+/// b. The structure has only floating-point members:
+/// i. One floating-point member. The argument is passed in a FAR; If no FAR is
+/// available, the value is passed in a GAR; if no GAR is available, the value
+/// is passed on the stack.
+
+struct f32x1_s {
+  float a;
+};
+
+struct f64x1_s {
+  double a;
+};
+
+// CHECK-LABEL: define{{.*}} float @f_f32x1_s(float %0)
+struct f32x1_s f_f32x1_s(struct f32x1_s x) {
+  return x;
+}
+
+// CHECK-LABEL: define{{.*}} double @f_f64x1_s(double %0)
+struct f64x1_s f_f64x1_s(struct f64x1_s x) {
+  return x;
+}
+
+/// ii. Two floating-point members. The argument is passed in a pair of
+/// available FAR, with the low-order float member bits in the lower-numbered
+/// FAR and the high-order float member bits in the higher-numbered FAR. If the
+/// number of available FAR is less than 2, it’s passed in a GAR, and passed on
+/// the stack if no GAR is available.
+
+struct f32x2_s {
+  float a, b;
+};
+
+// CHECK-LABEL: define{{.*}} { float, float } @f_f32x2_s(float %0, float %1)
+struct f32x2_s f_f32x2_s(struct f32x2_s x) {
+  return x;
+}
+
+/// c. The structure has both fixed-point and floating-point members, i.e. the
+/// structure has one float member and...
+/// i. Multiple fixed-point members. If there are available GAR, the structure
+/// is passed in a GAR, and passed on the stack if no GAR is available.
+
+struct f32x1_i16x2_s {
+  float a;
+  int16_t b, c;
+};
+
+// CHECK-LABEL: define{{.*}} i64 @f_f32x1_i16x2_s(i64 %x.coerce)
+struct f32x1_i16x2_s f_f32x1_i16x2_s(struct f32x1_i16x2_s x) {
+  return x;
+}
+
+/// ii. Only one fixed-point member. If one FAR and one GAR are available, the
+/// floating-point member of the structure is passed in the FAR, and the integer
+/// member of the structure is passed in the GAR; If no floating-point register
+/// but one GAR is available, it’s passed in GAR; If no GAR is available, it’s
+/// passed on the stack.
+
+struct f32x1_i32x1_s {
+  float a;
+  int32_t b;
+};
+
+// CHECK-LABEL: define{{.*}} { float, i32 } @f_f32x1_i32x1_s(float %0, i32 %1)
+struct f32x1_i32x1_s f_f32x1_i32x1_s(struct f32x1_i32x1_s x) {
+  return x;
+}
+
+/// 2. GRLEN < WOA ≤ 2 × GRLEN
+/// a. Only fixed-point members.
+/// i. The argument is passed in a pair of available GAR, with the low-order
+/// bits in the lower-numbered GAR and the high-order bits in the
+/// higher-numbered GAR. If only one GAR is available, the low-order bits are in
+/// the GAR and the high-order bits are on the stack, and passed on the stack if
+/// no GAR is available.
+
+struct i64x2_s {
+  int64_t a, b;
+};
+
+// CHECK-LABEL: define{{.*}} [2 x i64] @f_i64x2_s([2 x i64] %x.coerce)
+struct i64x2_s f_i64x2_s(struct i64x2_s x) {
+  return x;
+}
+
+/// b. Only floating-point members.
+/// i. The structure has one long double member or one double member and two
+/// adjacent float members or 3-4 float members. The argument is passed in a
+/// pair of available GAR, with the low-order bits in the lower-numbered GAR and
+/// the high-order bits in the higher-numbered GAR. If only one GAR is
+/// available, the low-order bits are in the GAR and the high-order bits are on
+/// the stack, and passed on the stack if no GAR is available.
+
+struct f128x1_s {
+  long double a;
+};
+
+// CHECK-LABEL: define{{.*}} i128 @f_f128x1_s(i128 %x.coerce)
+struct f128x1_s f_f128x1_s(struct f128x1_s x) {
+  return x;
+}
+
+struct f64x1_f32x2_s {
+  double a;
+  float b, c;
+};
+
+// CHECK-LABEL: define{{.*}} [2 x i64] @f_f64x1_f32x2_s([2 x i64] %x.coerce)
+struct f64x1_f32x2_s f_f64x1_f32x2_s(struct f64x1_f32x2_s x) {
+  return x;
+}
+
+struct f32x3_s {
+  float a, b, c;
+};
+
+// CHECK-LABEL: define{{.*}} [2 x i64] @f_f32x3_s([2 x i64] %x.coerce)
+struct f32x3_s f_f32x3_s(struct f32x3_s x) {
+  return x;
+}
+
+struct f32x4_s {
+  float a, b, c, d;
+};
+
+// CHECK-LABEL: define{{.*}} [2 x i64] @f_f32x4_s([2 x i64] %x.coerce)
+struct f32x4_s f_f32x4_s(struct f32x4_s x) {
+  return x;
+}
+
+/// ii. The structure with two double members is passed in a pair of available
+/// FARs. If a pair of FARs is not available, it’s passed in GARs. A structure
+/// with one double member and one float member is handled the same way.
+
+struct f64x2_s {
+  double a, b;
+};
+
+// CHECK-LABEL: define{{.*}} { double, double } @f_f64x2_s(double %0, double %1)
+struct f64x2_s f_f64x2_s(struct f64x2_s x) {
+  return x;
+}
+
+/// c. Both fixed-point and floating-point members.
+/// i. The structure has one double member and only one fixed-point member.
+/// A. If one FAR and one GAR are available, the floating-point member of the
+/// structure is passed in the FAR, and the integer member of the structure is
+/// passed in the GAR; If no floating-point registers but two GARs are
+/// available, it’s passed in the two GARs; If only one GAR is available, the
+/// low-order bits are in the GAR and the high-order bits are on the stack; And
+/// it’s passed on the stack if no GAR is available.
+
+struct f64x1_i64x1_s {
+  double a;
+  int64_t b;
+};
+
+// CHECK-LABEL: define{{.*}} { double, i64 } @f_f64x1_i64x1_s(double %0, i64 %1)
+struct f64x1_i64x1_s f_f64x1_i64x1_s(struct f64x1_i64x1_s x) {
+  return x;
+}
+
+/// ii. Others
+/// A. The argument is passed in a pair of available GAR, with the low-order
+/// bits in the lower-numbered GAR and the high-order bits in the
+/// higher-numbered GAR. If only one GAR is available, the low-order bits are in
+/// the GAR and the high-order bits are on the stack, and passed on the stack if
+/// no GAR is available.
+
+struct f64x1_i32x2_s {
+  double a;
+  int32_t b, c;
+};
+
+// CHECK-LABEL: define{{.*}} [2 x i64] @f_f64x1_i32x2_s([2 x i64] %x.coerce)
+struct f64x1_i32x2_s f_f64x1_i32x2_s(struct f64x1_i32x2_s x) {
+  return x;
+}
+
+struct f32x2_i32x2_s {
+  float a, b;
+  int32_t c, d;
+};
+
+// CHECK-LABEL: define{{.*}} [2 x i64] @f_f32x2_i32x2_s([2 x i64] %x.coerce)
+struct f32x2_i32x2_s f_f32x2_i32x2_s(struct f32x2_i32x2_s x) {
+  return x;
+}
+
+/// 3. WOA > 2 × GRLEN
+/// a. It’s passed by reference and is replaced in the argument list with the
+/// address. If there is an available GAR, the reference is passed in the GAR,
+/// and passed on the stack if no GAR is available.
+
+struct i64x4_s {
+  int64_t a, b, c, d;
+};
+
+// CHECK-LABEL: define{{.*}} void @f_i64x4_s(ptr{{.*}} sret(%struct.i64x4_s) align 8 %agg.result, ptr{{.*}} %x)
+struct i64x4_s f_i64x4_s(struct i64x4_s x) {
+  return x;
+}
+
+struct f64x4_s {
+  double a, b, c, d;
+};
+
+// CHECK-LABEL: define{{.*}} void @f_f64x4_s(ptr{{.*}} sret(%struct.f64x4_s) align 8 %agg.result, ptr{{.*}} %x)
+struct f64x4_s f_f64x4_s(struct f64x4_s x) {
+  return x;
+}
+
+/// Part 3: Union arguments and return value.
+
+/// Check empty unions are ignored.
+
+union empty_u {};
+
+// CHECK-LABEL: define{{.*}} void @f_empty_u()
+union empty_u f_empty_u(union empty_u x) {
+  return x;
+}
+
+/// Union is passed in GAR or stack.
+/// 1. 0 < WOA ≤ GRLEN
+/// a. The argument is passed in a GAR, or on the stack by value if no GAR is
+/// available.
+
+union i32_f32_u {
+  int32_t a;
+  float b;
+};
+
+// CHECK-LABEL: define{{.*}} i64 @f_i32_f32_u(i64 %x.coerce)
+union i32_f32_u f_i32_f32_u(union i32_f32_u x) {
+  return x;
+}
+
+union i64_f64_u {
+  int64_t a;
+  double b;
+};
+
+// CHECK-LABEL: define{{.*}} i64 @f_i64_f64_u(i64 %x.coerce)
+union i64_f64_u f_i64_f64_u(union i64_f64_u x) {
+  return x;
+}
+
+/// 2. GRLEN < WOA ≤ 2 × GRLEN
+/// a. The argument is passed in a pair of available GAR, with the low-order
+/// bits in the lower-numbered GAR and the high-order bits in the
+/// higher-numbered GAR. If only one GAR is available, the low-order bits are in
+/// the GAR and the high-order bits are on the stack. The arguments are passed
+/// on the stack when no GAR is available.
+
+union i128_f128_u {
+  __int128_t a;
+  long double b;
+};
+
+// CHECK-LABEL: define{{.*}} i128 @f_i128_f128_u(i128 %x.coerce)
+union i128_f128_u f_i128_f128_u(union i128_f128_u x) {
+  return x;
+}
+
+/// 3. WOA > 2 × GRLEN
+/// a. It’s passed by reference and is replaced in the argument list with the
+/// address. If there is an available GAR, the reference is passed in the GAR,
+/// and passed on the stack if no GAR is available.
+
+union i64_arr3_u {
+  int64_t a[3];
+};
+
+// CHECK-LABEL: define{{.*}} void @f_i64_arr3_u(ptr{{.*}} sret(%union.i64_arr3_u) align 8 %agg.result, ptr{{.*}} %x)
+union i64_arr3_u f_i64_arr3_u(union i64_arr3_u x) {
+  return x;
+}
+
+/// Part 4: Complex number arguments and return value.
+
+/// A complex floating-point number, or a structure containing just one complex
+/// floating-point number, is passed as though it were a structure containing
+/// two floating-point reals.
+
+// CHECK-LABEL: define{{.*}} { float, float } @f_floatcomplex(float noundef %x.coerce0, float noundef %x.coerce1)
+float __complex__ f_floatcomplex(float __complex__ x) { return x; }
+
+// CHECK-LABEL: define{{.*}} { double, double } @f_doublecomplex(double noundef %x.coerce0, double noundef %x.coerce1)
+double __complex__ f_doublecomplex(double __complex__ x) { return x; }
+
+struct floatcomplex_s {
+  float __complex__ c;
+};
+// CHECK-LABEL: define{{.*}} { float, float } @f_floatcomplex_s(float %0, float %1)
+struct floatcomplex_s f_floatcomplex_s(struct floatcomplex_s x) {
+  return x;
+}
+
+struct doublecomplex_s {
+  double __complex__ c;
+};
+// CHECK-LABEL: define{{.*}} { double, double } @f_doublecomplex_s(double %0, double %1)
+struct doublecomplex_s f_doublecomplex_s(struct doublecomplex_s x) {
+  return x;
+}
+
+/// Part 5: Variadic arguments.
+
+/// Variadic arguments are passed in GARs in the same manner as named arguments.
+
+int f_va_callee(int, ...);
+
+// CHECK-LABEL: define{{.*}} void @f_va_caller()
+// CHECK: call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i64 noundef 3, double noundef 4.000000e+00, double noundef 5.000000e+00, i64 {{.*}}, [2 x i64] {{.*}})
+void f_va_caller(void) {
+  f_va_callee(1, 2, 3LL, 4.0f, 5.0, (struct i16x4_s){6, 7, 8, 9},
+              (struct i64x2_s){10, 11});
+}
+
+// CHECK-LABEL: @f_va_int(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[FMT_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT:    [[VA:%.*]] = alloca ptr, align 8
+// CHECK-NEXT:    [[V:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store ptr [[FMT:%.*]], ptr [[FMT_ADDR]], align 8
+// CHECK-NEXT:    call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT:    [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
+// CHECK-NEXT:    [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 8
+// CHECK-NEXT:    store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 8
+// CHECK-NEXT:    store i32 [[TMP0]], ptr [[V]], align 4
+// CHECK-NEXT:    call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[V]], align 4
+// CHECK-NEXT:    ret i32 [[TMP1]]
+int f_va_int(char *fmt, ...) {
+  __builtin_va_list va;
+  __builtin_va_start(va, fmt);
+  int v = __builtin_va_arg(va, int);
+  __builtin_va_end(va);
+  return v;
+}
+
+/// Part 6. Structures with zero size fields (bitfields or arrays).
+
+/// Check that zero size fields in structure are ignored.
+/// Note that this rule is not explicitly documented in ABI spec but it matches
+/// GCC's behavior.
+
+struct f64x2_zsfs_s {
+  double a;
+  int : 0;
+  __int128_t : 0;
+  int b[0];
+  __int128_t c[0];
+  double d;
+};
+
+// CHECK-LABEL: define{{.*}} { double, double } @f_f64x2_zsfs_s(double %0, double %1)
+struct f64x2_zsfs_s f_f64x2_zsfs_s(struct f64x2_zsfs_s x) {
+  return x;
+}
+

diff --git a/clang/test/CodeGen/ext-int-cc.c b/clang/test/CodeGen/ext-int-cc.c
index 451bec4c05aa..e3c80d71c2bd 100644
--- a/clang/test/CodeGen/ext-int-cc.c
+++ b/clang/test/CodeGen/ext-int-cc.c
@@ -27,6 +27,8 @@
 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -triple arm64_32-apple-ios -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64
 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -triple arm64_32-apple-ios -target-abi darwinpcs -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64DARWIN
 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -triple arm -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=ARM
+// RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -triple loongarch64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=LA64
+// RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -triple loongarch32 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=LA32
 
 // Make sure 128 and 64 bit versions are passed like integers.
 void ParamPassing(_BitInt(128) b, _BitInt(64) c) {}
@@ -57,6 +59,8 @@ void ParamPassing(_BitInt(128) b, _BitInt(64) c) {}
 // AARCH64: define{{.*}} void @ParamPassing(i128 %{{.+}}, i64 %{{.+}})
 // AARCH64DARWIN: define{{.*}} void @ParamPassing(i128 %{{.+}}, i64 %{{.+}})
 // ARM: define{{.*}} arm_aapcscc void @ParamPassing(i128* byval(i128) align 8 %{{.+}}, i64 %{{.+}})
+// LA64: define{{.*}} void @ParamPassing(i128 %{{.+}}, i64 %{{.+}})
+// LA32: define{{.*}} void @ParamPassing(i128* %{{.+}}, i64 %{{.+}})
 
 void ParamPassing2(_BitInt(127) b, _BitInt(63) c) {}
 // LIN64: define{{.*}} void @ParamPassing2(i64 %{{.+}}, i64 %{{.+}}, i64 %{{.+}})
@@ -86,6 +90,8 @@ void ParamPassing2(_BitInt(127) b, _BitInt(63) c) {}
 // AARCH64: define{{.*}} void @ParamPassing2(i127 %{{.+}}, i63 %{{.+}})
 // AARCH64DARWIN: define{{.*}} void @ParamPassing2(i127 %{{.+}}, i63 %{{.+}})
 // ARM: define{{.*}} arm_aapcscc void @ParamPassing2(i127* byval(i127) align 8 %{{.+}}, i63 %{{.+}})
+// LA64: define{{.*}} void @ParamPassing2(i127 %{{.+}}, i63 signext %{{.+}})
+// LA32: define{{.*}} void @ParamPassing2(i127* %{{.+}}, i63 %{{.+}})
 
 // Make sure we follow the signext rules for promotable integer types.
 void ParamPassing3(_BitInt(15) a, _BitInt(31) b) {}
@@ -116,6 +122,8 @@ void ParamPassing3(_BitInt(15) a, _BitInt(31) b) {}
 // AARCH64: define{{.*}} void @ParamPassing3(i15 %{{.+}}, i31 %{{.+}})
 // AARCH64DARWIN: define{{.*}} void @ParamPassing3(i15 signext %{{.+}}, i31 signext %{{.+}})
 // ARM: define{{.*}} arm_aapcscc void @ParamPassing3(i15 signext %{{.+}}, i31 signext %{{.+}})
+// LA64: define{{.*}} void @ParamPassing3(i15 signext %{{.+}}, i31 signext %{{.+}})
+// LA32: define{{.*}} void @ParamPassing3(i15 signext %{{.+}}, i31 signext %{{.+}})
 
 #if __BITINT_MAXWIDTH__ > 128
 // When supported, bit-precise types that are >128 are passed indirectly. Note,
@@ -150,6 +158,8 @@ void ParamPassing4(_BitInt(129) a) {}
 // AARCH64-NOT: define{{.*}} void @ParamPassing4(i129* byval(i129) align 8 %{{.+}})
 // AARCH64DARWIN-NOT: define{{.*}} void @ParamPassing4(i129* byval(i129) align 8 %{{.+}})
 // ARM-NOT: define{{.*}} arm_aapcscc void @ParamPassing4(i129* byval(i129) align 8 %{{.+}})
+// LA64-NOT: define{{.*}} void @ParamPassing4(i129* %{{.+}})
+// LA32-NOT: define{{.*}} void @ParamPassing4(i129* %{{.+}})
 #endif
 
 _BitInt(63) ReturnPassing(void){}
@@ -180,6 +190,8 @@ _BitInt(63) ReturnPassing(void){}
 // AARCH64: define{{.*}} i63 @ReturnPassing(
 // AARCH64DARWIN: define{{.*}} i63 @ReturnPassing(
 // ARM: define{{.*}} arm_aapcscc i63 @ReturnPassing(
+// LA64: define{{.*}} signext i63 @ReturnPassing(
+// LA32: define{{.*}} i63 @ReturnPassing(
 
 _BitInt(64) ReturnPassing2(void){}
 // LIN64: define{{.*}} i64 @ReturnPassing2(
@@ -209,6 +221,8 @@ _BitInt(64) ReturnPassing2(void){}
 // AARCH64: define{{.*}} i64 @ReturnPassing2(
 // AARCH64DARWIN: define{{.*}} i64 @ReturnPassing2(
 // ARM: define{{.*}} arm_aapcscc i64 @ReturnPassing2(
+// LA64: define{{.*}} i64 @ReturnPassing2(
+// LA32: define{{.*}} i64 @ReturnPassing2(
 
 _BitInt(127) ReturnPassing3(void){}
 // LIN64: define{{.*}} { i64, i64 } @ReturnPassing3(
@@ -240,6 +254,8 @@ _BitInt(127) ReturnPassing3(void){}
 // AARCH64: define{{.*}} i127 @ReturnPassing3(
 // AARCH64DARWIN: define{{.*}} i127 @ReturnPassing3(
 // ARM: define{{.*}} arm_aapcscc void @ReturnPassing3(i127* noalias sret
+// LA64: define{{.*}} i127 @ReturnPassing3(
+// LA32: define{{.*}} void @ReturnPassing3(i127* noalias sret
 
 _BitInt(128) ReturnPassing4(void){}
 // LIN64: define{{.*}} { i64, i64 } @ReturnPassing4(
@@ -269,6 +285,8 @@ _BitInt(128) ReturnPassing4(void){}
 // AARCH64: define{{.*}} i128 @ReturnPassing4(
 // AARCH64DARWIN: define{{.*}} i128 @ReturnPassing4(
 // ARM: define{{.*}} arm_aapcscc void @ReturnPassing4(i128* noalias sret
+// LA64: define{{.*}} i128 @ReturnPassing4(
+// LA32: define{{.*}} void @ReturnPassing4(i128* noalias sret
 
 #if __BITINT_MAXWIDTH__ > 128
 _BitInt(129) ReturnPassing5(void){}
@@ -299,6 +317,8 @@ _BitInt(129) ReturnPassing5(void){}
 // AARCH64-NOT: define{{.*}} void @ReturnPassing5(i129* noalias sret
 // AARCH64DARWIN-NOT: define{{.*}} void @ReturnPassing5(i129* noalias sret
 // ARM-NOT: define{{.*}} arm_aapcscc void @ReturnPassing5(i129* noalias sret
+// LA64-NOT: define{{.*}} void @ReturnPassing5(i129* noalias sret
+// LA32-NOT: define{{.*}} void @ReturnPassing5(i129* noalias sret
 
 // SparcV9 is odd in that it has a return-size limit of 256, not 128 or 64
 // like other platforms, so test to make sure this behavior will still work.

diff --git a/clang/test/CodeGenCXX/LoongArch/abi-lp64d-struct-inherit.cpp b/clang/test/CodeGenCXX/LoongArch/abi-lp64d-struct-inherit.cpp
new file mode 100644
index 000000000000..6d8018564bd6
--- /dev/null
+++ b/clang/test/CodeGenCXX/LoongArch/abi-lp64d-struct-inherit.cpp
@@ -0,0 +1,95 @@
+// RUN: %clang_cc1 -triple loongarch64 -target-feature +f -target-feature +d -target-abi lp64d \
+// RUN:   -emit-llvm %s -o - | FileCheck %s
+
+#include <stdint.h>
+
+/// Ensure that fields inherited from a parent struct are treated in the same
+/// way as fields directly in the child for the purposes of LoongArch ABI rules.
+
+struct parent1_int32_s {
+  int32_t i1;
+};
+
+struct child1_int32_s : parent1_int32_s {
+  int32_t i2;
+};
+
+// CHECK-LABEL: define{{.*}} i64 @_Z30int32_int32_struct_inheritance14child1_int32_s(i64 %a.coerce)
+struct child1_int32_s int32_int32_struct_inheritance(struct child1_int32_s a) {
+  return a;
+}
+
+struct parent2_int32_s {
+  int32_t i1;
+};
+
+struct child2_float_s : parent2_int32_s {
+  float f1;
+};
+
+// CHECK-LABEL: define{{.*}} { i32, float } @_Z30int32_float_struct_inheritance14child2_float_s(i32 %0, float %1)
+struct child2_float_s int32_float_struct_inheritance(struct child2_float_s a) {
+  return a;
+}
+
+struct parent3_float_s {
+  float f1;
+};
+
+struct child3_int64_s : parent3_float_s {
+  int64_t i1;
+};
+
+// CHECK-LABEL: define{{.*}} { float, i64 } @_Z30float_int64_struct_inheritance14child3_int64_s(float %0, i64 %1)
+struct child3_int64_s float_int64_struct_inheritance(struct child3_int64_s a) {
+  return a;
+}
+
+struct parent4_double_s {
+  double d1;
+};
+
+struct child4_double_s : parent4_double_s {
+  double d1;
+};
+
+// CHECK-LABEL: define{{.*}} { double, double } @_Z32double_double_struct_inheritance15child4_double_s(double %0, double %1)
+struct child4_double_s double_double_struct_inheritance(struct child4_double_s a) {
+  return a;
+}
+
+/// When virtual inheritance is used, the resulting struct isn't eligible for
+/// passing in registers.
+
+struct parent5_virtual_s {
+  int32_t i1;
+};
+
+struct child5_virtual_s : virtual parent5_virtual_s {
+  float f1;
+};
+
+// CHECK-LABEL: define{{.*}} void @_ZN16child5_virtual_sC1EOS_(ptr noundef nonnull align 8 dereferenceable(12) %this, ptr noundef nonnull align 8 dereferenceable(12) %0)
+struct child5_virtual_s int32_float_virtual_struct_inheritance(struct child5_virtual_s a) {
+  return a;
+}
+
+/// Check for correct lowering in the presence of diamond inheritance.
+
+struct parent6_float_s {
+  float f1;
+};
+
+struct child6a_s : parent6_float_s {
+};
+
+struct child6b_s : parent6_float_s {
+};
+
+struct grandchild_6_s : child6a_s, child6b_s {
+};
+
+// CHECK-LABEL: define{{.*}} { float, float } @_Z38float_float_diamond_struct_inheritance14grandchild_6_s(float %0, float %1)
+struct grandchild_6_s float_float_diamond_struct_inheritance(struct grandchild_6_s a) {
+  return a;
+}


        

