[llvm-branch-commits] [cfe-branch] r102024 - in /cfe/branches/Apple/williamson: ./ lib/CodeGen/TargetInfo.cpp test/CodeGen/x86_32-arguments.c test/CodeGen/x86_64-arguments.c
Daniel Dunbar
daniel at zuster.org
Wed Apr 21 13:10:11 PDT 2010
Author: ddunbar
Date: Wed Apr 21 15:10:10 2010
New Revision: 102024
URL: http://llvm.org/viewvc/llvm-project?rev=102024&view=rev
Log:
Merge fixes for byval alignment. <rdar://problem/7878226>
--- Merging r102015 into '.':
U lib/CodeGen/TargetInfo.cpp
--- Merging r102016 into '.':
U test/CodeGen/x86_64-arguments.c
--- Merging r102018 into '.':
U test/CodeGen/x86_32-arguments.c
--- Merging r102019 into '.':
G test/CodeGen/x86_64-arguments.c
G test/CodeGen/x86_32-arguments.c
G lib/CodeGen/TargetInfo.cpp
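For context, the merged change emits an explicit byval alignment only when a
type's natural alignment exceeds the target's minimum ABI stack alignment
(4 bytes on i386, 8 bytes on x86-64); at or below that minimum the argument
stays plain byval and the back-end's default is trusted, which keeps the IR
cleaner. A minimal stand-alone sketch of that rule, with an illustrative
helper name and a bit-based alignment input matching ASTContext::getTypeAlign
(this helper is not part of the patch):

// Sketch only: mirrors the heuristic added to getIndirectResult.
// Returns the explicit alignment (in bytes) to attach to a byval argument,
// or 0 to let the back-end apply the minimum ABI alignment.
static unsigned getByValAlignment(unsigned TypeAlignInBits,
                                  unsigned MinABIAlignInBytes) {
  unsigned AlignInBytes = TypeAlignInBits / 8;
  if (AlignInBytes > MinABIAlignInBytes)
    return AlignInBytes;  // e.g. 16 for a struct containing long double
  return 0;               // plain byval; minimum ABI alignment applies
}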
Modified:
cfe/branches/Apple/williamson/ (props changed)
cfe/branches/Apple/williamson/lib/CodeGen/TargetInfo.cpp
cfe/branches/Apple/williamson/test/CodeGen/x86_32-arguments.c
cfe/branches/Apple/williamson/test/CodeGen/x86_64-arguments.c
Propchange: cfe/branches/Apple/williamson/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Apr 21 15:10:10 2010
@@ -1 +1 @@
-/cfe/trunk:101792,101810,101877
+/cfe/trunk:101792,101810,101877,102015-102016,102018-102019
Modified: cfe/branches/Apple/williamson/lib/CodeGen/TargetInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/branches/Apple/williamson/lib/CodeGen/TargetInfo.cpp?rev=102024&r1=102023&r2=102024&view=diff
==============================================================================
--- cfe/branches/Apple/williamson/lib/CodeGen/TargetInfo.cpp (original)
+++ cfe/branches/Apple/williamson/lib/CodeGen/TargetInfo.cpp Wed Apr 21 15:10:10 2010
@@ -43,7 +43,8 @@
getCoerceToType()->print(OS);
break;
case Indirect:
- OS << "Indirect Align=" << getIndirectAlign();
+ OS << "Indirect Align=" << getIndirectAlign()
+ << " Byal=" << getIndirectByVal();
break;
case Expand:
OS << "Expand";
@@ -270,7 +271,7 @@
llvm::LLVMContext &VMContext) const {
if (CodeGenFunction::hasAggregateLLVMType(Ty))
return ABIArgInfo::getIndirect(0);
-
+
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -291,8 +292,10 @@
static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
- static unsigned getIndirectArgumentAlignment(QualType Ty,
- ASTContext &Context);
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context,
+ bool ByVal = true) const;
public:
ABIArgInfo classifyReturnType(QualType RetTy,
@@ -490,14 +493,19 @@
}
}
-unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
- ASTContext &Context) {
- unsigned Align = Context.getTypeAlign(Ty);
- if (Align < 128) return 0;
- if (const RecordType* RT = Ty->getAs<RecordType>())
- if (typeContainsSSEVector(RT->getDecl(), Context))
- return 16;
- return 0;
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty,
+ ASTContext &Context,
+ bool ByVal) const {
+ if (!ByVal)
+ return ABIArgInfo::getIndirect(0, false);
+
+ // Compute the byval alignment. We trust the back-end to honor the
+ // minimum ABI alignment for byval, to make cleaner IR.
+ const unsigned MinABIAlign = 4;
+ unsigned Align = Context.getTypeAlign(Ty) / 8;
+ if (Align > MinABIAlign)
+ return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(0);
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
@@ -510,11 +518,10 @@
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (hasNonTrivialDestructorOrCopyConstructor(RT))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
-
+ return getIndirectResult(Ty, Context, /*ByVal=*/false);
+
if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
- Context));
+ return getIndirectResult(Ty, Context);
}
// Ignore empty structs.
@@ -529,7 +536,7 @@
canExpandIndirectArgument(Ty, Context))
return ABIArgInfo::getExpand();
- return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
+ return getIndirectResult(Ty, Context);
} else {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -685,9 +692,12 @@
ASTContext &Context) const;
/// getIndirectResult - Give a source type \arg Ty, return a suitable result
+ /// such that the argument will be returned in memory.
+ ABIArgInfo getIndirectReturnResult(QualType Ty, ASTContext &Context) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty,
- ASTContext &Context) const;
+ ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context) const;
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context,
@@ -1060,6 +1070,22 @@
return ABIArgInfo::getCoerce(CoerceTo);
}
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty,
+ ASTContext &Context) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
ASTContext &Context) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
@@ -1073,10 +1099,16 @@
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
- bool ByVal = !isRecordWithNonTrivialDestructorOrCopyConstructor(Ty);
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
- // FIXME: Set alignment correctly.
- return ABIArgInfo::getIndirect(0, ByVal);
+ // Compute the byval alignment. We trust the back-end to honor the
+ // minimum ABI alignment for byval, to make cleaner IR.
+ const unsigned MinABIAlign = 8;
+ unsigned Align = Context.getTypeAlign(Ty) / 8;
+ if (Align > MinABIAlign)
+ return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(0);
}
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
@@ -1104,7 +1136,7 @@
// AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
// hidden argument.
case Memory:
- return getIndirectResult(RetTy, Context);
+ return getIndirectReturnResult(RetTy, Context);
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
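On x86-64 the patch also splits the indirect-return path (getIndirectReturnResult,
used for values returned in memory via sret) from the indirect-argument path
(getIndirectResult, which now computes the byval alignment), since only byval
arguments carry the explicit alignment. A small illustration under the rule
above; the struct and function names are made up and do not appear in the
tests below:

// Illustration only: on x86-64 this struct is classified as MEMORY.
struct big { long double ld; long long words[4]; };

struct big ret_big(void);     // returned through a hidden sret pointer;
                              // no byval alignment is involved
void take_big(struct big x);  // passed byval; with this change the IR is
                              // expected to carry "byval align 16", since
                              // long double aligns the struct above 8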
Modified: cfe/branches/Apple/williamson/test/CodeGen/x86_32-arguments.c
URL: http://llvm.org/viewvc/llvm-project/cfe/branches/Apple/williamson/test/CodeGen/x86_32-arguments.c?rev=102024&r1=102023&r2=102024&view=diff
==============================================================================
--- cfe/branches/Apple/williamson/test/CodeGen/x86_32-arguments.c (original)
+++ cfe/branches/Apple/williamson/test/CodeGen/x86_32-arguments.c Wed Apr 21 15:10:10 2010
@@ -1,45 +1,45 @@
// RUN: %clang_cc1 -fblocks -triple i386-apple-darwin9 -emit-llvm -o %t %s
-// RUN: grep 'define signext i8 @f0()' %t
-// RUN: grep 'define signext i16 @f1()' %t
-// RUN: grep 'define i32 @f2()' %t
-// RUN: grep 'define float @f3()' %t
-// RUN: grep 'define double @f4()' %t
-// RUN: grep 'define x86_fp80 @f5()' %t
-// RUN: grep 'define void @f6(i8 signext %a0, i16 signext %a1, i32 %a2, i64 %a3, i8\* %a4)' %t
-// RUN: grep 'define void @f7(i32 %a0)' %t
-// RUN: grep 'define i64 @f8_1()' %t
-// RUN: grep 'define void @f8_2(i32 %a0.0, i32 %a0.1)' %t
+// RUN: FileCheck < %t %s
+// CHECK: define signext i8 @f0()
char f0(void) {
return 0;
}
+// CHECK: define signext i16 @f1()
short f1(void) {
return 0;
}
+// CHECK: define i32 @f2()
int f2(void) {
return 0;
}
+// CHECK: define float @f3()
float f3(void) {
return 0;
}
+// CHECK: define double @f4()
double f4(void) {
return 0;
}
+// CHECK: define x86_fp80 @f5()
long double f5(void) {
return 0;
}
+// CHECK: define void @f6(i8 signext %a0, i16 signext %a1, i32 %a2, i64 %a3, i8* %a4)
void f6(char a0, short a1, int a2, long long a3, void *a4) {}
-typedef enum { A, B, C } E;
-
-void f7(E a0) {}
+// CHECK: define void @f7(i32 %a0)
+typedef enum { A, B, C } e7;
+void f7(e7 a0) {}
+// CHECK: define i64 @f8_1()
+// CHECK: define void @f8_2(i32 %a0.0, i32 %a0.1)
struct s8 {
int a;
int b;
@@ -49,11 +49,11 @@
// This should be passed just as s8.
-// RUN: grep 'define i64 @f9_1()' %t
+// CHECK: define i64 @f9_1()
// FIXME: llvm-gcc expands this, this may have some value for the
// backend in terms of optimization but doesn't change the ABI.
-// RUN: grep 'define void @f9_2(%.truct.s9\* byval %a0)' %t
+// CHECK: define void @f9_2(%struct.s9* byval %a0)
struct s9 {
int a : 17;
int b;
@@ -63,7 +63,7 @@
// Return of small structures and unions
-// RUN: grep 'float @f10()' %t
+// CHECK: float @f10()
struct s10 {
union { };
float f;
@@ -71,12 +71,12 @@
// Small vectors and 1 x {i64,double} are returned in registers
-// RUN: grep 'i32 @f11()' %t
-// RUN: grep -F 'void @f12(<2 x i32>* sret %agg.result)' %t
-// RUN: grep 'i64 @f13()' %t
-// RUN: grep 'i64 @f14()' %t
-// RUN: grep '<2 x i64> @f15()' %t
-// RUN: grep '<2 x i64> @f16()' %t
+// CHECK: i32 @f11()
+// CHECK: void @f12(<2 x i32>* sret %agg.result)
+// CHECK: i64 @f13()
+// CHECK: i64 @f14()
+// CHECK: <2 x i64> @f15()
+// CHECK: <2 x i64> @f16()
typedef short T11 __attribute__ ((vector_size (4)));
T11 f11(void) { while (1) {} }
typedef int T12 __attribute__ ((vector_size (8)));
@@ -93,12 +93,12 @@
// And when the single element in a struct (but not for 64 and
// 128-bits).
-// RUN: grep 'i32 @f17()' %t
-// RUN: grep -F 'void @f18(%2* sret %agg.result)' %t
-// RUN: grep -F 'void @f19(%3* sret %agg.result)' %t
-// RUN: grep -F 'void @f20(%4* sret %agg.result)' %t
-// RUN: grep -F 'void @f21(%5* sret %agg.result)' %t
-// RUN: grep -F 'void @f22(%6* sret %agg.result)' %t
+// CHECK: i32 @f17()
+// CHECK: void @f18(%2* sret %agg.result)
+// CHECK: void @f19(%3* sret %agg.result)
+// CHECK: void @f20(%4* sret %agg.result)
+// CHECK: void @f21(%5* sret %agg.result)
+// CHECK: void @f22(%6* sret %agg.result)
struct { T11 a; } f17(void) { while (1) {} }
struct { T12 a; } f18(void) { while (1) {} }
struct { T13 a; } f19(void) { while (1) {} }
@@ -108,97 +108,109 @@
// Single element structures are handled specially
-// RUN: grep -F 'float @f23()' %t
-// RUN: grep -F 'float @f24()' %t
-// RUN: grep -F 'float @f25()' %t
+// CHECK: float @f23()
+// CHECK: float @f24()
+// CHECK: float @f25()
struct { float a; } f23(void) { while (1) {} }
struct { float a[1]; } f24(void) { while (1) {} }
struct { struct {} a; struct { float a[1]; } b; } f25(void) { while (1) {} }
// Small structures are handled recursively
-// RUN: grep -F 'i32 @f26()' %t
-// RUN: grep 'void @f27(%.truct.s27\* sret %agg.result)' %t
+// CHECK: i32 @f26()
+// CHECK: void @f27(%struct.s27* sret %agg.result)
struct s26 { struct { char a, b; } a; struct { char a, b; } b; } f26(void) { while (1) {} }
struct s27 { struct { char a, b, c; } a; struct { char a; } b; } f27(void) { while (1) {} }
-// RUN: grep 'void @f28(%.truct.s28\* sret %agg.result)' %t
+// CHECK: void @f28(%struct.s28* sret %agg.result)
struct s28 { int a; int b[]; } f28(void) { while (1) {} }
-// RUN: grep 'define i16 @f29()' %t
+// CHECK: define i16 @f29()
struct s29 { struct { } a[1]; char b; char c; } f29(void) { while (1) {} }
-// RUN: grep 'define i16 @f30()' %t
+// CHECK: define i16 @f30()
struct s30 { char a; char b : 4; } f30(void) { while (1) {} }
-// RUN: grep 'define float @f31()' %t
+// CHECK: define float @f31()
struct s31 { char : 0; float b; char : 0; } f31(void) { while (1) {} }
-// RUN: grep 'define i32 @f32()' %t
+// CHECK: define i32 @f32()
struct s32 { char a; unsigned : 0; } f32(void) { while (1) {} }
-// RUN: grep 'define float @f33()' %t
+// CHECK: define float @f33()
struct s33 { float a; long long : 0; } f33(void) { while (1) {} }
-// RUN: grep 'define float @f34()' %t
+// CHECK: define float @f34()
struct s34 { struct { int : 0; } a; float b; } f34(void) { while (1) {} }
-// RUN: grep 'define i16 @f35()' %t
+// CHECK: define i16 @f35()
struct s35 { struct { int : 0; } a; char b; char c; } f35(void) { while (1) {} }
-// RUN: grep 'define i16 @f36()' %t
+// CHECK: define i16 @f36()
struct s36 { struct { int : 0; } a[2][10]; char b; char c; } f36(void) { while (1) {} }
-// RUN: grep 'define float @f37()' %t
+// CHECK: define float @f37()
struct s37 { float c[1][1]; } f37(void) { while (1) {} }
-// RUN: grep 'define void @f38(.struct.s38. sret .agg.result)' %t
+// CHECK: define void @f38(%struct.s38* sret %agg.result)
struct s38 { char a[3]; short b; } f38(void) { while (1) {} }
-// RUN: grep 'define void @f39(.struct.s39. byval align 16 .x)' %t
+// CHECK: define void @f39(%struct.s39* byval align 16 %x)
typedef int v39 __attribute((vector_size(16)));
struct s39 { v39 x; };
void f39(struct s39 x) {}
// <rdar://problem/7247671>
-// RUN: grep 'define i32 @f40()' %t
+// CHECK: define i32 @f40()
enum e40 { ec0 = 0 };
enum e40 f40(void) { }
-// RUN: grep 'define void ()\* @f41()' %t
+// CHECK: define void ()* @f41()
typedef void (^vvbp)(void);
vvbp f41(void) { }
-// RUN: grep 'define i32 @f42()' %t
+// CHECK: define i32 @f42()
struct s42 { enum e40 f0; } f42(void) { }
-// RUN: grep 'define i64 @f43()' %t
+// CHECK: define i64 @f43()
struct s43 { enum e40 f0; int f1; } f43(void) { }
-// RUN: grep 'define i32 @f44()' %t
+// CHECK: define i32 @f44()
struct s44 { vvbp f0; } f44(void) { }
-// RUN: grep 'define i64 @f45()' %t
+// CHECK: define i64 @f45()
struct s45 { vvbp f0; int f1; } f45(void) { }
-// RUN: grep 'define void @f46(i32 %a0)' %t
+// CHECK: define void @f46(i32 %a0)
void f46(enum e40 a0) { }
-// RUN: grep 'define void @f47(void ()\* %a1)' %t
+// CHECK: define void @f47(void ()* %a1)
void f47(vvbp a1) { }
-// RUN: grep 'define void @f48(i32 %a0.0)' %t
+// CHECK: define void @f48(i32 %a0.0)
struct s48 { enum e40 f0; };
void f48(struct s48 a0) { }
-// RUN: grep 'define void @f49(i32 %a0.0, i32 %a0.1)' %t
+// CHECK: define void @f49(i32 %a0.0, i32 %a0.1)
struct s49 { enum e40 f0; int f1; };
void f49(struct s49 a0) { }
-// RUN: grep 'define void @f50(void ()\* %a0.0)' %t
+// CHECK: define void @f50(void ()* %a0.0)
struct s50 { vvbp f0; };
void f50(struct s50 a0) { }
-// RUN: grep 'define void @f51(void ()\* %a0.0, i32 %a0.1)' %t
+// CHECK: define void @f51(void ()* %a0.0, i32 %a0.1)
struct s51 { vvbp f0; int f1; };
void f51(struct s51 a0) { }
+// CHECK: define void @f52(%struct.s52* byval align 16 %x)
+struct s52 {
+ long double a;
+};
+void f52(struct s52 x) {}
+
+// CHECK: define void @f53(%struct.s53* byval align 32 %x)
+struct __attribute__((aligned(32))) s53 {
+ int x;
+ int y;
+};
+void f53(struct s53 x) {}
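The new f52 and f53 checks exercise the i386 side of the alignment rule:
long double (16-byte aligned on Darwin) and an aligned(32) struct both exceed
the 4-byte minimum, so the byval gets an explicit align. For contrast, a
struct whose natural alignment is only 4 bytes is expected to stay at plain
byval; a hypothetical example, not part of the test file:

// Illustration only: natural alignment is 4, which does not exceed the
// i386 minimum of 4, so no explicit align is attached to the byval.
struct ints_only { int a[5]; };
void pass_ints_only(struct ints_only x);  // expected: byval with no align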
Modified: cfe/branches/Apple/williamson/test/CodeGen/x86_64-arguments.c
URL: http://llvm.org/viewvc/llvm-project/cfe/branches/Apple/williamson/test/CodeGen/x86_64-arguments.c?rev=102024&r1=102023&r2=102024&view=diff
==============================================================================
--- cfe/branches/Apple/williamson/test/CodeGen/x86_64-arguments.c (original)
+++ cfe/branches/Apple/williamson/test/CodeGen/x86_64-arguments.c Wed Apr 21 15:10:10 2010
@@ -1,49 +1,51 @@
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o %t %s
-// RUN: grep 'define signext i8 @f0()' %t
-// RUN: grep 'define signext i16 @f1()' %t
-// RUN: grep 'define i32 @f2()' %t
-// RUN: grep 'define float @f3()' %t
-// RUN: grep 'define double @f4()' %t
-// RUN: grep 'define x86_fp80 @f5()' %t
-// RUN: grep 'define void @f6(i8 signext %a0, i16 signext %a1, i32 %a2, i64 %a3, i8\* %a4)' %t
-// RUN: grep 'define void @f7(i32 %a0)' %t
-// RUN: grep '.0 = type { i64, double }' %t
-// RUN: grep 'define .0 @f8_1()' %t
-// RUN: grep 'define void @f8_2(.0)' %t
+// RUN: FileCheck < %t %s
+// CHECK: %0 = type { i64, double }
+
+// CHECK: define signext i8 @f0()
char f0(void) {
return 0;
}
+// CHECK: define signext i16 @f1()
short f1(void) {
return 0;
}
+// CHECK: define i32 @f2()
int f2(void) {
return 0;
}
+// CHECK: define float @f3()
float f3(void) {
return 0;
}
+// CHECK: define double @f4()
double f4(void) {
return 0;
}
+// CHECK: define x86_fp80 @f5()
long double f5(void) {
return 0;
}
+// CHECK: define void @f6(i8 signext %a0, i16 signext %a1, i32 %a2, i64 %a3, i8* %a4)
void f6(char a0, short a1, int a2, long long a3, void *a4) {
}
-typedef enum { A, B, C } E;
-
-void f7(E a0) {
+// CHECK: define void @f7(i32 %a0)
+typedef enum { A, B, C } e7;
+void f7(e7 a0) {
}
// Test merging/passing of upper eightbyte with X87 class.
+//
+// CHECK: define %0 @f8_1()
+// CHECK: define void @f8_2(%0)
union u8 {
long double a;
int b;
@@ -51,48 +53,63 @@
union u8 f8_1() { while (1) {} }
void f8_2(union u8 a0) {}
-// RUN: grep 'define i64 @f9()' %t
+// CHECK: define i64 @f9()
struct s9 { int a; int b; int : 0; } f9(void) { while (1) {} }
-// RUN: grep 'define void @f10(i64)' %t
+// CHECK: define void @f10(i64)
struct s10 { int a; int b; int : 0; };
void f10(struct s10 a0) {}
-// RUN: grep 'define void @f11(.union.anon. sret .agg.result)' %t
+// CHECK: define void @f11(%struct.s19* sret %agg.result)
union { long double a; float b; } f11() { while (1) {} }
-// RUN: grep 'define i64 @f12_0()' %t
-// RUN: grep 'define void @f12_1(i64)' %t
+// CHECK: define i64 @f12_0()
+// CHECK: define void @f12_1(i64)
struct s12 { int a __attribute__((aligned(16))); };
struct s12 f12_0(void) { while (1) {} }
void f12_1(struct s12 a0) {}
// Check that sret parameter is accounted for when checking available integer
// registers.
-// RUN: grep 'define void @f13(.struct.s13_0. sret .agg.result, i32 .a, i32 .b, i32 .c, i32 .d, .struct.s13_1. byval .e, i32 .f)' %t
+// CHECK: define void @f13(%struct.s13_0* sret %agg.result, i32 %a, i32 %b, i32 %c, i32 %d, %struct.s13_1* byval %e, i32 %f)
struct s13_0 { long long f0[3]; };
struct s13_1 { long long f0[2]; };
-struct s13_0 f13(int a, int b, int c, int d,
+struct s13_0 f13(int a, int b, int c, int d,
struct s13_1 e, int f) { while (1) {} }
-// RUN: grep 'define void @f14(.*, i8 signext .X)' %t
-void f14(int a, int b, int c, int d, int e, int f,
- char X) {}
-// RUN: grep 'define void @f15(.*, i8\* .X)' %t
-void f15(int a, int b, int c, int d, int e, int f,
- void *X) {}
-// RUN: grep 'define void @f16(.*, float .X)' %t
+// CHECK: define void @f14({{.*}}, i8 signext %X)
+void f14(int a, int b, int c, int d, int e, int f, char X) {}
+
+// CHECK: define void @f15({{.*}}, i8* %X)
+void f15(int a, int b, int c, int d, int e, int f, void *X) {}
+
+// CHECK: define void @f16({{.*}}, float %X)
void f16(float a, float b, float c, float d, float e, float f, float g, float h,
float X) {}
-// RUN: grep 'define void @f17(.*, x86_fp80 .X)' %t
+
+// CHECK: define void @f17({{.*}}, x86_fp80 %X)
void f17(float a, float b, float c, float d, float e, float f, float g, float h,
long double X) {}
// Check for valid coercion.
-// RUN: grep '.. = bitcast i64. .* to .struct.f18_s0.' %t
-// RUN: grep '.. = load .struct.f18_s0. .., align 1' %t
-// RUN: grep 'store .struct.f18_s0 .., .struct.f18_s0. .f18_arg1' %t
+// CHECK: [[f18_t0:%.*]] = bitcast i64* {{.*}} to %struct.f18_s0*
+// CHECK: [[f18_t1:%.*]] = load %struct.f18_s0* [[f18_t0]], align 1
+// CHECK: store %struct.f18_s0 [[f18_t1]], %struct.f18_s0* %f18_arg1
struct f18_s0 { int f0; };
void f18(int a, struct f18_s0 f18_arg1) { while (1) {} }
+// Check byval alignment.
+
+// CHECK: define void @f19(%struct.s19* byval align 16 %x)
+struct s19 {
+ long double a;
+};
+void f19(struct s19 x) {}
+
+// CHECK: define void @f20(%struct.s20* byval align 32 %x)
+struct __attribute__((aligned(32))) s20 {
+ int x;
+ int y;
+};
+void f20(struct s20 x) {}
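Similarly, the new f19 and f20 checks pin the x86-64 behavior above the
8-byte minimum. A struct that is only 8-byte aligned but still passed in
memory is expected to keep a plain byval, as the existing f13 check already
shows for %struct.s13_1; a hypothetical example for contrast:

// Illustration only: alignment 8 equals the x86-64 minimum of 8, so the
// argument is passed byval with no explicit align attribute.
struct eights { long long a, b, c; };
void pass_eights(struct eights x);  // expected: byval with no align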