[cfe-commits] r133855 - in /cfe/trunk: lib/CodeGen/CGExpr.cpp lib/CodeGen/CGExprScalar.cpp test/CodeGen/vla.c test/CodeGenCXX/vla.cpp
John McCall
rjmccall at apple.com
Fri Jun 24 18:32:37 PDT 2011
Author: rjmccall
Date: Fri Jun 24 20:32:37 2011
New Revision: 133855
URL: http://llvm.org/viewvc/llvm-project?rev=133855&view=rev
Log:
Mark the multiply which occurs as part of performing pointer
arithmetic on a VLA as 'nsw', per discussion with djg, and
implement pointer arithmetic (other than array accesses) and
pointer subtraction for VLA types.
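As a quick illustration, the construct this enables (a minimal sketch
drawn from the new test4 in test/CodeGen/vla.c below; nothing here is
new API):

    int f(unsigned n, char (*p)[n][n+1][6]) {
      __typeof(p) p2 = (p + n/2) - n/4;  /* pointer +/- integer on a VLA type */
      return p2 - p;                     /* pointer - pointer on a VLA type */
    }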
Modified:
cfe/trunk/lib/CodeGen/CGExpr.cpp
cfe/trunk/lib/CodeGen/CGExprScalar.cpp
cfe/trunk/test/CodeGen/vla.c
cfe/trunk/test/CodeGenCXX/vla.cpp
Modified: cfe/trunk/lib/CodeGen/CGExpr.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGExpr.cpp?rev=133855&r1=133854&r2=133855&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGExpr.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGExpr.cpp Fri Jun 24 20:32:37 2011
@@ -1589,12 +1589,17 @@
// The element count here is the total number of non-VLA elements.
llvm::Value *numElements = getVLASize(vla).first;
- Idx = Builder.CreateMul(Idx, numElements);
-
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (getLangOptions().isSignedOverflowDefined()) {
+ Idx = Builder.CreateMul(Idx, numElements);
Address = Builder.CreateGEP(Address, Idx, "arrayidx");
- else
+ } else {
+ Idx = Builder.CreateNSWMul(Idx, numElements);
Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
+ }
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
llvm::Value *InterfaceSize =
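For context, a minimal C sketch (not part of this commit) of the
subscript pattern the CGExpr.cpp hunk above affects; the scaling of
the outer index by the run-time bound is the multiply that now
carries the nsw flag when signed overflow is undefined:

    /* Lowering p[i][j] scales i by n before the GEP; that scaling
       multiply is now emitted as `mul nsw` next to the existing
       `getelementptr inbounds`. */
    double get(int n, double (*p)[n], int i, int j) {
      return p[i][j];
    }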
Modified: cfe/trunk/lib/CodeGen/CGExprScalar.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGExprScalar.cpp?rev=133855&r1=133854&r2=133855&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGExprScalar.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGExprScalar.cpp Fri Jun 24 20:32:37 2011
@@ -1285,7 +1285,7 @@
// overflow because of promotion rules; we're just eliding a few steps here.
if (type->isSignedIntegerOrEnumerationType() &&
value->getType()->getPrimitiveSizeInBits() >=
- CGF.CGM.IntTy->getBitWidth())
+ CGF.IntTy->getBitWidth())
value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
else
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
@@ -1295,8 +1295,9 @@
QualType type = ptr->getPointeeType();
// VLA types don't have constant size.
- if (type->isVariableArrayType()) {
- llvm::Value *numElts = CGF.getVLASize(type).first;
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(type)) {
+ llvm::Value *numElts = CGF.getVLASize(vla).first;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
@@ -1828,196 +1829,187 @@
return phi;
}
-Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
- if (!Ops.Ty->isAnyPointerType()) {
- if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
- case LangOptions::SOB_Defined:
- return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
- case LangOptions::SOB_Trapping:
- return EmitOverflowCheckedBinOp(Ops);
- }
- }
-
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
-
- return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
- }
-
- // Must have binary (not unary) expr here. Unary pointer decrement doesn't
- // use this path.
- const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
-
- if (Ops.Ty->isPointerType() &&
- Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition needs to account for the VLA size
- CGF.ErrorUnsupported(BinOp, "VLA pointer addition");
- }
+/// Emit pointer + index arithmetic.
+static Value *emitPointerArithmetic(CodeGenFunction &CGF,
+ const BinOpInfo &op,
+ bool isSubtraction) {
+ // Must have binary (not unary) expr here. Unary pointer
+ // increment/decrement doesn't use this path.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
- Value *Ptr, *Idx;
- Expr *IdxExp;
- const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>();
- const ObjCObjectPointerType *OPT =
- BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>();
- if (PT || OPT) {
- Ptr = Ops.LHS;
- Idx = Ops.RHS;
- IdxExp = BinOp->getRHS();
- } else { // int + pointer
- PT = BinOp->getRHS()->getType()->getAs<PointerType>();
- OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>();
- assert((PT || OPT) && "Invalid add expr");
- Ptr = Ops.RHS;
- Idx = Ops.LHS;
- IdxExp = BinOp->getLHS();
- }
-
- unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.PointerWidthInBits) {
- // Zero or sign extend the pointer value based on whether the index is
- // signed or not.
- const llvm::Type *IdxType = CGF.IntPtrTy;
- if (IdxExp->getType()->isSignedIntegerOrEnumerationType())
- Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
- else
- Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
- }
- const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
- // Handle interface types, which are not represented with a concrete type.
- if (const ObjCObjectType *OIT = ElementType->getAs<ObjCObjectType>()) {
- llvm::Value *InterfaceSize =
- llvm::ConstantInt::get(Idx->getType(),
- CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
- Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
- Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ptr->getType());
+ Value *pointer = op.LHS;
+ Expr *pointerOperand = expr->getLHS();
+ Value *index = op.RHS;
+ Expr *indexOperand = expr->getRHS();
+
+ // In a subtraction, the LHS is always the pointer.
+ if (!isSubtraction && !pointer->getType()->isPointerTy()) {
+ std::swap(pointer, index);
+ std::swap(pointerOperand, indexOperand);
+ }
+
+ unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
+ if (width != CGF.PointerWidthInBits) {
+    // Zero-extend or sign-extend the index according to whether
+    // it is signed or not.
+ bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
+ index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
+ "idx.ext");
+ }
+
+ // If this is subtraction, negate the index.
+ if (isSubtraction)
+ index = CGF.Builder.CreateNeg(index, "idx.neg");
+
+ const PointerType *pointerType
+ = pointerOperand->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ QualType objectType = pointerOperand->getType()
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize
+ = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+
+ index = CGF.Builder.CreateMul(index, objectSize);
+
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ QualType elementType = pointerType->getPointeeType();
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = CGF.getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (CGF.getLangOptions().isSignedOverflowDefined()) {
+ index = CGF.Builder.CreateMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+ } else {
+ index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ }
+ return pointer;
}
// Explicitly handle GNU void* and function pointer arithmetic extensions. The
// GNU void* casts amount to no-ops since our void* type is i8*, but this is
// future proof.
- if (ElementType->isVoidType() || ElementType->isFunctionType()) {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
- Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ptr->getType());
+ if (elementType->isVoidType() || elementType->isFunctionType()) {
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ if (CGF.getLangOptions().isSignedOverflowDefined())
+ return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+
+ return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+}
+
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
+ if (op.LHS->getType()->isPointerTy() ||
+ op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
+
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
}
+
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFAdd(op.LHS, op.RHS, "add");
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
- return Builder.CreateGEP(Ptr, Idx, "add.ptr");
- return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
-Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
- if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
- if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
+ // The LHS is always a pointer if either side is.
+ if (!op.LHS->getType()->isPointerTy()) {
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
- return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Defined:
- return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Trapping:
- return EmitOverflowCheckedBinOp(Ops);
+ return EmitOverflowCheckedBinOp(op);
}
}
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFSub(op.LHS, op.RHS, "sub");
- return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
}
- // Must have binary (not unary) expr here. Unary pointer increment doesn't
- // use this path.
- const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
-
- if (BinOp->getLHS()->getType()->isPointerType() &&
- BinOp->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition needs to account for the VLA size for
- // ptr-int
- // The amount of the division needs to account for the VLA size for
- // ptr-ptr.
- CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction");
- }
-
- const QualType LHSType = BinOp->getLHS()->getType();
- const QualType LHSElementType = LHSType->getPointeeType();
- if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
- // pointer - int
- Value *Idx = Ops.RHS;
- unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.PointerWidthInBits) {
- // Zero or sign extend the pointer value based on whether the index is
- // signed or not.
- const llvm::Type *IdxType = CGF.IntPtrTy;
- if (BinOp->getRHS()->getType()->isSignedIntegerOrEnumerationType())
- Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
- else
- Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
- }
- Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
+ // If the RHS is not a pointer, then we have normal pointer
+ // arithmetic.
+ if (!op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ true);
- // Handle interface types, which are not represented with a concrete type.
- if (const ObjCObjectType *OIT = LHSElementType->getAs<ObjCObjectType>()) {
- llvm::Value *InterfaceSize =
- llvm::ConstantInt::get(Idx->getType(),
- CGF.getContext().
- getTypeSizeInChars(OIT).getQuantity());
- Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
- Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ops.LHS->getType());
- }
+ // Otherwise, this is a pointer subtraction.
- // Explicitly handle GNU void* and function pointer arithmetic
- // extensions. The GNU void* casts amount to no-ops since our void* type is
- // i8*, but this is future proof.
- if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
- Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
- return Builder.CreateBitCast(Res, Ops.LHS->getType());
- }
+ // Do the raw subtraction part.
+ llvm::Value *LHS
+ = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
+ llvm::Value *RHS
+ = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
+ Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
- return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
- return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
- }
-
- // pointer - pointer
- Value *LHS = Ops.LHS;
- Value *RHS = Ops.RHS;
+ // Okay, figure out the element size.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+ QualType elementType = expr->getLHS()->getType()->getPointeeType();
- CharUnits ElementSize;
+ llvm::Value *divisor = 0;
- // Handle GCC extension for pointer arithmetic on void* and function pointer
- // types.
- if (LHSElementType->isVoidType() || LHSElementType->isFunctionType())
- ElementSize = CharUnits::One();
- else
- ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
+ // For a variable-length array, this is going to be non-constant.
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ llvm::Value *numElements;
+ llvm::tie(numElements, elementType) = CGF.getVLASize(vla);
+
+ divisor = numElements;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
+ if (!eltSize.isOne())
+ divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
+
+  // For everything else, we can just compute it, safe in the
+ // assumption that Sema won't let anything through that we can't
+ // safely compute the size of.
+ } else {
+ CharUnits elementSize;
+ // Handle GCC extension for pointer arithmetic on void* and
+ // function pointer types.
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ elementSize = CharUnits::One();
+ else
+ elementSize = CGF.getContext().getTypeSizeInChars(elementType);
- const llvm::Type *ResultType = ConvertType(Ops.Ty);
- LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
- RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
- Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
-
- // Optimize out the shift for element size of 1.
- if (ElementSize.isOne())
- return BytesBetween;
+ // Don't even emit the divide for element size of 1.
+ if (elementSize.isOne())
+ return diffInChars;
+ divisor = CGF.CGM.getSize(elementSize);
+ }
+
// Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
// pointer difference in C is only defined in the case where both operands
// are pointing to elements of an array.
- Value *BytesPerElt =
- llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
- return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+ return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
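A worked sketch of the new non-constant-divisor path for pointer
subtraction (names are hypothetical; assumes a C99 target where
ptrdiff_t matches the pointer width):

    #include <stddef.h>

    /* For char (*a)[n][n+1][6], sizeof *a is 6 * n * (n+1) bytes at
       run time, so the raw byte difference is divided by that
       product; this mirrors what the CreateExactSDiv call above
       emits in IR. */
    ptrdiff_t diff(unsigned n, char (*a)[n][n+1][6],
                               char (*b)[n][n+1][6]) {
      return ((char *)a - (char *)b) / (ptrdiff_t)(6u * n * (n + 1));
    }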
Modified: cfe/trunk/test/CodeGen/vla.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/vla.c?rev=133855&r1=133854&r2=133855&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/vla.c (original)
+++ cfe/trunk/test/CodeGen/vla.c Fri Jun 24 20:32:37 2011
@@ -94,7 +94,7 @@
// CHECK-NEXT: store
// CHECK-NEXT: [[N:%.*]] = load i32* [[NV]], align 4
// CHECK-NEXT: [[P:%.*]] = load [5 x double]** [[PV]], align 4
- // CHECK-NEXT: [[T0:%.*]] = mul i32 1, [[N]]
+ // CHECK-NEXT: [[T0:%.*]] = mul nsw i32 1, [[N]]
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [5 x double]* [[P]], i32 [[T0]]
// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [5 x double]* [[T1]], i32 2
// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [5 x double]* [[T2]], i32 0, i32 3
@@ -102,3 +102,46 @@
// CHECK-NEXT: ret double [[T4]]
return p[1][2][3];
}
+
+int test4(unsigned n, char (*p)[n][n+1][6]) {
+ // CHECK: define i32 @test4(
+ // CHECK: [[N:%.*]] = alloca i32, align 4
+ // CHECK-NEXT: [[P:%.*]] = alloca [6 x i8]*, align 4
+ // CHECK-NEXT: [[P2:%.*]] = alloca [6 x i8]*, align 4
+ // CHECK-NEXT: store i32
+ // CHECK-NEXT: store [6 x i8]*
+
+ // VLA captures.
+ // CHECK-NEXT: [[DIM0:%.*]] = load i32* [[N]], align 4
+ // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
+ // CHECK-NEXT: [[DIM1:%.*]] = add i32 [[T0]], 1
+
+ // __typeof. FIXME: does this really need to be loaded?
+ // CHECK-NEXT: load [6 x i8]** [[P]]
+
+ // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P]], align 4
+ // CHECK-NEXT: [[T1:%.*]] = load i32* [[N]], align 4
+ // CHECK-NEXT: [[T2:%.*]] = udiv i32 [[T1]], 2
+ // CHECK-NEXT: [[T3:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
+ // CHECK-NEXT: [[T4:%.*]] = mul nsw i32 [[T2]], [[T3]]
+ // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [6 x i8]* [[T0]], i32 [[T4]]
+ // CHECK-NEXT: [[T6:%.*]] = load i32* [[N]], align 4
+ // CHECK-NEXT: [[T7:%.*]] = udiv i32 [[T6]], 4
+ // CHECK-NEXT: [[T8:%.*]] = sub i32 0, [[T7]]
+ // CHECK-NEXT: [[T9:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
+ // CHECK-NEXT: [[T10:%.*]] = mul nsw i32 [[T8]], [[T9]]
+ // CHECK-NEXT: [[T11:%.*]] = getelementptr inbounds [6 x i8]* [[T5]], i32 [[T10]]
+ // CHECK-NEXT: store [6 x i8]* [[T11]], [6 x i8]** [[P2]], align 4
+ __typeof(p) p2 = (p + n/2) - n/4;
+
+ // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P2]], align 4
+ // CHECK-NEXT: [[T1:%.*]] = load [6 x i8]** [[P]], align 4
+ // CHECK-NEXT: [[T2:%.*]] = ptrtoint [6 x i8]* [[T0]] to i32
+ // CHECK-NEXT: [[T3:%.*]] = ptrtoint [6 x i8]* [[T1]] to i32
+ // CHECK-NEXT: [[T4:%.*]] = sub i32 [[T2]], [[T3]]
+ // CHECK-NEXT: [[T5:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
+ // CHECK-NEXT: [[T6:%.*]] = mul nuw i32 6, [[T5]]
+ // CHECK-NEXT: [[T7:%.*]] = sdiv exact i32 [[T4]], [[T6]]
+ // CHECK-NEXT: ret i32 [[T7]]
+ return p2 - p;
+}
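A hypothetical driver (not part of the commit) exercising the test4
pattern above; for n == 8, (p + n/2) - n/4 lands two elements past p,
so p2 - p evaluates to 2:

    #include <stdio.h>

    int test4(unsigned n, char (*p)[n][n+1][6]);  /* as defined above */

    int main(void) {
      unsigned n = 8;
      char buf[4][n][n + 1][6];       /* room for p + n/2 to stay in range */
      printf("%d\n", test4(n, buf));  /* prints 2: n/2 - n/4 == 4 - 2 */
      return 0;
    }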
Modified: cfe/trunk/test/CodeGenCXX/vla.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/vla.cpp?rev=133855&r1=133854&r2=133855&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCXX/vla.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/vla.cpp Fri Jun 24 20:32:37 2011
@@ -24,14 +24,14 @@
array_t &ref = *(array_t*) array;
// CHECK-NEXT: [[T0:%.*]] = load i16** [[REF]]
- // CHECK-NEXT: [[T1:%.*]] = mul i64 1, [[DIM1]]
+ // CHECK-NEXT: [[T1:%.*]] = mul nsw i64 1, [[DIM1]]
// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i16* [[T0]], i64 [[T1]]
// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i16* [[T2]], i64 2
// CHECK-NEXT: store i16 3, i16* [[T3]]
ref[1][2] = 3;
// CHECK-NEXT: [[T0:%.*]] = load i16** [[REF]]
- // CHECK-NEXT: [[T1:%.*]] = mul i64 4, [[DIM1]]
+ // CHECK-NEXT: [[T1:%.*]] = mul nsw i64 4, [[DIM1]]
// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i16* [[T0]], i64 [[T1]]
// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i16* [[T2]], i64 5
// CHECK-NEXT: [[T4:%.*]] = load i16* [[T3]]