[clang] 39baaab - [CodeGen] Emit IR for fixed-point unary operators.
Bevin Hansson via cfe-commits
cfe-commits at lists.llvm.org
Wed Apr 8 05:42:15 PDT 2020
Author: Bevin Hansson
Date: 2020-04-08T14:33:04+02:00
New Revision: 39baaabf6de4cfcbb942434084298a3f9acf5f89
URL: https://github.com/llvm/llvm-project/commit/39baaabf6de4cfcbb942434084298a3f9acf5f89
DIFF: https://github.com/llvm/llvm-project/commit/39baaabf6de4cfcbb942434084298a3f9acf5f89.diff
LOG: [CodeGen] Emit IR for fixed-point unary operators.
Reviewers: rjmccall, leonardchan
Subscribers: cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D73183
Added:
clang/test/Frontend/fixed_point_unary.c
Modified:
clang/lib/CodeGen/CGExprScalar.cpp
Removed:
################################################################################
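For context, a minimal standalone sketch (not part of the commit; file and function names are illustrative) of the kind of C source this change teaches CodeGen to lower, assuming a target with Embedded-C fixed-point support:

// unary_sketch.c -- roughly: clang -ffixed-point -S -emit-llvm unary_sketch.c
_Accum a;          // signed fixed-point accumulator
_Sat _Fract sf;    // saturating signed fraction

void unary_examples(void) {
  a++;             // increment: rewritten as a - (-1); see the note after the
                   // CGExprScalar.cpp diff below
  sf = -sf;        // negation of a saturating type lowers to @llvm.ssub.sat
  sf = +sf;        // unary plus is just a load and store
}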
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 63eb4b0fe932..1e11884e11e9 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -129,11 +129,10 @@ struct BinOpInfo {
return true;
}
- /// Check if either operand is a fixed point type or integer type, with at
- /// least one being a fixed point type. In any case, this
- /// operation did not follow usual arithmetic conversion and both operands may
- /// not be the same.
- bool isFixedPointBinOp() const {
+ /// Check if at least one operand is a fixed point type. In such cases, this
+ /// operation did not follow usual arithmetic conversion and both operands
+ /// might not be of the same type.
+ bool isFixedPointOp() const {
// We cannot simply check the result type since comparison operations return
// an int.
if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
@@ -141,6 +140,8 @@ struct BinOpInfo {
QualType RHSType = BinOp->getRHS()->getType();
return LHSType->isFixedPointType() || RHSType->isFixedPointType();
}
+ if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
+ return UnOp->getSubExpr()->getType()->isFixedPointType();
return false;
}
};
@@ -746,7 +747,7 @@ class ScalarExprEmitter
Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
return propagateFMFlags(V, Ops);
}
- if (Ops.isFixedPointBinOp())
+ if (Ops.isFixedPointOp())
return EmitFixedPointBinOp(Ops);
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
@@ -2620,6 +2621,36 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
}
+ // Fixed-point types.
+ } else if (type->isFixedPointType()) {
+ // Fixed-point types are tricky. In some cases, it isn't possible to
+ // represent a 1 or a -1 in the type at all. Piggyback off of
+ // EmitFixedPointBinOp to avoid having to reimplement saturation.
+ BinOpInfo Info;
+ Info.E = E;
+ Info.Ty = E->getType();
+ Info.Opcode = isInc ? BO_Add : BO_Sub;
+ Info.LHS = value;
+ Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
+ // If the type is signed, it's better to represent this as +(-1) or -(-1),
+ // since -1 is guaranteed to be representable.
+ if (type->isSignedFixedPointType()) {
+ Info.Opcode = isInc ? BO_Sub : BO_Add;
+ Info.RHS = Builder.CreateNeg(Info.RHS);
+ }
+ // Now, convert from our invented integer literal to the type of the unary
+ // op. This will upscale and saturate if necessary. This value can become
+ // undef in some cases.
+ FixedPointSemantics SrcSema =
+ FixedPointSemantics::GetIntegerSemantics(value->getType()
+ ->getScalarSizeInBits(),
+ /*IsSigned=*/true);
+ FixedPointSemantics DstSema =
+ CGF.getContext().getFixedPointSemantics(Info.Ty);
+ Info.RHS = EmitFixedPointConversion(Info.RHS, SrcSema, DstSema,
+ E->getExprLoc());
+ value = EmitFixedPointBinOp(Info);
+
// Objective-C pointer types.
} else {
const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
@@ -3123,7 +3154,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
}
return Val;
}
- else if (Ops.isFixedPointBinOp())
+ else if (Ops.isFixedPointOp())
return EmitFixedPointBinOp(Ops);
else if (Ops.Ty->hasUnsignedIntegerRepresentation())
return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
@@ -3487,7 +3518,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
return propagateFMFlags(V, op);
}
- if (op.isFixedPointBinOp())
+ if (op.isFixedPointOp())
return EmitFixedPointBinOp(op);
return Builder.CreateAdd(op.LHS, op.RHS, "add");
@@ -3499,14 +3530,19 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
using llvm::APSInt;
using llvm::ConstantInt;
- const auto *BinOp = cast<BinaryOperator>(op.E);
-
- // The result is a fixed point type and at least one of the operands is fixed
- // point while the other is either fixed point or an int. This resulting type
- // should be determined by Sema::handleFixedPointConversions().
+ // This is either a binary operation where at least one of the operands is
+ // a fixed-point type, or a unary operation where the operand is a fixed-point
+ // type. The result type of a binary operation is determined by
+ // Sema::handleFixedPointConversions().
QualType ResultTy = op.Ty;
- QualType LHSTy = BinOp->getLHS()->getType();
- QualType RHSTy = BinOp->getRHS()->getType();
+ QualType LHSTy, RHSTy;
+ if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
+ LHSTy = BinOp->getLHS()->getType();
+ RHSTy = BinOp->getRHS()->getType();
+ } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
+ LHSTy = UnOp->getSubExpr()->getType();
+ RHSTy = UnOp->getSubExpr()->getType();
+ }
ASTContext &Ctx = CGF.getContext();
Value *LHS = op.LHS;
Value *RHS = op.RHS;
@@ -3518,13 +3554,13 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
// Convert the operands to the full precision type.
Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
// Perform the actual addition.
Value *Result;
- switch (BinOp->getOpcode()) {
+ switch (op.Opcode) {
case BO_Add: {
if (ResultFixedSema.isSaturated()) {
llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
@@ -3621,7 +3657,7 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
// Convert to the result type.
return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
}
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
@@ -3655,7 +3691,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
return propagateFMFlags(V, op);
}
- if (op.isFixedPointBinOp())
+ if (op.isFixedPointOp())
return EmitFixedPointBinOp(op);
return Builder.CreateSub(op.LHS, op.RHS, "sub");
@@ -3958,7 +3994,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
E->getExprLoc());
}
- if (BOInfo.isFixedPointBinOp()) {
+ if (BOInfo.isFixedPointOp()) {
Result = EmitFixedPointBinOp(BOInfo);
} else if (LHS->getType()->isFPOrFPVectorTy()) {
if (!IsSignaling)
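A note on the increment/decrement lowering added above: a purely fractional type such as signed _Fract cannot represent +1 (its values lie in [-1.0, 1.0)), but -1.0 always has an encoding, so for signed types the rewrite uses x - (-1) for x++ and x + (-1) for x--. A worked sketch of the effect, assuming the 16-bit, 15-fractional-bit _Fract layout the test below checks for:

// Illustration only; assumes a 16-bit signed _Fract with 15 fractional bits.
_Fract f;

void inc_fract(void) {
  f++;    // -1.0 has raw encoding 0x8000 (-32768 as i16), while +1.0 would
          // need 32768, which overflows i16; hence: sub i16 %f, -32768
}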
diff --git a/clang/test/Frontend/fixed_point_unary.c b/clang/test/Frontend/fixed_point_unary.c
new file mode 100644
index 000000000000..79af819fad8a
--- /dev/null
+++ b/clang/test/Frontend/fixed_point_unary.c
@@ -0,0 +1,264 @@
+// RUN: %clang_cc1 -ffixed-point -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,SIGNED
+// RUN: %clang_cc1 -ffixed-point -fpadding-on-unsigned-fixed-point -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,UNSIGNED
+
+_Accum a;
+_Fract f;
+long _Fract lf;
+unsigned _Accum ua;
+short unsigned _Accum usa;
+unsigned _Fract uf;
+
+_Sat _Accum sa;
+_Sat _Fract sf;
+_Sat long _Fract slf;
+_Sat unsigned _Accum sua;
+_Sat short unsigned _Accum susa;
+_Sat unsigned _Fract suf;
+
+// CHECK-LABEL: @Increment(
+void Increment() {
+// CHECK: [[TMP0:%.*]] = load i32, i32* @a, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], -32768
+// CHECK-NEXT: store i32 [[TMP1]], i32* @a, align 4
+ a++;
+
+// CHECK: [[TMP2:%.*]] = load i16, i16* @f, align 2
+// CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[TMP2]], -32768
+// CHECK-NEXT: store i16 [[TMP3]], i16* @f, align 2
+ f++;
+
+// CHECK: [[TMP4:%.*]] = load i32, i32* @lf, align 4
+// CHECK-NEXT: [[TMP5:%.*]] = sub i32 [[TMP4]], -2147483648
+// CHECK-NEXT: store i32 [[TMP5]], i32* @lf, align 4
+ lf++;
+
+// CHECK: [[TMP6:%.*]] = load i32, i32* @ua, align 4
+// SIGNED-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 65536
+// UNSIGNED-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 32768
+// CHECK-NEXT: store i32 [[TMP7]], i32* @ua, align 4
+ ua++;
+
+// CHECK: [[TMP8:%.*]] = load i16, i16* @usa, align 2
+// SIGNED-NEXT: [[TMP9:%.*]] = add i16 [[TMP8]], 256
+// UNSIGNED-NEXT: [[TMP9:%.*]] = add i16 [[TMP8]], 128
+// CHECK-NEXT: store i16 [[TMP9]], i16* @usa, align 2
+ usa++;
+
+// CHECK: [[TMP10:%.*]] = load i16, i16* @uf, align 2
+// SIGNED-NEXT: [[TMP11:%.*]] = add i16 [[TMP10]], undef
+// UNSIGNED-NEXT: [[TMP11:%.*]] = add i16 [[TMP10]], -32768
+// CHECK-NEXT: store i16 [[TMP11]], i16* @uf, align 2
+ uf++;
+
+// CHECK: [[TMP12:%.*]] = load i32, i32* @sa, align 4
+// CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[TMP12]], i32 -32768)
+// CHECK-NEXT: store i32 [[TMP13]], i32* @sa, align 4
+ sa++;
+
+// CHECK: [[TMP14:%.*]] = load i16, i16* @sf, align 2
+// CHECK-NEXT: [[TMP15:%.*]] = call i16 @llvm.ssub.sat.i16(i16 [[TMP14]], i16 -32768)
+// CHECK-NEXT: store i16 [[TMP15]], i16* @sf, align 2
+ sf++;
+
+// CHECK: [[TMP16:%.*]] = load i32, i32* @slf, align 4
+// CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[TMP16]], i32 -2147483648)
+// CHECK-NEXT: store i32 [[TMP17]], i32* @slf, align 4
+ slf++;
+
+// CHECK: [[TMP18:%.*]] = load i32, i32* @sua, align 4
+// SIGNED-NEXT: [[TMP19:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[TMP18]], i32 65536)
+// SIGNED-NEXT: store i32 [[TMP19]], i32* @sua, align 4
+// UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i32 [[TMP18]] to i31
+// UNSIGNED-NEXT: [[TMP19:%.*]] = call i31 @llvm.uadd.sat.i31(i31 [[RESIZE]], i31 32768)
+// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i31 [[TMP19]] to i32
+// UNSIGNED-NEXT: store i32 [[RESIZE1]], i32* @sua, align 4
+ sua++;
+
+// CHECK: [[TMP20:%.*]] = load i16, i16* @susa, align 2
+// SIGNED-NEXT: [[TMP21:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[TMP20]], i16 256)
+// SIGNED-NEXT: store i16 [[TMP21]], i16* @susa, align 2
+// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i16 [[TMP20]] to i15
+// UNSIGNED-NEXT: [[TMP21:%.*]] = call i15 @llvm.uadd.sat.i15(i15 [[RESIZE2]], i15 128)
+// UNSIGNED-NEXT: [[RESIZE3:%.*]] = zext i15 [[TMP21]] to i16
+// UNSIGNED-NEXT: store i16 [[RESIZE3]], i16* @susa, align 2
+ susa++;
+
+// CHECK: [[TMP22:%.*]] = load i16, i16* @suf, align 2
+// SIGNED-NEXT: [[TMP23:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[TMP22]], i16 -1)
+// SIGNED-NEXT: store i16 [[TMP23]], i16* @suf, align 2
+// UNSIGNED-NEXT: [[RESIZE4:%.*]] = trunc i16 [[TMP22]] to i15
+// UNSIGNED-NEXT: [[TMP23:%.*]] = call i15 @llvm.uadd.sat.i15(i15 [[RESIZE4]], i15 -1)
+// UNSIGNED-NEXT: [[RESIZE5:%.*]] = zext i15 [[TMP23]] to i16
+// UNSIGNED-NEXT: store i16 [[RESIZE5]], i16* @suf, align 2
+ suf++;
+}
+
+// CHECK-LABEL: @Decrement(
+void Decrement() {
+// CHECK: [[TMP0:%.*]] = load i32, i32* @a, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], -32768
+// CHECK-NEXT: store i32 [[TMP1]], i32* @a, align 4
+ a--;
+
+// CHECK: [[TMP2:%.*]] = load i16, i16* @f, align 2
+// CHECK-NEXT: [[TMP3:%.*]] = add i16 [[TMP2]], -32768
+// CHECK-NEXT: store i16 [[TMP3]], i16* @f, align 2
+ f--;
+
+// CHECK: [[TMP4:%.*]] = load i32, i32* @lf, align 4
+// CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], -2147483648
+// CHECK-NEXT: store i32 [[TMP5]], i32* @lf, align 4
+ lf--;
+
+// CHECK: [[TMP6:%.*]] = load i32, i32* @ua, align 4
+// SIGNED-NEXT: [[TMP7:%.*]] = sub i32 [[TMP6]], 65536
+// UNSIGNED-NEXT: [[TMP7:%.*]] = sub i32 [[TMP6]], 32768
+// CHECK-NEXT: store i32 [[TMP7]], i32* @ua, align 4
+ ua--;
+
+// CHECK: [[TMP8:%.*]] = load i16, i16* @usa, align 2
+// SIGNED-NEXT: [[TMP9:%.*]] = sub i16 [[TMP8]], 256
+// UNSIGNED-NEXT: [[TMP9:%.*]] = sub i16 [[TMP8]], 128
+// CHECK-NEXT: store i16 [[TMP9]], i16* @usa, align 2
+ usa--;
+
+// CHECK: [[TMP10:%.*]] = load i16, i16* @uf, align 2
+// SIGNED-NEXT: [[TMP11:%.*]] = sub i16 [[TMP10]], undef
+// UNSIGNED-NEXT: [[TMP11:%.*]] = sub i16 [[TMP10]], -32768
+// CHECK-NEXT: store i16 [[TMP11]], i16* @uf, align 2
+ uf--;
+
+// CHECK: [[TMP12:%.*]] = load i32, i32* @sa, align 4
+// CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP12]], i32 -32768)
+// CHECK-NEXT: store i32 [[TMP13]], i32* @sa, align 4
+ sa--;
+
+// CHECK: [[TMP14:%.*]] = load i16, i16* @sf, align 2
+// CHECK-NEXT: [[TMP15:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[TMP14]], i16 -32768)
+// CHECK-NEXT: store i16 [[TMP15]], i16* @sf, align 2
+ sf--;
+
+// CHECK: [[TMP16:%.*]] = load i32, i32* @slf, align 4
+// CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP16]], i32 -2147483648)
+// CHECK-NEXT: store i32 [[TMP17]], i32* @slf, align 4
+ slf--;
+
+// CHECK: [[TMP18:%.*]] = load i32, i32* @sua, align 4
+// SIGNED-NEXT: [[TMP19:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[TMP18]], i32 65536)
+// SIGNED-NEXT: store i32 [[TMP19]], i32* @sua, align 4
+// UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i32 [[TMP18]] to i31
+// UNSIGNED-NEXT: [[TMP19:%.*]] = call i31 @llvm.usub.sat.i31(i31 [[RESIZE]], i31 32768)
+// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i31 [[TMP19]] to i32
+// UNSIGNED-NEXT: store i32 [[RESIZE1]], i32* @sua, align 4
+ sua--;
+
+// CHECK: [[TMP20:%.*]] = load i16, i16* @susa, align 2
+// SIGNED-NEXT: [[TMP21:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[TMP20]], i16 256)
+// SIGNED-NEXT: store i16 [[TMP21]], i16* @susa, align 2
+// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i16 [[TMP20]] to i15
+// UNSIGNED-NEXT: [[TMP21:%.*]] = call i15 @llvm.usub.sat.i15(i15 [[RESIZE2]], i15 128)
+// UNSIGNED-NEXT: [[RESIZE3:%.*]] = zext i15 [[TMP21]] to i16
+// UNSIGNED-NEXT: store i16 [[RESIZE3]], i16* @susa, align 2
+ susa--;
+
+// CHECK: [[TMP22:%.*]] = load i16, i16* @suf, align 2
+// SIGNED-NEXT: [[TMP23:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[TMP22]], i16 -1)
+// SIGNED-NEXT: store i16 [[TMP23]], i16* @suf, align 2
+// UNSIGNED-NEXT: [[RESIZE4:%.*]] = trunc i16 [[TMP22]] to i15
+// UNSIGNED-NEXT: [[TMP23:%.*]] = call i15 @llvm.usub.sat.i15(i15 [[RESIZE4]], i15 -1)
+// UNSIGNED-NEXT: [[RESIZE5:%.*]] = zext i15 [[TMP23]] to i16
+// UNSIGNED-NEXT: store i16 [[RESIZE5]], i16* @suf, align 2
+ suf--;
+}
+
+// CHECK-LABEL: @Minus(
+void Minus() {
+// CHECK: [[TMP0:%.*]] = load i32, i32* @a, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[TMP0]]
+// CHECK-NEXT: store i32 [[TMP1]], i32* @a, align 4
+ a = -a;
+
+// CHECK: [[TMP2:%.*]] = load i16, i16* @f, align 2
+// CHECK-NEXT: [[TMP3:%.*]] = sub i16 0, [[TMP2]]
+// CHECK-NEXT: store i16 [[TMP3]], i16* @f, align 2
+ f = -f;
+
+// CHECK: [[TMP4:%.*]] = load i16, i16* @usa, align 2
+// CHECK-NEXT: [[TMP5:%.*]] = sub i16 0, [[TMP4]]
+// CHECK-NEXT: store i16 [[TMP5]], i16* @usa, align 2
+ usa = -usa;
+
+// CHECK: [[TMP6:%.*]] = load i16, i16* @uf, align 2
+// CHECK-NEXT: [[TMP7:%.*]] = sub i16 0, [[TMP6]]
+// CHECK-NEXT: store i16 [[TMP7]], i16* @uf, align 2
+ uf = -uf;
+
+// CHECK: [[TMP8:%.*]] = load i32, i32* @sa, align 4
+// CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.ssub.sat.i32(i32 0, i32 [[TMP8]])
+// CHECK-NEXT: store i32 [[TMP9]], i32* @sa, align 4
+ sa = -sa;
+
+// CHECK: [[TMP10:%.*]] = load i16, i16* @sf, align 2
+// CHECK-NEXT: [[TMP11:%.*]] = call i16 @llvm.ssub.sat.i16(i16 0, i16 [[TMP10]])
+// CHECK-NEXT: store i16 [[TMP11]], i16* @sf, align 2
+ sf = -sf;
+
+// CHECK: [[TMP12:%.*]] = load i16, i16* @susa, align 2
+// SIGNED-NEXT: [[TMP13:%.*]] = call i16 @llvm.usub.sat.i16(i16 0, i16 [[TMP12]])
+// SIGNED-NEXT: store i16 [[TMP13]], i16* @susa, align 2
+// UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i16 [[TMP12]] to i15
+// UNSIGNED-NEXT: [[TMP13:%.*]] = call i15 @llvm.usub.sat.i15(i15 0, i15 [[RESIZE]])
+// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i15 [[TMP13]] to i16
+// UNSIGNED-NEXT: store i16 [[RESIZE1]], i16* @susa, align 2
+ susa = -susa;
+
+// CHECK: [[TMP14:%.*]] = load i16, i16* @suf, align 2
+// SIGNED-NEXT: [[TMP15:%.*]] = call i16 @llvm.usub.sat.i16(i16 0, i16 [[TMP14]])
+// SIGNED-NEXT: store i16 [[TMP15]], i16* @suf, align 2
+// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i16 [[TMP14]] to i15
+// UNSIGNED-NEXT: [[TMP15:%.*]] = call i15 @llvm.usub.sat.i15(i15 0, i15 [[RESIZE2]])
+// UNSIGNED-NEXT: [[RESIZE3:%.*]] = zext i15 [[TMP15]] to i16
+// UNSIGNED-NEXT: store i16 [[RESIZE3]], i16* @suf, align 2
+ suf = -suf;
+}
+
+// CHECK-LABEL: @Plus(
+void Plus() {
+// CHECK: [[TMP0:%.*]] = load i32, i32* @a, align 4
+// CHECK-NEXT: store i32 [[TMP0]], i32* @a, align 4
+ a = +a;
+
+// CHECK: [[TMP1:%.*]] = load i16, i16* @uf, align 2
+// CHECK-NEXT: store i16 [[TMP1]], i16* @uf, align 2
+ uf = +uf;
+
+// CHECK: [[TMP2:%.*]] = load i32, i32* @sa, align 4
+// CHECK-NEXT: store i32 [[TMP2]], i32* @sa, align 4
+ sa = +sa;
+}
+
+// CHECK-LABEL: @Not(
+void Not() {
+ int i;
+
+// CHECK: [[TMP0:%.*]] = load i32, i32* @a, align 4
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0
+// CHECK-NEXT: [[LNOT:%.*]] = xor i1 [[TOBOOL]], true
+// CHECK-NEXT: [[LNOT_EXT:%.*]] = zext i1 [[LNOT]] to i32
+// CHECK-NEXT: store i32 [[LNOT_EXT]], i32* %i, align 4
+ i = !a;
+
+// CHECK: [[TMP1:%.*]] = load i16, i16* @uf, align 2
+// CHECK-NEXT: [[TOBOOL1:%.*]] = icmp ne i16 [[TMP1]], 0
+// CHECK-NEXT: [[LNOT2:%.*]] = xor i1 [[TOBOOL1]], true
+// CHECK-NEXT: [[LNOT_EXT3:%.*]] = zext i1 [[LNOT2]] to i32
+// CHECK-NEXT: store i32 [[LNOT_EXT3]], i32* %i, align 4
+ i = !uf;
+
+// CHECK: [[TMP2:%.*]] = load i16, i16* @susa, align 2
+// CHECK-NEXT: [[TOBOOL4:%.*]] = icmp ne i16 [[TMP2]], 0
+// CHECK-NEXT: [[LNOT5:%.*]] = xor i1 [[TOBOOL4]], true
+// CHECK-NEXT: [[LNOT_EXT6:%.*]] = zext i1 [[LNOT5]] to i32
+// CHECK-NEXT: store i32 [[LNOT_EXT6]], i32* %i, align 4
+ i = !susa;
+}
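Two details of the checks above are worth spelling out. First, the SIGNED and UNSIGNED prefixes correspond to the two RUN lines: without -fpadding-on-unsigned-fixed-point the unsigned types use all of their bits for the value, while with the flag they reserve one padding bit, which is why the saturating unsigned cases truncate to i31/i15, saturate at that width, and zero-extend back. Second, a padding-free unsigned _Fract has 16 fractional bits, so the scaled constant for +1.0 (1 << 16) does not fit in an i16 and the non-saturating increment amount degenerates to undef, matching the "can become undef" comment in EmitScalarPrePostIncDec. A minimal sketch of that corner case (illustrative, assuming the default 16-bit unsigned _Fract without padding):

// Illustration only: unsigned _Fract, 16 fractional bits, no padding bit.
unsigned _Fract uf;

void inc_unsigned_fract(void) {
  uf++;   // +1.0 would need the raw value 1 << 16, which has no i16
          // encoding, so the add operand becomes undef (SIGNED check above)
}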