[clang] [llvm] [RISC-V] Make EmitRISCVCpuSupports accept multiple features (PR #104917)
Piyou Chen via cfe-commits
cfe-commits at lists.llvm.org
Tue Aug 20 05:07:34 PDT 2024
https://github.com/BeMg updated https://github.com/llvm/llvm-project/pull/104917
>From c8b31f1e1d1d30cb8523772d3fd15a0358c540d6 Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Tue, 20 Aug 2024 04:37:20 -0700
Subject: [PATCH 1/2] [RISC-V] Make EmitRISCVCpuSupports accept multiple
features
This patch adds an EmitRISCVCpuSupports overload that accepts multiple features at once, and changes the original single-feature EmitRISCVCpuSupports to forward to the new overload.
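As a rough illustration (not part of the patch itself), the new overload boils down to folding the requested features into one mask per 64-bit group and then requiring each group's mask to be fully set. The helper getFeatureBitInfo, the bit positions in its table, and the fixed two-group array below are stand-ins for RISCVISAInfo::getRISCVFeaturesBitsInfo and the runtime __riscv_feature_bits data, chosen only for illustration:

#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for RISCVISAInfo::getRISCVFeaturesBitsInfo:
// maps an extension name to (group index, bit position), or (-1, -1)
// when the extension has no assigned bit.
static std::pair<int, int> getFeatureBitInfo(const std::string &Feat) {
  static const std::map<std::string, std::pair<int, int>> Table = {
      {"a", {0, 0}}, {"c", {0, 2}}, {"v", {0, 21}}, {"zba", {1, 3}}};
  auto It = Table.find(Feat);
  return It == Table.end() ? std::make_pair(-1, -1) : It->second;
}

// Mirrors the structure of the new EmitRISCVCpuSupports overload: collect
// one required mask per group, then check every group's bits at once.
static bool cpuSupportsAll(const std::vector<std::string> &Feats,
                           const uint64_t (&FeatureBits)[2]) {
  uint64_t Require[2] = {0, 0};
  for (const auto &F : Feats) {
    auto [Group, Bit] = getFeatureBitInfo(F);
    if (Bit == -1)
      return false; // unknown feature: conservatively unsupported
    Require[Group] |= (1ULL << Bit);
  }
  for (unsigned I = 0; I < 2; ++I)
    if ((FeatureBits[I] & Require[I]) != Require[I])
      return false;
  return true;
}

Asking for several features this way costs at most one load/and/icmp per group rather than one per feature, which is what the emitted IR below reflects.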
---
clang/lib/CodeGen/CGBuiltin.cpp | 71 ++++++++++++------
clang/lib/CodeGen/CodeGenFunction.h | 1 +
clang/test/CodeGen/builtin-cpu-supports.c | 72 ++++++++++---------
.../llvm/TargetParser/RISCVTargetParser.h | 2 +
4 files changed, 91 insertions(+), 55 deletions(-)
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index f424ddaa175400..39df4f134c9ef3 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -64,6 +64,7 @@
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/RISCVISAInfo.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
#include "llvm/TargetParser/X86TargetParser.h"
#include <optional>
#include <sstream>
@@ -14439,33 +14440,57 @@ Value *CodeGenFunction::EmitRISCVCpuSupports(const CallExpr *E) {
if (!getContext().getTargetInfo().validateCpuSupports(FeatureStr))
return Builder.getFalse();
- // Note: We are making an unchecked assumption that the size of the
- // feature array is >= 1. This holds for any version of compiler-rt
- // which defines this interface.
- llvm::ArrayType *ArrayOfInt64Ty = llvm::ArrayType::get(Int64Ty, 1);
+ return EmitRISCVCpuSupports(ArrayRef<StringRef>(FeatureStr));
+}
+
+static Value *loadRISCVFeatureBits(unsigned Index, CGBuilderTy &Builder,
+ CodeGenModule &CGM,
+ llvm::LLVMContext &Context) {
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Context);
+ llvm::Type *Int64Ty = llvm::Type::getInt64Ty(Context);
+ llvm::ArrayType *ArrayOfInt64Ty =
+ llvm::ArrayType::get(Int64Ty, llvm::RISCV::RISCVFeatureBitSize);
llvm::Type *StructTy = llvm::StructType::get(Int32Ty, ArrayOfInt64Ty);
llvm::Constant *RISCVFeaturesBits =
CGM.CreateRuntimeVariable(StructTy, "__riscv_feature_bits");
- auto *GV = cast<llvm::GlobalValue>(RISCVFeaturesBits);
- GV->setDSOLocal(true);
-
- auto LoadFeatureBit = [&](unsigned Index) {
- // Create GEP then load.
- Value *IndexVal = llvm::ConstantInt::get(Int32Ty, Index);
- llvm::Value *GEPIndices[] = {Builder.getInt32(0), Builder.getInt32(1),
- IndexVal};
- Value *Ptr =
- Builder.CreateInBoundsGEP(StructTy, RISCVFeaturesBits, GEPIndices);
- Value *FeaturesBit =
- Builder.CreateAlignedLoad(Int64Ty, Ptr, CharUnits::fromQuantity(8));
- return FeaturesBit;
- };
+ cast<llvm::GlobalValue>(RISCVFeaturesBits)->setDSOLocal(true);
+ Value *IndexVal = llvm::ConstantInt::get(Int32Ty, Index);
+ llvm::Value *GEPIndices[] = {Builder.getInt32(0), Builder.getInt32(1),
+ IndexVal};
+ Value *Ptr =
+ Builder.CreateInBoundsGEP(StructTy, RISCVFeaturesBits, GEPIndices);
+ Value *FeaturesBit =
+ Builder.CreateAlignedLoad(Int64Ty, Ptr, CharUnits::fromQuantity(8));
+ return FeaturesBit;
+}
+
+Value *CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) {
+ const unsigned RISCVFeatureLength = llvm::RISCV::RISCVFeatureBitSize;
+ SmallVector<uint64_t, 2> RequireBitMasks(RISCVFeatureLength);
+
+ for (auto Feat : FeaturesStrs) {
+ auto [GroupID, BitPos] = RISCVISAInfo::getRISCVFeaturesBitsInfo(Feat);
+
+    // If there is no BitPos for this feature, skip this version.
+    // A warning is also reported to the user during compilation.
+ if (BitPos == -1)
+ return Builder.getFalse();
- auto [GroupID, BitPos] = RISCVISAInfo::getRISCVFeaturesBitsInfo(FeatureStr);
- assert(BitPos != -1 && "validation should have rejected this feature");
- Value *MaskV = Builder.getInt64(1ULL << BitPos);
- Value *Bitset = Builder.CreateAnd(LoadFeatureBit(GroupID), MaskV);
- return Builder.CreateICmpEQ(Bitset, MaskV);
+ RequireBitMasks[GroupID] |= (1ULL << BitPos);
+ }
+
+ Value *Result = Builder.getTrue();
+ for (unsigned Idx = 0; Idx < RISCVFeatureLength; Idx++) {
+ if (RequireBitMasks[Idx] == 0)
+ continue;
+
+ Value *Mask = Builder.getInt64(RequireBitMasks[Idx]);
+ Value *Bitset = Builder.CreateAnd(
+ loadRISCVFeatureBits(Idx, Builder, CGM, getLLVMContext()), Mask);
+ Value *CmpV = Builder.CreateICmpEQ(Bitset, Mask);
+ Result = Builder.CreateAnd(Result, CmpV);
+ }
+ return Result;
}
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 57e0b7f91e9bf8..e1b9ada3c1e1fd 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -4704,6 +4704,7 @@ class CodeGenFunction : public CodeGenTypeCache {
ReturnValueSlot ReturnValue);
llvm::Value *EmitRISCVCpuSupports(const CallExpr *E);
+ llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs);
llvm::Value *EmitRISCVCpuInit();
void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
diff --git a/clang/test/CodeGen/builtin-cpu-supports.c b/clang/test/CodeGen/builtin-cpu-supports.c
index b252484fc3df95..144b79d3150adf 100644
--- a/clang/test/CodeGen/builtin-cpu-supports.c
+++ b/clang/test/CodeGen/builtin-cpu-supports.c
@@ -251,34 +251,38 @@ int test_ppc(int a) {
// CHECK-RV32-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-RV32-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-RV32-NEXT: call void @__init_riscv_feature_bits(ptr null)
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [1 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV32-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1
-// CHECK-RV32-NEXT: br i1 [[TMP2]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
// CHECK-RV32: if.then:
// CHECK-RV32-NEXT: store i32 3, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN:%.*]]
// CHECK-RV32: if.else:
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [1 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 4
-// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 4
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 4
+// CHECK-RV32-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
// CHECK-RV32: if.then1:
// CHECK-RV32-NEXT: store i32 7, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: if.else2:
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [1 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], 2097152
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 2097152
-// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
+// CHECK-RV32-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV32-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 2097152
+// CHECK-RV32-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 2097152
+// CHECK-RV32-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
+// CHECK-RV32-NEXT: br i1 [[TMP11]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
// CHECK-RV32: if.then3:
// CHECK-RV32-NEXT: store i32 11, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: if.else4:
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [1 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
-// CHECK-RV32-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], 8
-// CHECK-RV32-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP10]], 8
-// CHECK-RV32-NEXT: br i1 [[TMP11]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
+// CHECK-RV32-NEXT: [[TMP12:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
+// CHECK-RV32-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], 8
+// CHECK-RV32-NEXT: [[TMP14:%.*]] = icmp eq i64 [[TMP13]], 8
+// CHECK-RV32-NEXT: [[TMP15:%.*]] = and i1 true, [[TMP14]]
+// CHECK-RV32-NEXT: br i1 [[TMP15]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
// CHECK-RV32: if.then5:
// CHECK-RV32-NEXT: store i32 13, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
@@ -292,8 +296,8 @@ int test_ppc(int a) {
// CHECK-RV32-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: return:
-// CHECK-RV32-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4
-// CHECK-RV32-NEXT: ret i32 [[TMP12]]
+// CHECK-RV32-NEXT: [[TMP16:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK-RV32-NEXT: ret i32 [[TMP16]]
//
// CHECK-RV64-LABEL: define dso_local signext i32 @test_riscv(
// CHECK-RV64-SAME: i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -302,34 +306,38 @@ int test_ppc(int a) {
// CHECK-RV64-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-RV64-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-RV64-NEXT: call void @__init_riscv_feature_bits(ptr null)
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [1 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV64-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1
-// CHECK-RV64-NEXT: br i1 [[TMP2]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
// CHECK-RV64: if.then:
// CHECK-RV64-NEXT: store i32 3, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN:%.*]]
// CHECK-RV64: if.else:
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [1 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 4
-// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
// CHECK-RV64: if.then1:
// CHECK-RV64-NEXT: store i32 7, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: if.else2:
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [1 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], 2097152
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 2097152
-// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 2097152
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 2097152
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
+// CHECK-RV64-NEXT: br i1 [[TMP11]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
// CHECK-RV64: if.then3:
// CHECK-RV64-NEXT: store i32 11, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: if.else4:
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [1 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
-// CHECK-RV64-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], 8
-// CHECK-RV64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP10]], 8
-// CHECK-RV64-NEXT: br i1 [[TMP11]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], 8
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = icmp eq i64 [[TMP13]], 8
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = and i1 true, [[TMP14]]
+// CHECK-RV64-NEXT: br i1 [[TMP15]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
// CHECK-RV64: if.then5:
// CHECK-RV64-NEXT: store i32 13, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
@@ -343,8 +351,8 @@ int test_ppc(int a) {
// CHECK-RV64-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: return:
-// CHECK-RV64-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4
-// CHECK-RV64-NEXT: ret i32 [[TMP12]]
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK-RV64-NEXT: ret i32 [[TMP16]]
//
int test_riscv(int a) {
__builtin_cpu_init();
diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
index c75778952e0f51..0a1be7d7e3e507 100644
--- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h
+++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
@@ -32,6 +32,8 @@ struct RISCVExtensionBitmask {
};
} // namespace RISCVExtensionBitmaskTable
+static constexpr unsigned RISCVFeatureBitSize = 2;
+
// We use 64 bits as the known part in the scalable vector types.
static constexpr unsigned RVVBitsPerBlock = 64;
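For context on what the emitted code indexes into: loadRISCVFeatureBits above builds the IR type { i32, [2 x i64] } once RISCVFeatureBitSize is 2. A sketch of a matching C++ layout, assumed here for illustration rather than taken from the actual compiler-rt definition:

// Assumed layout mirroring the { i32, [2 x i64] } IR type; the real
// compiler-rt declaration of __riscv_feature_bits may differ in naming.
struct RISCVFeatureBitsLayout {
  unsigned Length;            // i32 field
  unsigned long long Bits[2]; // [2 x i64]: one bitmask per feature group
};

With that layout, a query for a feature at (GroupID, BitPos) reduces to (Bits[GroupID] & (1ULL << BitPos)) == (1ULL << BitPos), which matches the load/and/icmp sequences in the updated tests.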
>From 9d4a9bbb41f336d7ecb1be75fdc3ea585e72491e Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Tue, 20 Aug 2024 05:01:07 -0700
Subject: [PATCH 2/2] Avoid the unnecessary "true" expression
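This change replaces the constant-true seed with a lazily built accumulator, so a single feature-group check is used directly and no "and i1 true, %x" instruction is emitted. A self-contained sketch of that pattern, with placeholder names rather than the actual Clang call sites:

#include "llvm/IR/IRBuilder.h"
#include <cassert>
#include <vector>

// Lazily AND a list of i1 checks together: the first check becomes the
// result as-is, and an 'and' is only created for the second and later ones.
static llvm::Value *combineChecks(llvm::IRBuilder<> &Builder,
                                  const std::vector<llvm::Value *> &Checks) {
  llvm::Value *Result = nullptr;
  for (llvm::Value *CmpV : Checks)
    Result = Result ? Builder.CreateAnd(Result, CmpV) : CmpV;
  assert(Result && "expected at least one check");
  return Result;
}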
---
clang/lib/CodeGen/CGBuiltin.cpp | 7 ++-
clang/test/CodeGen/builtin-cpu-supports.c | 68 ++++++++++-------------
2 files changed, 35 insertions(+), 40 deletions(-)
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 39df4f134c9ef3..93c2f449b751df 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -14479,7 +14479,7 @@ Value *CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) {
RequireBitMasks[GroupID] |= (1ULL << BitPos);
}
- Value *Result = Builder.getTrue();
+ Value *Result = nullptr;
for (unsigned Idx = 0; Idx < RISCVFeatureLength; Idx++) {
if (RequireBitMasks[Idx] == 0)
continue;
@@ -14488,8 +14488,11 @@ Value *CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) {
Value *Bitset = Builder.CreateAnd(
loadRISCVFeatureBits(Idx, Builder, CGM, getLLVMContext()), Mask);
Value *CmpV = Builder.CreateICmpEQ(Bitset, Mask);
- Result = Builder.CreateAnd(Result, CmpV);
+ Result = (!Result) ? CmpV : Builder.CreateAnd(Result, CmpV);
}
+
+  assert(Result && "Should have a value here.");
+
return Result;
}
diff --git a/clang/test/CodeGen/builtin-cpu-supports.c b/clang/test/CodeGen/builtin-cpu-supports.c
index 144b79d3150adf..72fc9a433dd6e8 100644
--- a/clang/test/CodeGen/builtin-cpu-supports.c
+++ b/clang/test/CodeGen/builtin-cpu-supports.c
@@ -254,35 +254,31 @@ int test_ppc(int a) {
// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV32-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
-// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+// CHECK-RV32-NEXT: br i1 [[TMP2]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
// CHECK-RV32: if.then:
// CHECK-RV32-NEXT: store i32 3, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN:%.*]]
// CHECK-RV32: if.else:
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
-// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 4
+// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
// CHECK-RV32: if.then1:
// CHECK-RV32-NEXT: store i32 7, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: if.else2:
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 2097152
-// CHECK-RV32-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 2097152
-// CHECK-RV32-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
-// CHECK-RV32-NEXT: br i1 [[TMP11]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV32-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], 2097152
+// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 2097152
+// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
// CHECK-RV32: if.then3:
// CHECK-RV32-NEXT: store i32 11, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: if.else4:
-// CHECK-RV32-NEXT: [[TMP12:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
-// CHECK-RV32-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], 8
-// CHECK-RV32-NEXT: [[TMP14:%.*]] = icmp eq i64 [[TMP13]], 8
-// CHECK-RV32-NEXT: [[TMP15:%.*]] = and i1 true, [[TMP14]]
-// CHECK-RV32-NEXT: br i1 [[TMP15]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
+// CHECK-RV32-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
+// CHECK-RV32-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], 8
+// CHECK-RV32-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP10]], 8
+// CHECK-RV32-NEXT: br i1 [[TMP11]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
// CHECK-RV32: if.then5:
// CHECK-RV32-NEXT: store i32 13, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
@@ -296,8 +292,8 @@ int test_ppc(int a) {
// CHECK-RV32-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-RV32-NEXT: br label [[RETURN]]
// CHECK-RV32: return:
-// CHECK-RV32-NEXT: [[TMP16:%.*]] = load i32, ptr [[RETVAL]], align 4
-// CHECK-RV32-NEXT: ret i32 [[TMP16]]
+// CHECK-RV32-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK-RV32-NEXT: ret i32 [[TMP12]]
//
// CHECK-RV64-LABEL: define dso_local signext i32 @test_riscv(
// CHECK-RV64-SAME: i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -309,35 +305,31 @@ int test_ppc(int a) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
// CHECK-RV64-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
-// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+// CHECK-RV64-NEXT: br i1 [[TMP2]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
// CHECK-RV64: if.then:
// CHECK-RV64-NEXT: store i32 3, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN:%.*]]
// CHECK-RV64: if.else:
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
-// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 4
+// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[IF_THEN1:%.*]], label [[IF_ELSE2:%.*]]
// CHECK-RV64: if.then1:
// CHECK-RV64-NEXT: store i32 7, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: if.else2:
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 2097152
-// CHECK-RV64-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 2097152
-// CHECK-RV64-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
-// CHECK-RV64-NEXT: br i1 [[TMP11]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], 2097152
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 2097152
+// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[IF_THEN3:%.*]], label [[IF_ELSE4:%.*]]
// CHECK-RV64: if.then3:
// CHECK-RV64-NEXT: store i32 11, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: if.else4:
-// CHECK-RV64-NEXT: [[TMP12:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
-// CHECK-RV64-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], 8
-// CHECK-RV64-NEXT: [[TMP14:%.*]] = icmp eq i64 [[TMP13]], 8
-// CHECK-RV64-NEXT: [[TMP15:%.*]] = and i1 true, [[TMP14]]
-// CHECK-RV64-NEXT: br i1 [[TMP15]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], 8
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP10]], 8
+// CHECK-RV64-NEXT: br i1 [[TMP11]], label [[IF_THEN5:%.*]], label [[IF_END:%.*]]
// CHECK-RV64: if.then5:
// CHECK-RV64-NEXT: store i32 13, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
@@ -351,8 +343,8 @@ int test_ppc(int a) {
// CHECK-RV64-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-RV64-NEXT: br label [[RETURN]]
// CHECK-RV64: return:
-// CHECK-RV64-NEXT: [[TMP16:%.*]] = load i32, ptr [[RETVAL]], align 4
-// CHECK-RV64-NEXT: ret i32 [[TMP16]]
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK-RV64-NEXT: ret i32 [[TMP12]]
//
int test_riscv(int a) {
__builtin_cpu_init();