[clang] [Clang][RISCV] Use `__builtin_popcount` in `__riscv_cpop_32/64` (PR #76286)

via cfe-commits cfe-commits at lists.llvm.org
Sat Dec 23 06:00:42 PST 2023


llvmbot wrote:



@llvm/pr-subscribers-backend-risc-v

Author: Yingwei Zheng (dtcxzyw)

Changes:

This patch replaces `__builtin_riscv_cpop_32/64` with `__builtin_popcount`/`__builtin_popcountll`, because `__builtin_riscv_cpop_32/64` is not implemented in Clang.
Thanks to @Liaoshihua for reporting this!

It is an alternative to #76256.
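
For context, a minimal usage sketch (not part of the patch): with this change, code that includes `riscv_bitmanip.h` and calls the cpop helpers compiles against the ordinary popcount builtins, which lower to `llvm.ctpop` as the new test checks show. This assumes a RISC-V target with Zbb enabled (e.g. `-march=rv64gc_zbb`); the function names below are purely illustrative.

```c
#include <stdint.h>
#include <riscv_bitmanip.h>  /* requires __riscv_zbb to be defined */

/* Counts set bits in a 32-bit value; __riscv_cpop_32 now expands to
 * __builtin_popcount(x) instead of the unimplemented builtin. */
unsigned count_set_bits_32(uint32_t x) {
  return __riscv_cpop_32(x);
}

#if __riscv_xlen == 64
/* 64-bit variant; __riscv_cpop_64 now expands to __builtin_popcountll(x). */
unsigned count_set_bits_64(uint64_t x) {
  return __riscv_cpop_64(x);
}
#endif
```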

---
Full diff: https://github.com/llvm/llvm-project/pull/76286.diff


2 Files Affected:

- (modified) clang/lib/Headers/riscv_bitmanip.h (+2-2) 
- (modified) clang/test/CodeGen/RISCV/rvb-intrinsics/zbb.c (+30-4) 


``````````diff
diff --git a/clang/lib/Headers/riscv_bitmanip.h b/clang/lib/Headers/riscv_bitmanip.h
index 1a81cc8618c975..044cbaa037e43a 100644
--- a/clang/lib/Headers/riscv_bitmanip.h
+++ b/clang/lib/Headers/riscv_bitmanip.h
@@ -34,7 +34,7 @@ __riscv_ctz_32(uint32_t __x) {
 
 static __inline__ unsigned __attribute__((__always_inline__, __nodebug__))
 __riscv_cpop_32(uint32_t __x) {
-  return __builtin_riscv_cpop_32(__x);
+  return __builtin_popcount(__x);
 }
 
 #if __riscv_xlen == 64
@@ -55,7 +55,7 @@ __riscv_ctz_64(uint64_t __x) {
 
 static __inline__ unsigned __attribute__((__always_inline__, __nodebug__))
 __riscv_cpop_64(uint64_t __x) {
-  return __builtin_riscv_cpop_64(__x);
+  return __builtin_popcountll(__x);
 }
 #endif
 #endif // defined(__riscv_zbb)
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/zbb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/zbb.c
index 5edbc578e82e9a..fbc51b4bf144ae 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/zbb.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/zbb.c
@@ -51,8 +51,8 @@ unsigned int clz_32(uint32_t a) {
 // RV64ZBB-LABEL: @clz_64(
 // RV64ZBB-NEXT:  entry:
 // RV64ZBB-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[A:%.*]], i1 false)
-// RV64ZBB-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP0]] to i32
-// RV64ZBB-NEXT:    ret i32 [[CAST]]
+// RV64ZBB-NEXT:    [[CAST_I:%.*]] = trunc i64 [[TMP0]] to i32
+// RV64ZBB-NEXT:    ret i32 [[CAST_I]]
 //
 unsigned int clz_64(uint64_t a) {
   return __riscv_clz_64(a);
@@ -77,10 +77,36 @@ unsigned int ctz_32(uint32_t a) {
 // RV64ZBB-LABEL: @ctz_64(
 // RV64ZBB-NEXT:  entry:
 // RV64ZBB-NEXT:    [[TMP0:%.*]] = call i64 @llvm.cttz.i64(i64 [[A:%.*]], i1 false)
-// RV64ZBB-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP0]] to i32
-// RV64ZBB-NEXT:    ret i32 [[CAST]]
+// RV64ZBB-NEXT:    [[CAST_I:%.*]] = trunc i64 [[TMP0]] to i32
+// RV64ZBB-NEXT:    ret i32 [[CAST_I]]
 //
 unsigned int ctz_64(uint64_t a) {
   return __riscv_ctz_64(a);
 }
 #endif
+
+// RV32ZBB-LABEL: @cpop_32(
+// RV32ZBB-NEXT:  entry:
+// RV32ZBB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctpop.i32(i32 [[A:%.*]])
+// RV32ZBB-NEXT:    ret i32 [[TMP0]]
+//
+// RV64ZBB-LABEL: @cpop_32(
+// RV64ZBB-NEXT:  entry:
+// RV64ZBB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctpop.i32(i32 [[A:%.*]])
+// RV64ZBB-NEXT:    ret i32 [[TMP0]]
+//
+unsigned int cpop_32(uint32_t a) {
+  return __riscv_cpop_32(a);
+}
+
+#if __riscv_xlen == 64
+// RV64ZBB-LABEL: @cpop_64(
+// RV64ZBB-NEXT:  entry:
+// RV64ZBB-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ctpop.i64(i64 [[A:%.*]])
+// RV64ZBB-NEXT:    [[CAST_I:%.*]] = trunc i64 [[TMP0]] to i32
+// RV64ZBB-NEXT:    ret i32 [[CAST_I]]
+//
+unsigned int cpop_64(uint64_t a) {
+  return __riscv_cpop_64(a);
+}
+#endif

``````````



https://github.com/llvm/llvm-project/pull/76286

