[clang] [RISCV] Run mem2reg to simplify Zbc tests (PR #70169)

Wang Pengcheng via cfe-commits <cfe-commits at lists.llvm.org>
Tue Oct 24 23:36:48 PDT 2023


https://github.com/wangpc-pp created https://github.com/llvm/llvm-project/pull/70169

Pipe clang's -O0 output through `opt -S -passes=mem2reg` (enabled by `-disable-O0-optnone`) so the autogenerated assertions check the intrinsic calls directly instead of the O0 alloca/store/load boilerplate.
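
At -O0, clang attaches `optnone` to every function and spills each parameter
to a stack slot, so the autogenerated CHECK lines are dominated by
alloca/store/load traffic rather than the intrinsic call under test.
`-disable-O0-optnone` drops that attribute (otherwise `opt` would skip the
functions entirely), and piping through `opt -S -passes=mem2reg` promotes
those slots back to SSA values before FileCheck runs. A minimal sketch of
the effect on clmul_64, reconstructed from the CHECK lines in the patch
below (value names approximate):

    ; Before: plain -O0 output.
    define i64 @clmul_64(i64 %a, i64 %b) {
    entry:
      %a.addr = alloca i64, align 8
      %b.addr = alloca i64, align 8
      store i64 %a, ptr %a.addr, align 8
      store i64 %b, ptr %b.addr, align 8
      %0 = load i64, ptr %a.addr, align 8
      %1 = load i64, ptr %b.addr, align 8
      %2 = call i64 @llvm.riscv.clmul.i64(i64 %0, i64 %1)
      ret i64 %2
    }

    ; After mem2reg: the allocas are gone and the call reads the
    ; arguments directly.
    define i64 @clmul_64(i64 %a, i64 %b) {
    entry:
      %0 = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
      ret i64 %0
    }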

From c5500404b5885e7038b0360d7d8bfbb317d6a1b5 Mon Sep 17 00:00:00 2001
From: wangpc <wangpengcheng.pp at bytedance.com>
Date: Wed, 25 Oct 2023 14:36:09 +0800
Subject: [PATCH] [RISCV] Run mem2reg to simplify Zbc tests

---
 clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c | 72 +++++--------------
 1 file changed, 16 insertions(+), 56 deletions(-)

diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c
index aa5bebe38dd6b2d..ae9153eff155e19 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c
@@ -1,7 +1,9 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zbc -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZBC
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zbc -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZBC
 
 #include <stdint.h>
@@ -9,14 +11,8 @@
 #if __riscv_xlen == 64
 // RV64ZBC-LABEL: @clmul_64(
 // RV64ZBC-NEXT:  entry:
-// RV64ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBC-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBC-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t clmul_64(uint64_t a, uint64_t b) {
   return __builtin_riscv_clmul_64(a, b);
@@ -24,14 +20,8 @@ uint64_t clmul_64(uint64_t a, uint64_t b) {
 
 // RV64ZBC-LABEL: @clmulh_64(
 // RV64ZBC-NEXT:  entry:
-// RV64ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBC-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBC-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t clmulh_64(uint64_t a, uint64_t b) {
   return __builtin_riscv_clmulh_64(a, b);
@@ -39,14 +29,8 @@ uint64_t clmulh_64(uint64_t a, uint64_t b) {
 
 // RV64ZBC-LABEL: @clmulr_64(
 // RV64ZBC-NEXT:  entry:
-// RV64ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmulr.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBC-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmulr.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBC-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t clmulr_64(uint64_t a, uint64_t b) {
   return __builtin_riscv_clmulr_64(a, b);
@@ -55,25 +39,13 @@ uint64_t clmulr_64(uint64_t a, uint64_t b) {
 
 // RV32ZBC-LABEL: @clmul_32(
 // RV32ZBC-NEXT:  entry:
-// RV32ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBC-NEXT:    ret i32 [[TMP0]]
 //
 // RV64ZBC-LABEL: @clmul_32(
 // RV64ZBC-NEXT:  entry:
-// RV64ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV64ZBC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV64ZBC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV64ZBC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV64ZBC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBC-NEXT:    ret i32 [[TMP2]]
+// RV64ZBC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV64ZBC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmul_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmul_32(a, b);
@@ -82,14 +54,8 @@ uint32_t clmul_32(uint32_t a, uint32_t b) {
 #if __riscv_xlen == 32
 // RV32ZBC-LABEL: @clmulh_32(
 // RV32ZBC-NEXT:  entry:
-// RV32ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmulh_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmulh_32(a, b);
@@ -97,14 +63,8 @@ uint32_t clmulh_32(uint32_t a, uint32_t b) {
 
 // RV32ZBC-LABEL: @clmulr_32(
 // RV32ZBC-NEXT:  entry:
-// RV32ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmulr.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmulr.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmulr_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmulr_32(a, b);

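
After updating the RUN lines, the CHECK lines can be regenerated with the
script named in the test header. A typical invocation from an llvm-project
checkout with a built clang (the --llvm-bin path below is an assumption
about your build layout):

    llvm/utils/update_cc_test_checks.py --llvm-bin build/bin \
        clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c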

