[clang] 7fb0f4a - [RISCV] Run mem2reg on the scalar C builtin tests to remove allocas and simplify checks. NFC

Craig Topper via cfe-commits cfe-commits at lists.llvm.org
Wed Jul 19 10:30:51 PDT 2023


Author: Craig Topper
Date: 2023-07-19T10:30:42-07:00
New Revision: 7fb0f4a6eba2351da8ef993d2b488d7dc5827b44

URL: https://github.com/llvm/llvm-project/commit/7fb0f4a6eba2351da8ef993d2b488d7dc5827b44
DIFF: https://github.com/llvm/llvm-project/commit/7fb0f4a6eba2351da8ef993d2b488d7dc5827b44.diff

LOG: [RISCV] Run mem2reg on the scalar C builtin tests to remove allocas and simplify checks. NFC

As requested on D155647.
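
The change to every file is mechanical: each RUN line gains -disable-O0-optnone
and pipes the emitted IR through "opt -S -passes=mem2reg" before FileCheck, and
the assertions are then regenerated with update_cc_test_checks.py so they no
longer need to match the alloca/store/load boilerplate. A minimal sketch of the
resulting test pattern, excerpted from riscv64-zknh.c in the diff below:

// RUN: %clang_cc1 -triple riscv64 -target-feature +zknh -emit-llvm %s -o - \
// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN:     | FileCheck %s  -check-prefix=RV64ZKNH

#include <stdint.h>

// RV64ZKNH-LABEL: @sha512sig0(
// RV64ZKNH-NEXT:  entry:
// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[RS1:%.*]])
// RV64ZKNH-NEXT:    ret i64 [[TMP0]]
//
uint64_t sha512sig0(uint64_t rs1) {
  return __builtin_riscv_sha512sig0(rs1);
}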

Added: 
    

Modified: 
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-xtheadbb.c
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-xtheadbb.c
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c
    clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-xtheadbb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-xtheadbb.c
index a16b1436fef9c7..bab550fc518209 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-xtheadbb.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-xtheadbb.c
@@ -1,14 +1,12 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +xtheadbb -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32XTHEADBB
 
 // RV32XTHEADBB-LABEL: @clz_32(
 // RV32XTHEADBB-NEXT:  entry:
-// RV32XTHEADBB-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32XTHEADBB-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32XTHEADBB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32XTHEADBB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
-// RV32XTHEADBB-NEXT:    ret i32 [[TMP1]]
+// RV32XTHEADBB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[A:%.*]], i1 false)
+// RV32XTHEADBB-NEXT:    ret i32 [[TMP0]]
 //
 unsigned int clz_32(unsigned int a) {
   return __builtin_riscv_clz_32(a);
@@ -16,12 +14,9 @@ unsigned int clz_32(unsigned int a) {
 
 // RV32XTHEADBB-LABEL: @clo_32(
 // RV32XTHEADBB-NEXT:  entry:
-// RV32XTHEADBB-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32XTHEADBB-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32XTHEADBB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32XTHEADBB-NEXT:    [[NOT:%.*]] = xor i32 [[TMP0]], -1
-// RV32XTHEADBB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[NOT]], i1 false)
-// RV32XTHEADBB-NEXT:    ret i32 [[TMP1]]
+// RV32XTHEADBB-NEXT:    [[NOT:%.*]] = xor i32 [[A:%.*]], -1
+// RV32XTHEADBB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[NOT]], i1 false)
+// RV32XTHEADBB-NEXT:    ret i32 [[TMP0]]
 //
 unsigned int clo_32(unsigned int a) {
   return __builtin_riscv_clz_32(~a);

diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c
index 2e08182797c882..78e3c7da4348eb 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c
@@ -1,16 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zbkb -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZBKB
 
 #include <stdint.h>
 
 // RV32ZBKB-LABEL: @brev8(
 // RV32ZBKB-NEXT:  entry:
-// RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT:    ret i32 [[TMP1]]
+// RV32ZBKB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[RS1:%.*]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t brev8(uint32_t rs1)
 {
@@ -19,11 +17,8 @@ uint32_t brev8(uint32_t rs1)
 
 // RV32ZBKB-LABEL: @zip(
 // RV32ZBKB-NEXT:  entry:
-// RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.zip.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT:    ret i32 [[TMP1]]
+// RV32ZBKB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.zip.i32(i32 [[RS1:%.*]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t zip(uint32_t rs1)
 {
@@ -32,11 +27,8 @@ uint32_t zip(uint32_t rs1)
 
 // RV32ZBKB-LABEL: @unzip(
 // RV32ZBKB-NEXT:  entry:
-// RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.unzip.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT:    ret i32 [[TMP1]]
+// RV32ZBKB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.unzip.i32(i32 [[RS1:%.*]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t unzip(uint32_t rs1)
 {

diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c
index 4535578fad83f4..e44aa76083ad21 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c
@@ -1,19 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zbkc -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZBKC
 
 #include <stdint.h>
 
 // RV32ZBKC-LABEL: @clmul_32(
 // RV32ZBKC-NEXT:  entry:
-// RV32ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBKC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBKC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBKC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBKC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmul_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmul_32(a, b);
@@ -21,14 +16,8 @@ uint32_t clmul_32(uint32_t a, uint32_t b) {
 
 // RV32ZBKC-LABEL: @clmulh_32(
 // RV32ZBKC-NEXT:  entry:
-// RV32ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBKC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBKC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBKC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBKC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmulh_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmulh_32(a, b);

diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c
index 06c24d1a1e092f..fd521622da8323 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c
@@ -1,19 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zbkx -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZBKX
 
 #include <stdint.h>
 
 // RV32ZBKX-LABEL: @xperm8(
 // RV32ZBKX-NEXT:  entry:
-// RV32ZBKX-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKX-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKX-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKX-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKX-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm8.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKX-NEXT:    ret i32 [[TMP2]]
+// RV32ZBKX-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.xperm8.i32(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZBKX-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t xperm8(uint32_t rs1, uint32_t rs2)
 {
@@ -22,14 +17,8 @@ uint32_t xperm8(uint32_t rs1, uint32_t rs2)
 
 // RV32ZBKX-LABEL: @xperm4(
 // RV32ZBKX-NEXT:  entry:
-// RV32ZBKX-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKX-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKX-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKX-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKX-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm4.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKX-NEXT:    ret i32 [[TMP2]]
+// RV32ZBKX-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.xperm4.i32(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZBKX-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t xperm4(uint32_t rs1, uint32_t rs2)
 {

diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-xtheadbb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-xtheadbb.c
index da74ca92137c11..9e2af356b491ab 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-xtheadbb.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-xtheadbb.c
@@ -1,14 +1,12 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadbb -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64XTHEADBB
 
 // RV64XTHEADBB-LABEL: @clz_32(
 // RV64XTHEADBB-NEXT:  entry:
-// RV64XTHEADBB-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV64XTHEADBB-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV64XTHEADBB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV64XTHEADBB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
-// RV64XTHEADBB-NEXT:    ret i32 [[TMP1]]
+// RV64XTHEADBB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[A:%.*]], i1 false)
+// RV64XTHEADBB-NEXT:    ret i32 [[TMP0]]
 //
 unsigned int clz_32(unsigned int a) {
   return __builtin_riscv_clz_32(a);
@@ -16,12 +14,9 @@ unsigned int clz_32(unsigned int a) {
 
 // RV64XTHEADBB-LABEL: @clo_32(
 // RV64XTHEADBB-NEXT:  entry:
-// RV64XTHEADBB-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV64XTHEADBB-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV64XTHEADBB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV64XTHEADBB-NEXT:    [[NOT:%.*]] = xor i32 [[TMP0]], -1
-// RV64XTHEADBB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[NOT]], i1 false)
-// RV64XTHEADBB-NEXT:    ret i32 [[TMP1]]
+// RV64XTHEADBB-NEXT:    [[NOT:%.*]] = xor i32 [[A:%.*]], -1
+// RV64XTHEADBB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[NOT]], i1 false)
+// RV64XTHEADBB-NEXT:    ret i32 [[TMP0]]
 //
 unsigned int clo_32(unsigned int a) {
   return __builtin_riscv_clz_32(~a);
@@ -29,11 +24,8 @@ unsigned int clo_32(unsigned int a) {
 
 // RV64XTHEADBB-LABEL: @clz_64(
 // RV64XTHEADBB-NEXT:  entry:
-// RV64XTHEADBB-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64XTHEADBB-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64XTHEADBB-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64XTHEADBB-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[TMP0]], i1 false)
-// RV64XTHEADBB-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// RV64XTHEADBB-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[A:%.*]], i1 false)
+// RV64XTHEADBB-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP0]] to i32
 // RV64XTHEADBB-NEXT:    ret i32 [[CAST]]
 //
 unsigned int clz_64(unsigned long a) {
@@ -42,12 +34,9 @@ unsigned int clz_64(unsigned long a) {
 
 // RV64XTHEADBB-LABEL: @clo_64(
 // RV64XTHEADBB-NEXT:  entry:
-// RV64XTHEADBB-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64XTHEADBB-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64XTHEADBB-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64XTHEADBB-NEXT:    [[NOT:%.*]] = xor i64 [[TMP0]], -1
-// RV64XTHEADBB-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[NOT]], i1 false)
-// RV64XTHEADBB-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// RV64XTHEADBB-NEXT:    [[NOT:%.*]] = xor i64 [[A:%.*]], -1
+// RV64XTHEADBB-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[NOT]], i1 false)
+// RV64XTHEADBB-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP0]] to i32
 // RV64XTHEADBB-NEXT:    ret i32 [[CAST]]
 //
 unsigned int clo_64(unsigned long a) {

diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c
index f978a6ab62e786..db411427586307 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c
@@ -1,16 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkb -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZBKB
 
 #include <stdint.h>
 
 // RV64ZBKB-LABEL: @brev8_32(
 // RV64ZBKB-NEXT:  entry:
-// RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZBKB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP0]])
-// RV64ZBKB-NEXT:    ret i32 [[TMP1]]
+// RV64ZBKB-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[RS1:%.*]])
+// RV64ZBKB-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t brev8_32(uint32_t rs1)
 {
@@ -19,11 +17,8 @@ uint32_t brev8_32(uint32_t rs1)
 
 // RV64ZBKB-LABEL: @brev8_64(
 // RV64ZBKB-NEXT:  entry:
-// RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKB-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZBKB-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.brev8.i64(i64 [[TMP0]])
-// RV64ZBKB-NEXT:    ret i64 [[TMP1]]
+// RV64ZBKB-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.brev8.i64(i64 [[RS1:%.*]])
+// RV64ZBKB-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t brev8_64(uint64_t rs1)
 {

diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c
index 5f8ae2138da814..d133d434ccd7e3 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c
@@ -1,19 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkc -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZBKC
 
 #include <stdint.h>
 
 // RV64ZBKC-LABEL: @clmul_64(
 // RV64ZBKC-NEXT:  entry:
-// RV64ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBKC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBKC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBKC-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBKC-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t clmul_64(uint64_t a, uint64_t b) {
   return __builtin_riscv_clmul_64(a, b);
@@ -21,14 +16,8 @@ uint64_t clmul_64(uint64_t a, uint64_t b) {
 
 // RV64ZBKC-LABEL: @clmulh_64(
 // RV64ZBKC-NEXT:  entry:
-// RV64ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBKC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBKC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBKC-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBKC-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t clmulh_64(uint64_t a, uint64_t b) {
   return __builtin_riscv_clmulh_64(a, b);
@@ -36,14 +25,8 @@ uint64_t clmulh_64(uint64_t a, uint64_t b) {
 
 // RV64ZBKC-LABEL: @clmul_32(
 // RV64ZBKC-NEXT:  entry:
-// RV64ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBKC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV64ZBKC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV64ZBKC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV64ZBKC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV64ZBKC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBKC-NEXT:    ret i32 [[TMP2]]
+// RV64ZBKC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV64ZBKC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmul_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmul_32(a, b);

diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c
index 43e69aa6bcdcc9..93c1f7fcb761e7 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c
@@ -1,19 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkx -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZBKX
 
 #include <stdint.h>
 
 // RV64ZBKX-LABEL: @xperm8(
 // RV64ZBKX-NEXT:  entry:
-// RV64ZBKX-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKX-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKX-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZBKX-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZBKX-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm8.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKX-NEXT:    ret i64 [[TMP2]]
+// RV64ZBKX-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.xperm8.i64(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZBKX-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t xperm8(uint64_t rs1, uint64_t rs2)
 {
@@ -22,14 +17,8 @@ uint64_t xperm8(uint64_t rs1, uint64_t rs2)
 
 // RV64ZBKX-LABEL: @xperm4(
 // RV64ZBKX-NEXT:  entry:
-// RV64ZBKX-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKX-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKX-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZBKX-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZBKX-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm4.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKX-NEXT:    ret i64 [[TMP2]]
+// RV64ZBKX-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.xperm4.i64(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZBKX-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t xperm4(uint64_t rs1, uint64_t rs2)
 {

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
index f9472d4c36fb3b..b2da68e8604a9e 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
@@ -1,19 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zknd -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZKND
 
 #include <stdint.h>
 
 // RV32ZKND-LABEL: @aes32dsi(
 // RV32ZKND-NEXT:  entry:
-// RV32ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKND-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKND-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKND-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
-// RV32ZKND-NEXT:    ret i32 [[TMP2]]
+// RV32ZKND-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.aes32dsi(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 3)
+// RV32ZKND-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t aes32dsi(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_aes32dsi(rs1, rs2, 3);
@@ -21,14 +16,8 @@ uint32_t aes32dsi(uint32_t rs1, uint32_t rs2) {
 
 // RV32ZKND-LABEL: @aes32dsmi(
 // RV32ZKND-NEXT:  entry:
-// RV32ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKND-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKND-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKND-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsmi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
-// RV32ZKND-NEXT:    ret i32 [[TMP2]]
+// RV32ZKND-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.aes32dsmi(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 3)
+// RV32ZKND-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t aes32dsmi(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_aes32dsmi(rs1, rs2, 3);

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
index 7243c3f3e2cb1e..8f906e094c724c 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
@@ -1,19 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zkne -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZKNE
 
 #include <stdint.h>
 
 // RV32ZKNE-LABEL: @aes32esi(
 // RV32ZKNE-NEXT:  entry:
-// RV32ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNE-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNE-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNE-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
-// RV32ZKNE-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNE-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.aes32esi(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 3)
+// RV32ZKNE-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t aes32esi(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_aes32esi(rs1, rs2, 3);
@@ -21,14 +16,8 @@ uint32_t aes32esi(uint32_t rs1, uint32_t rs2) {
 
 // RV32ZKNE-LABEL: @aes32esmi(
 // RV32ZKNE-NEXT:  entry:
-// RV32ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNE-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNE-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNE-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esmi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
-// RV32ZKNE-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNE-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.aes32esmi(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 3)
+// RV32ZKNE-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t aes32esmi(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_aes32esmi(rs1, rs2, 3);

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
index 3cc429c175d3dd..9bb320b093f3cb 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
@@ -1,16 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zknh -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZKNH
 
 #include <stdint.h>
 
 // RV32ZKNH-LABEL: @sha256sig0(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP0]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[RS1:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha256sig0(uint32_t rs1) {
   return __builtin_riscv_sha256sig0(rs1);
@@ -18,11 +16,8 @@ uint32_t sha256sig0(uint32_t rs1) {
 
 // RV32ZKNH-LABEL: @sha256sig1(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP0]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[RS1:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha256sig1(uint32_t rs1) {
   return __builtin_riscv_sha256sig1(rs1);
@@ -30,11 +25,8 @@ uint32_t sha256sig1(uint32_t rs1) {
 
 // RV32ZKNH-LABEL: @sha256sum0(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP0]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[RS1:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha256sum0(uint32_t rs1) {
   return __builtin_riscv_sha256sum0(rs1);
@@ -42,11 +34,8 @@ uint32_t sha256sum0(uint32_t rs1) {
 
 // RV32ZKNH-LABEL: @sha256sum1(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP0]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[RS1:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha256sum1(uint32_t rs1) {
   return __builtin_riscv_sha256sum1(rs1);
@@ -54,14 +43,8 @@ uint32_t sha256sum1(uint32_t rs1) {
 
 // RV32ZKNH-LABEL: @sha512sig0h(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0h(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sig0h(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha512sig0h(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_sha512sig0h(rs1, rs2);
@@ -69,14 +52,8 @@ uint32_t sha512sig0h(uint32_t rs1, uint32_t rs2) {
 
 // RV32ZKNH-LABEL: @sha512sig0l(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0l(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sig0l(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha512sig0l(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_sha512sig0l(rs1, rs2);
@@ -84,14 +61,8 @@ uint32_t sha512sig0l(uint32_t rs1, uint32_t rs2) {
 
 // RV32ZKNH-LABEL: @sha512sig1h(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1h(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sig1h(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha512sig1h(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_sha512sig1h(rs1, rs2);
@@ -99,14 +70,8 @@ uint32_t sha512sig1h(uint32_t rs1, uint32_t rs2) {
 
 // RV32ZKNH-LABEL: @sha512sig1l(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1l(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sig1l(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha512sig1l(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_sha512sig1l(rs1, rs2);
@@ -114,14 +79,8 @@ uint32_t sha512sig1l(uint32_t rs1, uint32_t rs2) {
 
 // RV32ZKNH-LABEL: @sha512sum0r(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum0r(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sum0r(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha512sum0r(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_sha512sum0r(rs1, rs2);
@@ -129,14 +88,8 @@ uint32_t sha512sum0r(uint32_t rs1, uint32_t rs2) {
 
 // RV32ZKNH-LABEL: @sha512sum1r(
 // RV32ZKNH-NEXT:  entry:
-// RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum1r(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sum1r(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha512sum1r(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_sha512sum1r(rs1, rs2);

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
index e51c85c578d1ab..a008ce36f1b7ba 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
@@ -1,18 +1,17 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zknd -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKND-ZKNE
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zkne -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKND-ZKNE
 
 #include <stdint.h>
 
 // RV64ZKND-ZKNE-LABEL: @aes64ks1i(
 // RV64ZKND-ZKNE-NEXT:  entry:
-// RV64ZKND-ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-ZKNE-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[TMP0]], i32 0)
-// RV64ZKND-ZKNE-NEXT:    ret i64 [[TMP1]]
+// RV64ZKND-ZKNE-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[RS1:%.*]], i32 0)
+// RV64ZKND-ZKNE-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t aes64ks1i(uint64_t rs1) {
   return __builtin_riscv_aes64ks1i(rs1, 0);
@@ -20,14 +19,8 @@ uint64_t aes64ks1i(uint64_t rs1) {
 
 // RV64ZKND-ZKNE-LABEL: @aes64ks2(
 // RV64ZKND-ZKNE-NEXT:  entry:
-// RV64ZKND-ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-ZKNE-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-ZKNE-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-ZKNE-NEXT:    ret i64 [[TMP2]]
+// RV64ZKND-ZKNE-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKND-ZKNE-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t aes64ks2(uint64_t rs1, uint64_t rs2) {
   return __builtin_riscv_aes64ks2(rs1, rs2);

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
index 33375f0d483cff..a42555fe9ac6a8 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
@@ -1,19 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zknd -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKND
 
 #include <stdint.h>
 
 // RV64ZKND-LABEL: @aes64dsm(
 // RV64ZKND-NEXT:  entry:
-// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-NEXT:    ret i64 [[TMP2]]
+// RV64ZKND-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKND-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t aes64dsm(uint64_t rs1, uint64_t rs2) {
   return __builtin_riscv_aes64dsm(rs1, rs2);
@@ -22,14 +17,8 @@ uint64_t aes64dsm(uint64_t rs1, uint64_t rs2) {
 
 // RV64ZKND-LABEL: @aes64ds(
 // RV64ZKND-NEXT:  entry:
-// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-NEXT:    ret i64 [[TMP2]]
+// RV64ZKND-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKND-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t aes64ds(uint64_t rs1, uint64_t rs2) {
   return __builtin_riscv_aes64ds(rs1, rs2);
@@ -38,11 +27,8 @@ uint64_t aes64ds(uint64_t rs1, uint64_t rs2) {
 
 // RV64ZKND-LABEL: @aes64im(
 // RV64ZKND-NEXT:  entry:
-// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[TMP0]])
-// RV64ZKND-NEXT:    ret i64 [[TMP1]]
+// RV64ZKND-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[RS1:%.*]])
+// RV64ZKND-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t aes64im(uint64_t rs1) {
   return __builtin_riscv_aes64im(rs1);

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
index bd0c301339ad3c..49f9de21f24dc5 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
@@ -1,19 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zkne -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKNE
 
 #include <stdint.h>
 
 // RV64ZKNE-LABEL: @aes64es(
 // RV64ZKNE-NEXT:  entry:
-// RV64ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNE-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNE-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNE-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNE-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKNE-NEXT:    ret i64 [[TMP2]]
+// RV64ZKNE-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKNE-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t aes64es(uint64_t rs1, uint64_t rs2) {
   return __builtin_riscv_aes64es(rs1, rs2);
@@ -22,14 +17,8 @@ uint64_t aes64es(uint64_t rs1, uint64_t rs2) {
 
 // RV64ZKNE-LABEL: @aes64esm(
 // RV64ZKNE-NEXT:  entry:
-// RV64ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNE-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNE-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNE-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNE-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKNE-NEXT:    ret i64 [[TMP2]]
+// RV64ZKNE-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKNE-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t aes64esm(uint64_t rs1, uint64_t rs2) {
   return __builtin_riscv_aes64esm(rs1, rs2);

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
index fc54e627de3cae..83326a70591a3a 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
@@ -1,16 +1,14 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zknh -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKNH
 
 #include <stdint.h>
 
 // RV64ZKNH-LABEL: @sha512sig0(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[RS1:%.*]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t sha512sig0(uint64_t rs1) {
   return __builtin_riscv_sha512sig0(rs1);
@@ -19,11 +17,8 @@ uint64_t sha512sig0(uint64_t rs1) {
 
 // RV64ZKNH-LABEL: @sha512sig1(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[RS1:%.*]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t sha512sig1(uint64_t rs1) {
   return __builtin_riscv_sha512sig1(rs1);
@@ -32,11 +27,8 @@ uint64_t sha512sig1(uint64_t rs1) {
 
 // RV64ZKNH-LABEL: @sha512sum0(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[RS1:%.*]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t sha512sum0(uint64_t rs1) {
   return __builtin_riscv_sha512sum0(rs1);
@@ -45,11 +37,8 @@ uint64_t sha512sum0(uint64_t rs1) {
 
 // RV64ZKNH-LABEL: @sha512sum1(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[RS1:%.*]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t sha512sum1(uint64_t rs1) {
   return __builtin_riscv_sha512sum1(rs1);
@@ -58,11 +47,8 @@ uint64_t sha512sum1(uint64_t rs1) {
 
 // RV64ZKNH-LABEL: @sha256sig0(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[RS1:%.*]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha256sig0(uint32_t rs1) {
   return __builtin_riscv_sha256sig0(rs1);
@@ -70,11 +56,8 @@ uint32_t sha256sig0(uint32_t rs1) {
 
 // RV64ZKNH-LABEL: @sha256sig1(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[RS1:%.*]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha256sig1(uint32_t rs1) {
   return __builtin_riscv_sha256sig1(rs1);
@@ -83,11 +66,8 @@ uint32_t sha256sig1(uint32_t rs1) {
 
 // RV64ZKNH-LABEL: @sha256sum0(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[RS1:%.*]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha256sum0(uint32_t rs1) {
   return __builtin_riscv_sha256sum0(rs1);
@@ -95,11 +75,8 @@ uint32_t sha256sum0(uint32_t rs1) {
 
 // RV64ZKNH-LABEL: @sha256sum1(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[RS1:%.*]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sha256sum1(uint32_t rs1) {
   return __builtin_riscv_sha256sum1(rs1);

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c
index 9407003ead9817..40651f5cc4cedf 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c
@@ -1,32 +1,22 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zksed -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZKSED
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zksed -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKSED
 
 #include <stdint.h>
 
 // RV32ZKSED-LABEL: @sm4ks(
 // RV32ZKSED-NEXT:  entry:
-// RV32ZKSED-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSED-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSED-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[TMP0]], i32 [[TMP1]], i32 0)
-// RV32ZKSED-NEXT:    ret i32 [[TMP2]]
+// RV32ZKSED-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 0)
+// RV32ZKSED-NEXT:    ret i32 [[TMP0]]
 //
 // RV64ZKSED-LABEL: @sm4ks(
 // RV64ZKSED-NEXT:  entry:
-// RV64ZKSED-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSED-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSED-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[TMP0]], i32 [[TMP1]], i32 0)
-// RV64ZKSED-NEXT:    ret i32 [[TMP2]]
+// RV64ZKSED-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 0)
+// RV64ZKSED-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sm4ks(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_sm4ks(rs1, rs2, 0);
@@ -34,25 +24,13 @@ uint32_t sm4ks(uint32_t rs1, uint32_t rs2) {
 
 // RV32ZKSED-LABEL: @sm4ed(
 // RV32ZKSED-NEXT:  entry:
-// RV32ZKSED-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSED-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSED-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[TMP0]], i32 [[TMP1]], i32 0)
-// RV32ZKSED-NEXT:    ret i32 [[TMP2]]
+// RV32ZKSED-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 0)
+// RV32ZKSED-NEXT:    ret i32 [[TMP0]]
 //
 // RV64ZKSED-LABEL: @sm4ed(
 // RV64ZKSED-NEXT:  entry:
-// RV64ZKSED-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSED-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSED-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[TMP0]], i32 [[TMP1]], i32 0)
-// RV64ZKSED-NEXT:    ret i32 [[TMP2]]
+// RV64ZKSED-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 0)
+// RV64ZKSED-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sm4ed(uint32_t rs1, uint32_t rs2) {
   return __builtin_riscv_sm4ed(rs1, rs2, 0);

diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c
index 53335996657403..3df0f5dca0ba6c 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c
@@ -1,26 +1,22 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zksh -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZKSH
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zksh -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKSH
 
 #include <stdint.h>
 
 // RV32ZKSH-LABEL: @sm3p0(
 // RV32ZKSH-NEXT:  entry:
-// RV32ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP0]])
-// RV32ZKSH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKSH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[RS1:%.*]])
+// RV32ZKSH-NEXT:    ret i32 [[TMP0]]
 //
 // RV64ZKSH-LABEL: @sm3p0(
 // RV64ZKSH-NEXT:  entry:
-// RV64ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP0]])
-// RV64ZKSH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKSH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[RS1:%.*]])
+// RV64ZKSH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sm3p0(uint32_t rs1) {
   return __builtin_riscv_sm3p0(rs1);
@@ -29,19 +25,13 @@ uint32_t sm3p0(uint32_t rs1) {
 
 // RV32ZKSH-LABEL: @sm3p1(
 // RV32ZKSH-NEXT:  entry:
-// RV32ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP0]])
-// RV32ZKSH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKSH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[RS1:%.*]])
+// RV32ZKSH-NEXT:    ret i32 [[TMP0]]
 //
 // RV64ZKSH-LABEL: @sm3p1(
 // RV64ZKSH-NEXT:  entry:
-// RV64ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP0]])
-// RV64ZKSH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKSH-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[RS1:%.*]])
+// RV64ZKSH-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t sm3p1(uint32_t rs1) {
   return __builtin_riscv_sm3p1(rs1);



