[clang] ac03e3f - [RISCV] Use 'long' in aes64 Zknd/Zkne builtin tests. NFC
Craig Topper via cfe-commits
cfe-commits at lists.llvm.org
Thu Jul 6 01:14:34 PDT 2023
Author: Craig Topper
Date: 2023-07-06T01:14:02-07:00
New Revision: ac03e3f51c271b985efeb0fe6b9782cd6928be33
URL: https://github.com/llvm/llvm-project/commit/ac03e3f51c271b985efeb0fe6b9782cd6928be33
DIFF: https://github.com/llvm/llvm-project/commit/ac03e3f51c271b985efeb0fe6b9782cd6928be33.diff
LOG: [RISCV] Use 'long' in aes64 Zknd/Zkne builtin tests. NFC
This matches the data type of the intrinsics. This can be seen
in the removal of the sext and trunc instructions from the IR.
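To make the effect concrete, here is a minimal sketch (editor's
illustration, not part of the commit; the wrapper name is hypothetical,
and it assumes an RV64 target with Zknd/Zkne enabled):

  // Under LP64, 'long' is 64 bits, matching the i64 operand and result
  // of the intrinsic, so Clang lowers this directly to
  // @llvm.riscv.aes64ks1i(i64 %rs1, i32 0). With 'int' parameters the
  // IR additionally carried a sext of the argument and a trunc of the
  // result, as the removed CHECK lines in the diff below show.
  long ks1_step(long rs1) {
    return __builtin_riscv_aes64ks1i_64(rs1, 0);
  }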
Reviewed By: kito-cheng
Differential Revision: https://reviews.llvm.org/D154572
Added:
Modified:
clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
Removed:
################################################################################
diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
index 1132c98ef595f2..3596c451a611d9 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
@@ -6,32 +6,27 @@
// RV64ZKND-ZKNE-LABEL: @aes64ks1i(
// RV64ZKND-ZKNE-NEXT: entry:
-// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[CONV]], i32 0)
-// RV64ZKND-ZKNE-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKND-ZKNE-NEXT: ret i32 [[CONV1]]
+// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[TMP0]], i32 0)
+// RV64ZKND-ZKNE-NEXT: ret i64 [[TMP1]]
//
-int aes64ks1i(int rs1) {
+long aes64ks1i(long rs1) {
return __builtin_riscv_aes64ks1i_64(rs1, 0);
}
// RV64ZKND-ZKNE-LABEL: @aes64ks2(
// RV64ZKND-ZKNE-NEXT: entry:
-// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKND-ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKND-ZKNE-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKND-ZKNE-NEXT: ret i32 [[CONV2]]
+// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKND-ZKNE-NEXT: ret i64 [[TMP2]]
//
-int aes64ks2(int rs1, int rs2) {
+long aes64ks2(long rs1, long rs2) {
return __builtin_riscv_aes64ks2_64(rs1, rs2);
}
diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
index 94a1da79badb25..379587134dd636 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
@@ -5,52 +5,44 @@
// RV64ZKND-LABEL: @aes64dsm(
// RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKND-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKND-NEXT: ret i32 [[CONV2]]
+// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKND-NEXT: ret i64 [[TMP2]]
//
-int aes64dsm(int rs1, int rs2) {
+long aes64dsm(long rs1, long rs2) {
return __builtin_riscv_aes64dsm_64(rs1, rs2);
}
// RV64ZKND-LABEL: @aes64ds(
// RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKND-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKND-NEXT: ret i32 [[CONV2]]
+// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKND-NEXT: ret i64 [[TMP2]]
//
-int aes64ds(int rs1, int rs2) {
+long aes64ds(long rs1, long rs2) {
return __builtin_riscv_aes64ds_64(rs1, rs2);
}
// RV64ZKND-LABEL: @aes64im(
// RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[CONV]])
-// RV64ZKND-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKND-NEXT: ret i32 [[CONV1]]
+// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[TMP0]])
+// RV64ZKND-NEXT: ret i64 [[TMP1]]
//
-int aes64im(int rs1) {
+long aes64im(long rs1) {
return __builtin_riscv_aes64im_64(rs1);
}
diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
index a05698b1f7bc81..c0cf69b8280db4 100644
--- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
+++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
@@ -5,37 +5,31 @@
// RV64ZKNE-LABEL: @aes64es(
// RV64ZKNE-NEXT: entry:
-// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNE-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNE-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKNE-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKNE-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKNE-NEXT: ret i32 [[CONV2]]
+// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKNE-NEXT: ret i64 [[TMP2]]
//
-int aes64es(int rs1, int rs2) {
+long aes64es(long rs1, long rs2) {
return __builtin_riscv_aes64es_64(rs1, rs2);
}
// RV64ZKNE-LABEL: @aes64esm(
// RV64ZKNE-NEXT: entry:
-// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNE-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNE-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKNE-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKNE-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKNE-NEXT: ret i32 [[CONV2]]
+// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKNE-NEXT: ret i64 [[TMP2]]
//
-int aes64esm(int rs1, int rs2) {
+long aes64esm(long rs1, long rs2) {
return __builtin_riscv_aes64esm_64(rs1, rs2);
}