[llvm] 62a7bb0 - [RISCV][GISel] Resolve CHECK prefix conflict and add a bunch of FIXMEs to bitmanip tests. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 8 15:25:48 PST 2024
Author: Craig Topper
Date: 2024-11-08T15:25:18-08:00
New Revision: 62a7bb09e3646780b7bceb7cef4eba257e3a9818
URL: https://github.com/llvm/llvm-project/commit/62a7bb09e3646780b7bceb7cef4eba257e3a9818
DIFF: https://github.com/llvm/llvm-project/commit/62a7bb09e3646780b7bceb7cef4eba257e3a9818.diff
LOG: [RISCV][GISel] Resolve CHECK prefix conflict and add a bunch of FIXMEs to bitmanip tests. NFC
Added:
Modified:
llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
index 02f81e136af797..87c579da697ce4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
@@ -2,9 +2,9 @@
; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV32I
; RUN: llc -mtriple=riscv32 -global-isel -mattr=+zbb -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBKB
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBKB,RV32ZBB
; RUN: llc -mtriple=riscv32 -global-isel -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBKB
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBKB,RV32ZBKB
define i32 @andn_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: andn_i32:
@@ -334,8 +334,8 @@ define i8 @srli_i8(i8 %a) nounwind {
ret i8 %1
}
-; We could use sext.b+srai, but slli+srai offers more opportunities for
-; comppressed instructions.
+; FIXME: We should use slli+srai with Zbb for better compression.
+; FIXME: We should combine back to back srai.
define i8 @srai_i8(i8 %a) nounwind {
; RV32I-LABEL: srai_i8:
; RV32I: # %bb.0:
@@ -343,12 +343,24 @@ define i8 @srai_i8(i8 %a) nounwind {
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: srai a0, a0, 5
; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: srai_i8:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: sext.b a0, a0
+; RV32ZBB-NEXT: srai a0, a0, 5
+; RV32ZBB-NEXT: ret
+;
+; RV32ZBKB-LABEL: srai_i8:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: slli a0, a0, 24
+; RV32ZBKB-NEXT: srai a0, a0, 24
+; RV32ZBKB-NEXT: srai a0, a0, 5
+; RV32ZBKB-NEXT: ret
%1 = ashr i8 %a, 5
ret i8 %1
}
-; We could use zext.h+srli, but slli+srli offers more opportunities for
-; comppressed instructions.
+; FIXME: We should use slli+srli.
define i16 @srli_i16(i16 %a) nounwind {
; RV32I-LABEL: srli_i16:
; RV32I: # %bb.0:
@@ -367,8 +379,8 @@ define i16 @srli_i16(i16 %a) nounwind {
ret i16 %1
}
-; We could use sext.h+srai, but slli+srai offers more opportunities for
-; comppressed instructions.
+; FIXME: We should use slli+srai with Zbb/Zbkb for better compression.
+; FIXME: We should combine back to back sraiw.
define i16 @srai_i16(i16 %a) nounwind {
; RV32I-LABEL: srai_i16:
; RV32I: # %bb.0:
@@ -376,6 +388,19 @@ define i16 @srai_i16(i16 %a) nounwind {
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: srai a0, a0, 9
; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: srai_i16:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: sext.h a0, a0
+; RV32ZBB-NEXT: srai a0, a0, 9
+; RV32ZBB-NEXT: ret
+;
+; RV32ZBKB-LABEL: srai_i16:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: slli a0, a0, 16
+; RV32ZBKB-NEXT: srai a0, a0, 16
+; RV32ZBKB-NEXT: srai a0, a0, 9
+; RV32ZBKB-NEXT: ret
%1 = ashr i16 %a, 9
ret i16 %1
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
index 8990c4dd3f26d5..0b6b2b2776a2a4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
@@ -616,6 +616,7 @@ define i32 @sextb_i32(i32 %a) nounwind {
ret i32 %shr
}
+; FIXME: Combine back to back srai.
define i64 @sextb_i64(i64 %a) nounwind {
; RV32I-LABEL: sextb_i64:
; RV32I: # %bb.0:
@@ -650,6 +651,7 @@ define i32 @sexth_i32(i32 %a) nounwind {
ret i32 %shr
}
+; FIXME: Combine back to back srai.
define i64 @sexth_i64(i64 %a) nounwind {
; RV32I-LABEL: sexth_i64:
; RV32I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
index d9b093448cb46e..80e43c94aab0e6 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
@@ -106,6 +106,7 @@ define i64 @pack_i64_3(ptr %0, ptr %1) {
ret i64 %8
}
+; FIXME: Use packh.
define i32 @packh_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: packh_i32:
; CHECK: # %bb.0:
@@ -143,6 +144,7 @@ define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
ret i32 %or
}
+; FIXME: Use packh
define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: packh_i64:
; CHECK: # %bb.0:
@@ -161,6 +163,7 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
ret i64 %or
}
+; FIXME: The andi+srli for RV32ZBKB should fold to 0.
define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: packh_i64_2:
; RV32I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
index d9b7f16131c352..7ce65771571f0c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
@@ -2,10 +2,11 @@
; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -global-isel -mattr=+zbb -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB,RV64ZBB
; RUN: llc -mtriple=riscv64 -global-isel -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB,RV64ZBKB
+; FIXME: sext.w is unneeded.
define signext i32 @andn_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: andn_i32:
; RV64I: # %bb.0:
@@ -40,6 +41,7 @@ define i64 @andn_i64(i64 %a, i64 %b) nounwind {
ret i64 %and
}
+; FIXME: sext.w is unneeded.
define signext i32 @orn_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: orn_i32:
; RV64I: # %bb.0:
@@ -74,6 +76,7 @@ define i64 @orn_i64(i64 %a, i64 %b) nounwind {
ret i64 %or
}
+; FIXME: sext.w is unneeded.
define signext i32 @xnor_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: xnor_i32:
; RV64I: # %bb.0:
@@ -424,6 +427,7 @@ define i64 @rori_i64_fshr(i64 %a) nounwind {
ret i64 %1
}
+; FIXME: We should use srli instead of srliw for better compression.
define i8 @srli_i8(i8 %a) nounwind {
; CHECK-LABEL: srli_i8:
; CHECK: # %bb.0:
@@ -434,8 +438,8 @@ define i8 @srli_i8(i8 %a) nounwind {
ret i8 %1
}
-; We could use sext.b+srai, but slli+srai offers more opportunities for
-; comppressed instructions.
+; FIXME: We should use slli+srai with Zbb for better compression.
+; FIXME: We should combine back to back sraiw.
define i8 @srai_i8(i8 %a) nounwind {
; RV64I-LABEL: srai_i8:
; RV64I: # %bb.0:
@@ -443,12 +447,24 @@ define i8 @srai_i8(i8 %a) nounwind {
; RV64I-NEXT: sraiw a0, a0, 24
; RV64I-NEXT: sraiw a0, a0, 5
; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: srai_i8:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: sext.b a0, a0
+; RV64ZBB-NEXT: sraiw a0, a0, 5
+; RV64ZBB-NEXT: ret
+;
+; RV64ZBKB-LABEL: srai_i8:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: slli a0, a0, 24
+; RV64ZBKB-NEXT: sraiw a0, a0, 24
+; RV64ZBKB-NEXT: sraiw a0, a0, 5
+; RV64ZBKB-NEXT: ret
%1 = ashr i8 %a, 5
ret i8 %1
}
-; We could use zext.h+srli, but slli+srli offers more opportunities for
-; comppressed instructions.
+; FIXME: We should use slli+srli.
define i16 @srli_i16(i16 %a) nounwind {
; RV64I-LABEL: srli_i16:
; RV64I: # %bb.0:
@@ -457,12 +473,26 @@ define i16 @srli_i16(i16 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 6
; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: srli_i16:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: zext.h a0, a0
+; RV64ZBB-NEXT: srliw a0, a0, 6
+; RV64ZBB-NEXT: ret
+;
+; RV64ZBKB-LABEL: srli_i16:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: lui a1, 16
+; RV64ZBKB-NEXT: addi a1, a1, -1
+; RV64ZBKB-NEXT: and a0, a0, a1
+; RV64ZBKB-NEXT: srliw a0, a0, 6
+; RV64ZBKB-NEXT: ret
%1 = lshr i16 %a, 6
ret i16 %1
}
-; We could use sext.h+srai, but slli+srai offers more opportunities for
-; comppressed instructions.
+; FIXME: We should use slli+srai with Zbb/Zbkb for better compression.
+; FIXME: We should combine back to back sraiw.
define i16 @srai_i16(i16 %a) nounwind {
; RV64I-LABEL: srai_i16:
; RV64I: # %bb.0:
@@ -470,6 +500,19 @@ define i16 @srai_i16(i16 %a) nounwind {
; RV64I-NEXT: sraiw a0, a0, 16
; RV64I-NEXT: sraiw a0, a0, 9
; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: srai_i16:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: sext.h a0, a0
+; RV64ZBB-NEXT: sraiw a0, a0, 9
+; RV64ZBB-NEXT: ret
+;
+; RV64ZBKB-LABEL: srai_i16:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: slli a0, a0, 16
+; RV64ZBKB-NEXT: sraiw a0, a0, 16
+; RV64ZBKB-NEXT: sraiw a0, a0, 9
+; RV64ZBKB-NEXT: ret
%1 = ashr i16 %a, 9
ret i16 %1
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
index 835b4e32ae3206..89b30f733cc33a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
@@ -6,6 +6,7 @@
declare i32 @llvm.ctlz.i32(i32, i1)
+; FIXME: We don't need the shift pair before the beqz for RV64I.
define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-LABEL: ctlz_i32:
; RV64I: # %bb.0:
@@ -126,6 +127,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
ret i32 %2
}
+; FIXME: We don't need the shift pair before the beqz for RV64I.
define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-LABEL: log2_ceil_i32:
; RV64I: # %bb.0:
@@ -264,6 +266,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
ret i32 %4
}
+; FIXME: We don't need the shift pair before the beqz for RV64I.
define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-LABEL: ctlz_lshr_i32:
; RV64I: # %bb.0:
@@ -995,6 +998,8 @@ define i64 @max_i64(i64 %a, i64 %b) nounwind {
ret i64 %cond
}
+; FIXME: We don't need the shift pairs. The inputs are sign extended, we can
+; compare them directly.
define signext i32 @minu_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: minu_i32:
; RV64I: # %bb.0:
@@ -1041,6 +1046,8 @@ define i64 @minu_i64(i64 %a, i64 %b) nounwind {
ret i64 %cond
}
+; FIXME: We don't need the shift pairs. The inputs are sign extended, we can
+; compare them directly.
define signext i32 @maxu_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: maxu_i32:
; RV64I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
index 6b57b179240d70..b449b7d1beaaec 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
@@ -4,6 +4,7 @@
; RUN: llc -mtriple=riscv64 -global-isel -mattr=+zbkb -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64ZBKB
+; FIXME: Use packw
define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: pack_i32:
; RV64I: # %bb.0:
@@ -30,6 +31,7 @@ define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
ret i32 %or
}
+; FIXME: Use packw
define signext i32 @pack_i32_2(i16 zeroext %a, i16 zeroext %b) nounwind {
; RV64I-LABEL: pack_i32_2:
; RV64I: # %bb.0:
@@ -52,6 +54,7 @@ define signext i32 @pack_i32_2(i16 zeroext %a, i16 zeroext %b) nounwind {
}
; Test case where we don't have a sign_extend_inreg after the or.
+; FIXME: Use packw
define signext i32 @pack_i32_3(i16 zeroext %0, i16 zeroext %1, i32 signext %2) {
; RV64I-LABEL: pack_i32_3:
; RV64I: # %bb.0:
@@ -93,6 +96,7 @@ define i64 @pack_i64(i64 %a, i64 %b) nounwind {
ret i64 %or
}
+; FIXME: The slli+srli isn't needed with pack.
define i64 @pack_i64_2(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: pack_i64_2:
; RV64I: # %bb.0:
@@ -141,6 +145,7 @@ define i64 @pack_i64_3(ptr %0, ptr %1) {
ret i64 %8
}
+; FIXME: Use packh
define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: packh_i32:
; RV64I: # %bb.0:
@@ -168,6 +173,7 @@ define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
ret i32 %or
}
+; FIXME: Use packh
define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
; RV64I-LABEL: packh_i32_2:
; RV64I: # %bb.0:
@@ -191,6 +197,7 @@ define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
ret i32 %or
}
+; FIXME: Use packh
define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: packh_i64:
; RV64I: # %bb.0:
@@ -238,6 +245,7 @@ define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
ret i64 %or
}
+; FIXME: Use packh
define zeroext i16 @packh_i16(i8 zeroext %a, i8 zeroext %b) nounwind {
; RV64I-LABEL: packh_i16:
; RV64I: # %bb.0:
@@ -261,6 +269,7 @@ define zeroext i16 @packh_i16(i8 zeroext %a, i8 zeroext %b) nounwind {
ret i16 %or
}
+; FIXME: Use packh
define zeroext i16 @packh_i16_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2) {
; RV64I-LABEL: packh_i16_2:
; RV64I: # %bb.0:
@@ -289,6 +298,7 @@ define zeroext i16 @packh_i16_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2) {
ret i16 %8
}
+; FIXME: Use packh
define void @packh_i16_3(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) {
; RV64I-LABEL: packh_i16_3:
; RV64I: # %bb.0:
@@ -394,6 +404,7 @@ define i64 @pack_i64_imm() {
ret i64 1157442765409226768 ; 0x0101010101010101
}
+; FIXME: Use zext.h
define i32 @zexth_i32(i32 %a) nounwind {
; RV64I-LABEL: zexth_i32:
; RV64I: # %bb.0:
More information about the llvm-commits
mailing list