[llvm] 949caf3 - [RISCV] Zba testing improvements. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 14 13:59:17 PST 2024


Author: Craig Topper
Date: 2024-11-14T13:47:49-08:00
New Revision: 949caf39e4a445cc0600735ac0755dd0d4aa28f6

URL: https://github.com/llvm/llvm-project/commit/949caf39e4a445cc0600735ac0755dd0d4aa28f6
DIFF: https://github.com/llvm/llvm-project/commit/949caf39e4a445cc0600735ac0755dd0d4aa28f6.diff

LOG: [RISCV] Zba testing improvements. NFC

Add lshr+gep tests for RV32. These patterns are already handled, but we only tested for RV64.

Remove stale FIXMEs and adjust test case names in rv64zba.ll

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv32zba.ll
    llvm/test/CodeGen/RISCV/rv64zba.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index 89273ef0e50b5f..fec156ac2be27e 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -650,6 +650,115 @@ define i32 @addshl_5_8(i32 %a, i32 %b) {
   ret i32 %e
 }
 
+define i32 @srli_1_sh2add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_1_sh2add:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    andi a1, a1, -4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lw a0, 0(a0)
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: srli_1_sh2add:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    srli a1, a1, 1
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    lw a0, 0(a0)
+; RV32ZBA-NEXT:    ret
+  %3 = lshr i32 %1, 1
+  %4 = getelementptr inbounds i32, ptr %0, i32 %3
+  %5 = load i32, ptr %4, align 4
+  ret i32 %5
+}
+
+define i64 @srli_2_sh3add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_2_sh3add:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    andi a1, a1, -8
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    lw a0, 0(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: srli_2_sh3add:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    srli a1, a1, 2
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    lw a0, 0(a1)
+; RV32ZBA-NEXT:    lw a1, 4(a1)
+; RV32ZBA-NEXT:    ret
+  %3 = lshr i32 %1, 2
+  %4 = getelementptr inbounds i64, ptr %0, i32 %3
+  %5 = load i64, ptr %4, align 8
+  ret i64 %5
+}
+
+define signext i16 @srli_2_sh1add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_2_sh1add:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    andi a1, a1, -2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lh a0, 0(a0)
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: srli_2_sh1add:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    srli a1, a1, 2
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    lh a0, 0(a0)
+; RV32ZBA-NEXT:    ret
+  %3 = lshr i32 %1, 2
+  %4 = getelementptr inbounds i16, ptr %0, i32 %3
+  %5 = load i16, ptr %4, align 2
+  ret i16 %5
+}
+
+define i32 @srli_3_sh2add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_3_sh2add:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    andi a1, a1, -4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lw a0, 0(a0)
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: srli_3_sh2add:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    srli a1, a1, 3
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    lw a0, 0(a0)
+; RV32ZBA-NEXT:    ret
+  %3 = lshr i32 %1, 3
+  %4 = getelementptr inbounds i32, ptr %0, i32 %3
+  %5 = load i32, ptr %4, align 4
+  ret i32 %5
+}
+
+define i64 @srli_4_sh3add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_4_sh3add:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    andi a1, a1, -8
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    lw a0, 0(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: srli_4_sh3add:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    srli a1, a1, 4
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    lw a0, 0(a1)
+; RV32ZBA-NEXT:    lw a1, 4(a1)
+; RV32ZBA-NEXT:    ret
+  %3 = lshr i32 %1, 4
+  %4 = getelementptr inbounds i64, ptr %0, i32 %3
+  %5 = load i64, ptr %4, align 8
+  ret i64 %5
+}
+
 define i32 @mul_neg1(i32 %a) {
 ; CHECK-LABEL: mul_neg1:
 ; CHECK:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 05b411bb12a241..07726b643b51ad 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -1713,7 +1713,6 @@ define i64 @addshl64_5_8(i64 %a, i64 %b) {
 }
 
 ; Make sure we use sext.h+slli+srli for Zba+Zbb.
-; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
 define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
 ; RV64I-LABEL: sext_ashr_zext_i8:
 ; RV64I:       # %bb.0:
@@ -1830,7 +1829,6 @@ entry:
 }
 
 ; Make sure we use sext.h+slli+srli for Zba+Zbb.
-; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
 define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
 ; RV64I-LABEL: sext_ashr_zext_i16:
 ; RV64I:       # %bb.0:
@@ -2262,8 +2260,8 @@ define i64 @srli_4_sh3add(ptr %0, i64 %1) {
   ret i64 %5
 }
 
-define signext i16 @shl_2_sh1add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: shl_2_sh1add:
+define signext i16 @shl_2_sh1adduw(ptr %0, i32 signext %1) {
+; RV64I-LABEL: shl_2_sh1adduw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a1, a1, 34
 ; RV64I-NEXT:    srli a1, a1, 31
@@ -2271,7 +2269,7 @@ define signext i16 @shl_2_sh1add(ptr %0, i32 signext %1) {
 ; RV64I-NEXT:    lh a0, 0(a0)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBA-LABEL: shl_2_sh1add:
+; RV64ZBA-LABEL: shl_2_sh1adduw:
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    slli a1, a1, 2
 ; RV64ZBA-NEXT:    sh1add.uw a0, a1, a0
@@ -2284,8 +2282,8 @@ define signext i16 @shl_2_sh1add(ptr %0, i32 signext %1) {
   ret i16 %6
 }
 
-define signext i32 @shl_16_sh2add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: shl_16_sh2add:
+define signext i32 @shl_16_sh2adduw(ptr %0, i32 signext %1) {
+; RV64I-LABEL: shl_16_sh2adduw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a1, a1, 48
 ; RV64I-NEXT:    srli a1, a1, 30
@@ -2293,7 +2291,7 @@ define signext i32 @shl_16_sh2add(ptr %0, i32 signext %1) {
 ; RV64I-NEXT:    lw a0, 0(a0)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBA-LABEL: shl_16_sh2add:
+; RV64ZBA-LABEL: shl_16_sh2adduw:
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    slli a1, a1, 16
 ; RV64ZBA-NEXT:    sh2add.uw a0, a1, a0
@@ -2306,8 +2304,8 @@ define signext i32 @shl_16_sh2add(ptr %0, i32 signext %1) {
   ret i32 %6
 }
 
-define i64 @shl_31_sh3add(ptr %0, i32 signext %1) {
-; RV64I-LABEL: shl_31_sh3add:
+define i64 @shl_31_sh3adduw(ptr %0, i32 signext %1) {
+; RV64I-LABEL: shl_31_sh3adduw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    srli a1, a1, 29
@@ -2315,7 +2313,7 @@ define i64 @shl_31_sh3add(ptr %0, i32 signext %1) {
 ; RV64I-NEXT:    ld a0, 0(a0)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBA-LABEL: shl_31_sh3add:
+; RV64ZBA-LABEL: shl_31_sh3adduw:
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    slli a1, a1, 31
 ; RV64ZBA-NEXT:    sh3add.uw a0, a1, a0


        


More information about the llvm-commits mailing list