[llvm] 8f023ec - [RISCV] Add coverage for select C, C1, C2 where (C1-C2)*[0,1] is cheap

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 16 12:47:06 PDT 2024


Author: Philip Reames
Date: 2024-09-16T12:46:43-07:00
New Revision: 8f023ec81df2f7d26db3c90e7a197d9f75472304

URL: https://github.com/llvm/llvm-project/commit/8f023ec81df2f7d26db3c90e7a197d9f75472304
DIFF: https://github.com/llvm/llvm-project/commit/8f023ec81df2f7d26db3c90e7a197d9f75472304.diff

LOG: [RISCV] Add coverage for select C, C1, C2 where (C1-C2)*[0,1] is cheap

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/select.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index ffbbe31412ed2a..8aa50cc0f39c1a 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -1819,8 +1819,8 @@ define i32 @select_cst5(i1 zeroext %cond) {
   ret i32 %ret
 }
 
-define i32 @select_cst6(i1 zeroext %cond) {
-; RV32IM-LABEL: select_cst6:
+define i32 @select_cst5_invert(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst5_invert:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    bnez a0, .LBB48_2
 ; RV32IM-NEXT:  # %bb.1:
@@ -1831,7 +1831,7 @@ define i32 @select_cst6(i1 zeroext %cond) {
 ; RV32IM-NEXT:    addi a0, a0, -2047
 ; RV32IM-NEXT:    ret
 ;
-; RV64IM-LABEL: select_cst6:
+; RV64IM-LABEL: select_cst5_invert:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    bnez a0, .LBB48_2
 ; RV64IM-NEXT:  # %bb.1:
@@ -1842,14 +1842,14 @@ define i32 @select_cst6(i1 zeroext %cond) {
 ; RV64IM-NEXT:    addiw a0, a0, -2047
 ; RV64IM-NEXT:    ret
 ;
-; RV64IMXVTCONDOPS-LABEL: select_cst6:
+; RV64IMXVTCONDOPS-LABEL: select_cst5_invert:
 ; RV64IMXVTCONDOPS:       # %bb.0:
 ; RV64IMXVTCONDOPS-NEXT:    li a1, 2
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 2047
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
-; CHECKZICOND-LABEL: select_cst6:
+; CHECKZICOND-LABEL: select_cst5_invert:
 ; CHECKZICOND:       # %bb.0:
 ; CHECKZICOND-NEXT:    li a1, 2
 ; CHECKZICOND-NEXT:    czero.eqz a0, a1, a0
@@ -1859,26 +1859,332 @@ define i32 @select_cst6(i1 zeroext %cond) {
   ret i32 %ret
 }
 
+define i32 @select_cst_diff2(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst_diff2:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a1, a0
+; RV32IM-NEXT:    li a0, 120
+; RV32IM-NEXT:    bnez a1, .LBB49_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, 122
+; RV32IM-NEXT:  .LBB49_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_diff2:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a1, a0
+; RV64IM-NEXT:    li a0, 120
+; RV64IM-NEXT:    bnez a1, .LBB49_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, 122
+; RV64IM-NEXT:  .LBB49_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_diff2:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    li a1, 2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 120
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_diff2:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    li a1, 2
+; CHECKZICOND-NEXT:    czero.nez a0, a1, a0
+; CHECKZICOND-NEXT:    addi a0, a0, 120
+; CHECKZICOND-NEXT:    ret
+  %ret = select i1 %cond, i32 120, i32 122
+  ret i32 %ret
+}
+
+define i32 @select_cst_diff2_invert(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst_diff2_invert:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a1, a0
+; RV32IM-NEXT:    li a0, 122
+; RV32IM-NEXT:    bnez a1, .LBB50_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, 120
+; RV32IM-NEXT:  .LBB50_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_diff2_invert:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a1, a0
+; RV64IM-NEXT:    li a0, 122
+; RV64IM-NEXT:    bnez a1, .LBB50_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, 120
+; RV64IM-NEXT:  .LBB50_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_diff2_invert:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    li a1, -2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 122
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_diff2_invert:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    li a1, -2
+; CHECKZICOND-NEXT:    czero.nez a0, a1, a0
+; CHECKZICOND-NEXT:    addi a0, a0, 122
+; CHECKZICOND-NEXT:    ret
+  %ret = select i1 %cond, i32 122, i32 120
+  ret i32 %ret
+}
+
+define i32 @select_cst_diff4(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst_diff4:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a1, a0
+; RV32IM-NEXT:    li a0, 10
+; RV32IM-NEXT:    bnez a1, .LBB51_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, 6
+; RV32IM-NEXT:  .LBB51_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_diff4:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a1, a0
+; RV64IM-NEXT:    li a0, 10
+; RV64IM-NEXT:    bnez a1, .LBB51_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, 6
+; RV64IM-NEXT:  .LBB51_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_diff4:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    li a1, -4
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 10
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_diff4:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    li a1, -4
+; CHECKZICOND-NEXT:    czero.nez a0, a1, a0
+; CHECKZICOND-NEXT:    addi a0, a0, 10
+; CHECKZICOND-NEXT:    ret
+  %ret = select i1 %cond, i32 10, i32 6
+  ret i32 %ret
+}
+
+define i32 @select_cst_diff4_invert(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst_diff4_invert:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a1, a0
+; RV32IM-NEXT:    li a0, 6
+; RV32IM-NEXT:    bnez a1, .LBB52_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, 10
+; RV32IM-NEXT:  .LBB52_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_diff4_invert:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a1, a0
+; RV64IM-NEXT:    li a0, 6
+; RV64IM-NEXT:    bnez a1, .LBB52_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, 10
+; RV64IM-NEXT:  .LBB52_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_diff4_invert:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    li a1, 4
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 6
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_diff4_invert:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    li a1, 4
+; CHECKZICOND-NEXT:    czero.nez a0, a1, a0
+; CHECKZICOND-NEXT:    addi a0, a0, 6
+; CHECKZICOND-NEXT:    ret
+  %ret = select i1 %cond, i32 6, i32 10
+  ret i32 %ret
+}
+
+define i32 @select_cst_diff8(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst_diff8:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a1, a0
+; RV32IM-NEXT:    li a0, 14
+; RV32IM-NEXT:    bnez a1, .LBB53_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, 6
+; RV32IM-NEXT:  .LBB53_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_diff8:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a1, a0
+; RV64IM-NEXT:    li a0, 14
+; RV64IM-NEXT:    bnez a1, .LBB53_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, 6
+; RV64IM-NEXT:  .LBB53_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_diff8:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    li a1, -8
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 14
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_diff8:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    li a1, -8
+; CHECKZICOND-NEXT:    czero.nez a0, a1, a0
+; CHECKZICOND-NEXT:    addi a0, a0, 14
+; CHECKZICOND-NEXT:    ret
+  %ret = select i1 %cond, i32 14, i32 6
+  ret i32 %ret
+}
+
+define i32 @select_cst_diff8_invert(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst_diff8_invert:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a1, a0
+; RV32IM-NEXT:    li a0, 6
+; RV32IM-NEXT:    bnez a1, .LBB54_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, 14
+; RV32IM-NEXT:  .LBB54_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_diff8_invert:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a1, a0
+; RV64IM-NEXT:    li a0, 6
+; RV64IM-NEXT:    bnez a1, .LBB54_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, 14
+; RV64IM-NEXT:  .LBB54_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_diff8_invert:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    li a1, 8
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 6
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_diff8_invert:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    li a1, 8
+; CHECKZICOND-NEXT:    czero.nez a0, a1, a0
+; CHECKZICOND-NEXT:    addi a0, a0, 6
+; CHECKZICOND-NEXT:    ret
+  %ret = select i1 %cond, i32 6, i32 14
+  ret i32 %ret
+}
+
+
+define i32 @select_cst_diff1024(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst_diff1024:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a1, a0
+; RV32IM-NEXT:    li a0, 1030
+; RV32IM-NEXT:    bnez a1, .LBB55_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, 6
+; RV32IM-NEXT:  .LBB55_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_diff1024:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a1, a0
+; RV64IM-NEXT:    li a0, 1030
+; RV64IM-NEXT:    bnez a1, .LBB55_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, 6
+; RV64IM-NEXT:  .LBB55_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_diff1024:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    li a1, -1024
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 1030
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_diff1024:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    li a1, -1024
+; CHECKZICOND-NEXT:    czero.nez a0, a1, a0
+; CHECKZICOND-NEXT:    addi a0, a0, 1030
+; CHECKZICOND-NEXT:    ret
+  %ret = select i1 %cond, i32 1030, i32 6
+  ret i32 %ret
+}
+
+define i32 @select_cst_diff1024_invert(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst_diff1024_invert:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a1, a0
+; RV32IM-NEXT:    li a0, 6
+; RV32IM-NEXT:    bnez a1, .LBB56_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, 1030
+; RV32IM-NEXT:  .LBB56_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_diff1024_invert:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a1, a0
+; RV64IM-NEXT:    li a0, 6
+; RV64IM-NEXT:    bnez a1, .LBB56_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, 1030
+; RV64IM-NEXT:  .LBB56_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_diff1024_invert:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    li a1, 1024
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addi a0, a0, 6
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_diff1024_invert:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    li a1, 1024
+; CHECKZICOND-NEXT:    czero.nez a0, a1, a0
+; CHECKZICOND-NEXT:    addi a0, a0, 6
+; CHECKZICOND-NEXT:    ret
+  %ret = select i1 %cond, i32 6, i32 1030
+  ret i32 %ret
+}
+
+
 @select_redundant_czero_eqz_data = global i32 0, align 4
 
 define void @select_redundant_czero_eqz1(ptr %0, ptr %1) {
 ; RV32IM-LABEL: select_redundant_czero_eqz1:
 ; RV32IM:       # %bb.0: # %entry
-; RV32IM-NEXT:    bnez a0, .LBB49_2
+; RV32IM-NEXT:    bnez a0, .LBB57_2
 ; RV32IM-NEXT:  # %bb.1:
 ; RV32IM-NEXT:    lui a0, %hi(select_redundant_czero_eqz_data)
 ; RV32IM-NEXT:    addi a0, a0, %lo(select_redundant_czero_eqz_data)
-; RV32IM-NEXT:  .LBB49_2: # %entry
+; RV32IM-NEXT:  .LBB57_2: # %entry
 ; RV32IM-NEXT:    sw a0, 0(a1)
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_redundant_czero_eqz1:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB49_2
+; RV64IM-NEXT:    bnez a0, .LBB57_2
 ; RV64IM-NEXT:  # %bb.1:
 ; RV64IM-NEXT:    lui a0, %hi(select_redundant_czero_eqz_data)
 ; RV64IM-NEXT:    addi a0, a0, %lo(select_redundant_czero_eqz_data)
-; RV64IM-NEXT:  .LBB49_2: # %entry
+; RV64IM-NEXT:  .LBB57_2: # %entry
 ; RV64IM-NEXT:    sd a0, 0(a1)
 ; RV64IM-NEXT:    ret
 ;
@@ -1918,21 +2224,21 @@ entry:
 define void @select_redundant_czero_eqz2(ptr %0, ptr %1) {
 ; RV32IM-LABEL: select_redundant_czero_eqz2:
 ; RV32IM:       # %bb.0: # %entry
-; RV32IM-NEXT:    bnez a0, .LBB50_2
+; RV32IM-NEXT:    bnez a0, .LBB58_2
 ; RV32IM-NEXT:  # %bb.1: # %entry
 ; RV32IM-NEXT:    lui a0, %hi(select_redundant_czero_eqz_data)
 ; RV32IM-NEXT:    addi a0, a0, %lo(select_redundant_czero_eqz_data)
-; RV32IM-NEXT:  .LBB50_2: # %entry
+; RV32IM-NEXT:  .LBB58_2: # %entry
 ; RV32IM-NEXT:    sw a0, 0(a1)
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_redundant_czero_eqz2:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB50_2
+; RV64IM-NEXT:    bnez a0, .LBB58_2
 ; RV64IM-NEXT:  # %bb.1: # %entry
 ; RV64IM-NEXT:    lui a0, %hi(select_redundant_czero_eqz_data)
 ; RV64IM-NEXT:    addi a0, a0, %lo(select_redundant_czero_eqz_data)
-; RV64IM-NEXT:  .LBB50_2: # %entry
+; RV64IM-NEXT:  .LBB58_2: # %entry
 ; RV64IM-NEXT:    sd a0, 0(a1)
 ; RV64IM-NEXT:    ret
 ;


        


More information about the llvm-commits mailing list