[llvm] e47ab56 - [RISCV][test] Add tests for optimization with SH*ADD in the zba extension

Ben Shi via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 2 02:30:26 PDT 2021


Author: Ben Shi
Date: 2021-09-02T17:30:03+08:00
New Revision: e47ab56398c3ad45836c2a1cb75482698790c771

URL: https://github.com/llvm/llvm-project/commit/e47ab56398c3ad45836c2a1cb75482698790c771
DIFF: https://github.com/llvm/llvm-project/commit/e47ab56398c3ad45836c2a1cb75482698790c771.diff

LOG: [RISCV][test] Add tests for optimization with SH*ADD in the zba extension

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D108915

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv32zba.ll
    llvm/test/CodeGen/RISCV/rv64zba.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index 6ee346d2029c..2423691a2bf0 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -785,3 +785,84 @@ define i32 @add8208(i32 %a) {
   %c = add i32 %a, 8208
   ret i32 %c
 }
+
+define i32 @addshl_5_6(i32 %a, i32 %b) {
+; RV32I-LABEL: addshl_5_6:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    slli a1, a1, 6
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32B-LABEL: addshl_5_6:
+; RV32B:       # %bb.0:
+; RV32B-NEXT:    slli a0, a0, 5
+; RV32B-NEXT:    slli a1, a1, 6
+; RV32B-NEXT:    add a0, a0, a1
+; RV32B-NEXT:    ret
+;
+; RV32ZBA-LABEL: addshl_5_6:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    slli a0, a0, 5
+; RV32ZBA-NEXT:    slli a1, a1, 6
+; RV32ZBA-NEXT:    add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = shl i32 %a, 5
+  %d = shl i32 %b, 6
+  %e = add i32 %c, %d
+  ret i32 %e
+}
+
+define i32 @addshl_5_7(i32 %a, i32 %b) {
+; RV32I-LABEL: addshl_5_7:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    slli a1, a1, 7
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32B-LABEL: addshl_5_7:
+; RV32B:       # %bb.0:
+; RV32B-NEXT:    slli a0, a0, 5
+; RV32B-NEXT:    slli a1, a1, 7
+; RV32B-NEXT:    add a0, a0, a1
+; RV32B-NEXT:    ret
+;
+; RV32ZBA-LABEL: addshl_5_7:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    slli a0, a0, 5
+; RV32ZBA-NEXT:    slli a1, a1, 7
+; RV32ZBA-NEXT:    add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = shl i32 %a, 5
+  %d = shl i32 %b, 7
+  %e = add i32 %c, %d
+  ret i32 %e
+}
+
+define i32 @addshl_5_8(i32 %a, i32 %b) {
+; RV32I-LABEL: addshl_5_8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    slli a1, a1, 8
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32B-LABEL: addshl_5_8:
+; RV32B:       # %bb.0:
+; RV32B-NEXT:    slli a0, a0, 5
+; RV32B-NEXT:    slli a1, a1, 8
+; RV32B-NEXT:    add a0, a0, a1
+; RV32B-NEXT:    ret
+;
+; RV32ZBA-LABEL: addshl_5_8:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    slli a0, a0, 5
+; RV32ZBA-NEXT:    slli a1, a1, 8
+; RV32ZBA-NEXT:    add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = shl i32 %a, 5
+  %d = shl i32 %b, 8
+  %e = add i32 %c, %d
+  ret i32 %e
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 7dcf41fd1e63..ad388105e7ed 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -1399,3 +1399,165 @@ define i64 @add8208(i64 %a) {
   %c = add i64 %a, 8208
   ret i64 %c
 }
+
+define signext i32 @addshl32_5_6(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addshl32_5_6:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    slliw a1, a1, 6
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64B-LABEL: addshl32_5_6:
+; RV64B:       # %bb.0:
+; RV64B-NEXT:    slliw a0, a0, 5
+; RV64B-NEXT:    slliw a1, a1, 6
+; RV64B-NEXT:    addw a0, a0, a1
+; RV64B-NEXT:    ret
+;
+; RV64ZBA-LABEL: addshl32_5_6:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    slliw a0, a0, 5
+; RV64ZBA-NEXT:    slliw a1, a1, 6
+; RV64ZBA-NEXT:    addw a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = shl i32 %a, 5
+  %d = shl i32 %b, 6
+  %e = add i32 %c, %d
+  ret i32 %e
+}
+
+define i64 @addshl64_5_6(i64 %a, i64 %b) {
+; RV64I-LABEL: addshl64_5_6:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    slli a1, a1, 6
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64B-LABEL: addshl64_5_6:
+; RV64B:       # %bb.0:
+; RV64B-NEXT:    slli a0, a0, 5
+; RV64B-NEXT:    slli a1, a1, 6
+; RV64B-NEXT:    add a0, a0, a1
+; RV64B-NEXT:    ret
+;
+; RV64ZBA-LABEL: addshl64_5_6:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    slli a0, a0, 5
+; RV64ZBA-NEXT:    slli a1, a1, 6
+; RV64ZBA-NEXT:    add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = shl i64 %a, 5
+  %d = shl i64 %b, 6
+  %e = add i64 %c, %d
+  ret i64 %e
+}
+
+define signext i32 @addshl32_5_7(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addshl32_5_7:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    slliw a1, a1, 7
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64B-LABEL: addshl32_5_7:
+; RV64B:       # %bb.0:
+; RV64B-NEXT:    slliw a0, a0, 5
+; RV64B-NEXT:    slliw a1, a1, 7
+; RV64B-NEXT:    addw a0, a0, a1
+; RV64B-NEXT:    ret
+;
+; RV64ZBA-LABEL: addshl32_5_7:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    slliw a0, a0, 5
+; RV64ZBA-NEXT:    slliw a1, a1, 7
+; RV64ZBA-NEXT:    addw a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = shl i32 %a, 5
+  %d = shl i32 %b, 7
+  %e = add i32 %c, %d
+  ret i32 %e
+}
+
+define i64 @addshl64_5_7(i64 %a, i64 %b) {
+; RV64I-LABEL: addshl64_5_7:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    slli a1, a1, 7
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64B-LABEL: addshl64_5_7:
+; RV64B:       # %bb.0:
+; RV64B-NEXT:    slli a0, a0, 5
+; RV64B-NEXT:    slli a1, a1, 7
+; RV64B-NEXT:    add a0, a0, a1
+; RV64B-NEXT:    ret
+;
+; RV64ZBA-LABEL: addshl64_5_7:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    slli a0, a0, 5
+; RV64ZBA-NEXT:    slli a1, a1, 7
+; RV64ZBA-NEXT:    add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = shl i64 %a, 5
+  %d = shl i64 %b, 7
+  %e = add i64 %c, %d
+  ret i64 %e
+}
+
+define signext i32 @addshl32_5_8(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addshl32_5_8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    slliw a1, a1, 8
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64B-LABEL: addshl32_5_8:
+; RV64B:       # %bb.0:
+; RV64B-NEXT:    slliw a0, a0, 5
+; RV64B-NEXT:    slliw a1, a1, 8
+; RV64B-NEXT:    addw a0, a0, a1
+; RV64B-NEXT:    ret
+;
+; RV64ZBA-LABEL: addshl32_5_8:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    slliw a0, a0, 5
+; RV64ZBA-NEXT:    slliw a1, a1, 8
+; RV64ZBA-NEXT:    addw a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = shl i32 %a, 5
+  %d = shl i32 %b, 8
+  %e = add i32 %c, %d
+  ret i32 %e
+}
+
+define i64 @addshl64_5_8(i64 %a, i64 %b) {
+; RV64I-LABEL: addshl64_5_8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    slli a1, a1, 8
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64B-LABEL: addshl64_5_8:
+; RV64B:       # %bb.0:
+; RV64B-NEXT:    slli a0, a0, 5
+; RV64B-NEXT:    slli a1, a1, 8
+; RV64B-NEXT:    add a0, a0, a1
+; RV64B-NEXT:    ret
+;
+; RV64ZBA-LABEL: addshl64_5_8:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    slli a0, a0, 5
+; RV64ZBA-NEXT:    slli a1, a1, 8
+; RV64ZBA-NEXT:    add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = shl i64 %a, 5
+  %d = shl i64 %b, 8
+  %e = add i64 %c, %d
+  ret i64 %e
+}


        


More information about the llvm-commits mailing list