[llvm] 3119073 - [RISCV][test] Add new tests for add-mul optimization in the zba extension with SH*ADD
Ben Shi via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 18 23:31:13 PDT 2021
Author: Ben Shi
Date: 2021-06-19T14:31:01+08:00
New Revision: 31190738c024298745d877a47ef95a5263ea5c9e
URL: https://github.com/llvm/llvm-project/commit/31190738c024298745d877a47ef95a5263ea5c9e
DIFF: https://github.com/llvm/llvm-project/commit/31190738c024298745d877a47ef95a5263ea5c9e.diff
LOG: [RISCV][test] Add new tests for add-mul optimization in the zba extension with SH*ADD
These tests cover patterns that future patches will optimize as follows (a sketch of the expected lowering follows the list):
Ry + Rx * 18 => (SH1ADD (SH3ADD Rx, Rx), Ry)
Ry + Rx * 20 => (SH2ADD (SH2ADD Rx, Rx), Ry)
Ry + Rx * 24 => (SH3ADD (SH1ADD Rx, Rx), Ry)
Ry + Rx * 36 => (SH2ADD (SH3ADD Rx, Rx), Ry)
Ry + Rx * 40 => (SH3ADD (SH2ADD Rx, Rx), Ry)
Ry + Rx * 72 => (SH3ADD (SH3ADD Rx, Rx), Ry)
Rx * (3 << C) => (SLLI (SH1ADD Rx, Rx), C)
Rx * (5 << C) => (SLLI (SH2ADD Rx, Rx), C)
Rx * (9 << C) => (SLLI (SH3ADD Rx, Rx), C)
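For illustration, a minimal sketch of the Zba lowerings these patterns anticipate, assuming %a arrives in a0 and %b in a1 per the RISC-V calling convention (the exact output is up to the future patches):

For addmul18 (a0 * 18 + a1):
    sh3add a0, a0, a0    # a0 = (a0 << 3) + a0 = a0 * 9
    sh1add a0, a0, a1    # a0 = (a0 << 1) + a1 = a0 * 18 + a1

For mul96 (a0 * 96 = a0 * (3 << 5)):
    sh1add a0, a0, a0    # a0 = (a0 << 1) + a0 = a0 * 3
    slli   a0, a0, 5     # a0 = a0 << 5 = a0 * 96

Each replaces a materialized constant plus MUL with two cheap ALU instructions.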
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D104507
Added:
Modified:
llvm/test/CodeGen/RISCV/rv32zba.ll
llvm/test/CodeGen/RISCV/rv64zba.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index cf4b98d5c5ab6..9f114467e40a7 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -158,3 +158,225 @@ define i32 @addmul12(i32 %a, i32 %b) {
%d = add i32 %c, %b
ret i32 %d
}
+
+define i32 @addmul18(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul18:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a2, zero, 18
+; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul18:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a2, zero, 18
+; RV32IB-NEXT: mul a0, a0, a2
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul18:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a2, zero, 18
+; RV32IBA-NEXT: mul a0, a0, a2
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 18
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul20(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul20:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a2, zero, 20
+; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul20:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a2, zero, 20
+; RV32IB-NEXT: mul a0, a0, a2
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul20:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a2, zero, 20
+; RV32IBA-NEXT: mul a0, a0, a2
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 20
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul24(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul24:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a2, zero, 24
+; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul24:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a2, zero, 24
+; RV32IB-NEXT: mul a0, a0, a2
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul24:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a2, zero, 24
+; RV32IBA-NEXT: mul a0, a0, a2
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 24
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul36(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul36:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a2, zero, 36
+; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul36:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a2, zero, 36
+; RV32IB-NEXT: mul a0, a0, a2
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul36:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a2, zero, 36
+; RV32IBA-NEXT: mul a0, a0, a2
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 36
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul40(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul40:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a2, zero, 40
+; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul40:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a2, zero, 40
+; RV32IB-NEXT: mul a0, a0, a2
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul40:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a2, zero, 40
+; RV32IBA-NEXT: mul a0, a0, a2
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 40
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul72(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul72:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a2, zero, 72
+; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul72:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a2, zero, 72
+; RV32IB-NEXT: mul a0, a0, a2
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul72:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a2, zero, 72
+; RV32IBA-NEXT: mul a0, a0, a2
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 72
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @mul96(i32 %a) {
+; RV32I-LABEL: mul96:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a1, zero, 96
+; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: mul96:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a1, zero, 96
+; RV32IB-NEXT: mul a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: mul96:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a1, zero, 96
+; RV32IBA-NEXT: mul a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 96
+ ret i32 %c
+}
+
+define i32 @mul160(i32 %a) {
+; RV32I-LABEL: mul160:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a1, zero, 160
+; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: mul160:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a1, zero, 160
+; RV32IB-NEXT: mul a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: mul160:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a1, zero, 160
+; RV32IBA-NEXT: mul a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 160
+ ret i32 %c
+}
+
+define i32 @mul288(i32 %a) {
+; RV32I-LABEL: mul288:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a1, zero, 288
+; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: mul288:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi a1, zero, 288
+; RV32IB-NEXT: mul a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: mul288:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi a1, zero, 288
+; RV32IBA-NEXT: mul a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 288
+ ret i32 %c
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 0b008955277da..153a5492d4f2a 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -449,80 +449,224 @@ define i64 @addmul12(i64 %a, i64 %b) {
ret i64 %d
}
-define i32 @addmulw6(i32 signext %a, i32 signext %b) {
-; RV64I-LABEL: addmulw6:
+define i64 @addmul18(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul18:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi a2, zero, 6
+; RV64I-NEXT: addi a2, zero, 18
; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
-; RV64IB-LABEL: addmulw6:
+; RV64IB-LABEL: addmul18:
; RV64IB: # %bb.0:
-; RV64IB-NEXT: addi a2, zero, 6
+; RV64IB-NEXT: addi a2, zero, 18
; RV64IB-NEXT: mul a0, a0, a2
-; RV64IB-NEXT: addw a0, a0, a1
+; RV64IB-NEXT: add a0, a0, a1
; RV64IB-NEXT: ret
;
-; RV64IBA-LABEL: addmulw6:
+; RV64IBA-LABEL: addmul18:
; RV64IBA: # %bb.0:
-; RV64IBA-NEXT: addi a2, zero, 6
+; RV64IBA-NEXT: addi a2, zero, 18
; RV64IBA-NEXT: mul a0, a0, a2
-; RV64IBA-NEXT: addw a0, a0, a1
+; RV64IBA-NEXT: add a0, a0, a1
; RV64IBA-NEXT: ret
- %c = mul i32 %a, 6
- %d = add i32 %c, %b
- ret i32 %d
+ %c = mul i64 %a, 18
+ %d = add i64 %c, %b
+ ret i64 %d
}
-define i32 @addmulw10(i32 signext %a, i32 signext %b) {
-; RV64I-LABEL: addmulw10:
+define i64 @addmul20(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul20:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi a2, zero, 10
+; RV64I-NEXT: addi a2, zero, 20
; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
-; RV64IB-LABEL: addmulw10:
+; RV64IB-LABEL: addmul20:
; RV64IB: # %bb.0:
-; RV64IB-NEXT: addi a2, zero, 10
+; RV64IB-NEXT: addi a2, zero, 20
; RV64IB-NEXT: mul a0, a0, a2
-; RV64IB-NEXT: addw a0, a0, a1
+; RV64IB-NEXT: add a0, a0, a1
; RV64IB-NEXT: ret
;
-; RV64IBA-LABEL: addmulw10:
+; RV64IBA-LABEL: addmul20:
; RV64IBA: # %bb.0:
-; RV64IBA-NEXT: addi a2, zero, 10
+; RV64IBA-NEXT: addi a2, zero, 20
; RV64IBA-NEXT: mul a0, a0, a2
-; RV64IBA-NEXT: addw a0, a0, a1
+; RV64IBA-NEXT: add a0, a0, a1
; RV64IBA-NEXT: ret
- %c = mul i32 %a, 10
- %d = add i32 %c, %b
- ret i32 %d
+ %c = mul i64 %a, 20
+ %d = add i64 %c, %b
+ ret i64 %d
}
-define i32 @addmulw12(i32 signext %a, i32 signext %b) {
-; RV64I-LABEL: addmulw12:
+define i64 @addmul24(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul24:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi a2, zero, 12
+; RV64I-NEXT: addi a2, zero, 24
; RV64I-NEXT: mul a0, a0, a2
-; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
-; RV64IB-LABEL: addmulw12:
+; RV64IB-LABEL: addmul24:
; RV64IB: # %bb.0:
-; RV64IB-NEXT: addi a2, zero, 12
+; RV64IB-NEXT: addi a2, zero, 24
; RV64IB-NEXT: mul a0, a0, a2
-; RV64IB-NEXT: addw a0, a0, a1
+; RV64IB-NEXT: add a0, a0, a1
; RV64IB-NEXT: ret
;
-; RV64IBA-LABEL: addmulw12:
+; RV64IBA-LABEL: addmul24:
; RV64IBA: # %bb.0:
-; RV64IBA-NEXT: addi a2, zero, 12
+; RV64IBA-NEXT: addi a2, zero, 24
+; RV64IBA-NEXT: mul a0, a0, a2
+; RV64IBA-NEXT: add a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 24
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @addmul36(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul36:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a2, zero, 36
+; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul36:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi a2, zero, 36
+; RV64IB-NEXT: mul a0, a0, a2
+; RV64IB-NEXT: add a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul36:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi a2, zero, 36
+; RV64IBA-NEXT: mul a0, a0, a2
+; RV64IBA-NEXT: add a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 36
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @addmul40(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul40:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a2, zero, 40
+; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul40:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi a2, zero, 40
+; RV64IB-NEXT: mul a0, a0, a2
+; RV64IB-NEXT: add a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul40:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi a2, zero, 40
; RV64IBA-NEXT: mul a0, a0, a2
-; RV64IBA-NEXT: addw a0, a0, a1
+; RV64IBA-NEXT: add a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 40
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @addmul72(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul72:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a2, zero, 72
+; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul72:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi a2, zero, 72
+; RV64IB-NEXT: mul a0, a0, a2
+; RV64IB-NEXT: add a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul72:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi a2, zero, 72
+; RV64IBA-NEXT: mul a0, a0, a2
+; RV64IBA-NEXT: add a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 72
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @mul96(i64 %a) {
+; RV64I-LABEL: mul96:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, 96
+; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: mul96:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi a1, zero, 96
+; RV64IB-NEXT: mul a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: mul96:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi a1, zero, 96
+; RV64IBA-NEXT: mul a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 96
+ ret i64 %c
+}
+
+define i64 @mul160(i64 %a) {
+; RV64I-LABEL: mul160:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, 160
+; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: mul160:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi a1, zero, 160
+; RV64IB-NEXT: mul a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: mul160:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi a1, zero, 160
+; RV64IBA-NEXT: mul a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 160
+ ret i64 %c
+}
+
+define i64 @mul288(i64 %a) {
+; RV64I-LABEL: mul288:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, 288
+; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: mul288:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi a1, zero, 288
+; RV64IB-NEXT: mul a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: mul288:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi a1, zero, 288
+; RV64IBA-NEXT: mul a0, a0, a1
; RV64IBA-NEXT: ret
- %c = mul i32 %a, 12
- %d = add i32 %c, %b
- ret i32 %d
+ %c = mul i64 %a, 288
+ ret i64 %c
}