[llvm] 0799057 - [RISCV][test] Add new tests of SH*ADD in the zba extension

Ben Shi via llvm-commits <llvm-commits@lists.llvm.org>
Wed Jun 16 16:02:42 PDT 2021


Author: Ben Shi
Date: 2021-06-17T07:02:33+08:00
New Revision: 07990571812df2a702daa2d7cbcd7c3f4e09cdda

URL: https://github.com/llvm/llvm-project/commit/07990571812df2a702daa2d7cbcd7c3f4e09cdda
DIFF: https://github.com/llvm/llvm-project/commit/07990571812df2a702daa2d7cbcd7c3f4e09cdda.diff

LOG: [RISCV][test] Add new tests of SH*ADD in the zba extension

These tests will demonstrate the following optimizations, to be implemented by future patches.

Rx + Ry * 6  => (SH1ADD (SH1ADD Ry, Ry), Rx)
Rx + Ry * 10 => (SH1ADD (SH2ADD Ry, Ry), Rx)
Rx + Ry * 12 => (SH2ADD (SH1ADD Ry, Ry), Rx)
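
Here SHnADD rd, rs1, rs2 computes (rs1 << n) + rs2, so, for example:

Rx + Ry * 10 = (((Ry << 2) + Ry) << 1) + Rx = (SH1ADD (SH2ADD Ry, Ry), Rx)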

Reviewed By: MaskRay

Differential Revision: https://reviews.llvm.org/D104210

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv32zba.ll
    llvm/test/CodeGen/RISCV/rv64zba.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index b4e4ffc5ca72b..cf4b98d5c5ab6 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-b -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32IB
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zba -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-zba -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32IBA
 
 define signext i16 @sh1add(i64 %0, i16* %1) {
@@ -80,3 +80,81 @@ define i64 @sh3add(i64 %0, i64* %1) {
   %4 = load i64, i64* %3
   ret i64 %4
 }
+
+define i32 @addmul6(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul6:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 6
+; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: addmul6:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a2, zero, 6
+; RV32IB-NEXT:    mul a0, a0, a2
+; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBA-LABEL: addmul6:
+; RV32IBA:       # %bb.0:
+; RV32IBA-NEXT:    addi a2, zero, 6
+; RV32IBA-NEXT:    mul a0, a0, a2
+; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    ret
+  %c = mul i32 %a, 6
+  %d = add i32 %c, %b
+  ret i32 %d
+}
+
+define i32 @addmul10(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul10:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 10
+; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: addmul10:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a2, zero, 10
+; RV32IB-NEXT:    mul a0, a0, a2
+; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBA-LABEL: addmul10:
+; RV32IBA:       # %bb.0:
+; RV32IBA-NEXT:    addi a2, zero, 10
+; RV32IBA-NEXT:    mul a0, a0, a2
+; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    ret
+  %c = mul i32 %a, 10
+  %d = add i32 %c, %b
+  ret i32 %d
+}
+
+define i32 @addmul12(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul12:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 12
+; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: addmul12:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a2, zero, 12
+; RV32IB-NEXT:    mul a0, a0, a2
+; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBA-LABEL: addmul12:
+; RV32IBA:       # %bb.0:
+; RV32IBA-NEXT:    addi a2, zero, 12
+; RV32IBA-NEXT:    mul a0, a0, a2
+; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    ret
+  %c = mul i32 %a, 12
+  %d = add i32 %c, %b
+  ret i32 %d
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 1f25ef3cef9bb..0b008955277da 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-b -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64IB
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zba -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-zba -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64IBA
 
 define i64 @slliuw(i64 %a) nounwind {
@@ -370,3 +370,159 @@ define i64 @sh3adduw_2(i64 %0, i64 %1) {
   %5 = add i64 %4, %1
   ret i64 %5
 }
+
+define i64 @addmul6(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul6:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 6
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmul6:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 6
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmul6:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 6
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i64 %a, 6
+  %d = add i64 %c, %b
+  ret i64 %d
+}
+
+define i64 @addmul10(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul10:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 10
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmul10:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 10
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmul10:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 10
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i64 %a, 10
+  %d = add i64 %c, %b
+  ret i64 %d
+}
+
+define i64 @addmul12(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul12:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 12
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmul12:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 12
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmul12:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 12
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i64 %a, 12
+  %d = add i64 %c, %b
+  ret i64 %d
+}
+
+define i32 @addmulw6(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw6:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 6
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmulw6:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 6
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    addw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmulw6:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 6
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    addw a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i32 %a, 6
+  %d = add i32 %c, %b
+  ret i32 %d
+}
+
+define i32 @addmulw10(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw10:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 10
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmulw10:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 10
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    addw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmulw10:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 10
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    addw a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i32 %a, 10
+  %d = add i32 %c, %b
+  ret i32 %d
+}
+
+define i32 @addmulw12(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw12:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 12
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmulw12:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 12
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    addw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmulw12:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 12
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    addw a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i32 %a, 12
+  %d = add i32 %c, %b
+  ret i32 %d
+}
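
For reference, once the planned combine lands, each RV32IBA/RV64IBA body above
should shrink to two Zba instructions. A hypothetical sketch for addmul10 on
RV64, assuming the mapping given in the log message (sh2add a0, a0, a0 yields
a * 5; sh1add then doubles it and adds b, giving a * 10 + b):

; RV64IBA:       # %bb.0:
; RV64IBA-NEXT:    sh2add a0, a0, a0
; RV64IBA-NEXT:    sh1add a0, a0, a1
; RV64IBA-NEXT:    ret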
