[llvm] 6644a61 - [RISCV] Add tests for suboptimal handling of negative constants on the LHS of i32 shifts/rotates/subtracts on RV64. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Jul 11 11:55:07 PDT 2021
Author: Craig Topper
Date: 2021-07-11T11:54:34-07:00
New Revision: 6644a611213c7457574bc24b7098ae04753c49ae
URL: https://github.com/llvm/llvm-project/commit/6644a611213c7457574bc24b7098ae04753c49ae
DIFF: https://github.com/llvm/llvm-project/commit/6644a611213c7457574bc24b7098ae04753c49ae.diff
LOG: [RISCV] Add tests for suboptimal handling of negative constants on the LHS of i32 shifts/rotates/subtracts on RV64. NFC
The constants end up getting zero extended to i64, but sign extension
would be better for constant materialization. Since we're using W
instructions, either behavior is correct: the upper 32 bits
aren't read.
Added:
Modified:
llvm/test/CodeGen/RISCV/alu32.ll
llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/alu32.ll b/llvm/test/CodeGen/RISCV/alu32.ll
index 90a00c21feff..0779435f2bb3 100644
--- a/llvm/test/CodeGen/RISCV/alu32.ll
+++ b/llvm/test/CodeGen/RISCV/alu32.ll
@@ -189,6 +189,24 @@ define i32 @sub(i32 %a, i32 %b) nounwind {
ret i32 %1
}
+define i32 @sub_negative_constant_lhs(i32 %a) nounwind {
+; RV32I-LABEL: sub_negative_constant_lhs:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a1, zero, -2
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: sub_negative_constant_lhs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, 1
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: addi a1, a1, -2
+; RV64I-NEXT: subw a0, a1, a0
+; RV64I-NEXT: ret
+ %1 = sub i32 -2, %a
+ ret i32 %1
+}
+
define i32 @sll(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sll:
; RV32I: # %bb.0:
@@ -203,6 +221,23 @@ define i32 @sll(i32 %a, i32 %b) nounwind {
ret i32 %1
}
+define i32 @sll_negative_constant_lhs(i32 %a) nounwind {
+; RV32I-LABEL: sll_negative_constant_lhs:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a1, zero, -1
+; RV32I-NEXT: sll a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: sll_negative_constant_lhs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: sllw a0, a1, a0
+; RV64I-NEXT: ret
+ %1 = shl i32 -1, %a
+ ret i32 %1
+}
+
define i32 @slt(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: slt:
; RV32I: # %bb.0:
@@ -265,6 +300,23 @@ define i32 @srl(i32 %a, i32 %b) nounwind {
ret i32 %1
}
+define i32 @srl_negative_constant_lhs(i32 %a) nounwind {
+; RV32I-LABEL: srl_negative_constant_lhs:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a1, zero, -1
+; RV32I-NEXT: srl a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: srl_negative_constant_lhs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: srlw a0, a1, a0
+; RV64I-NEXT: ret
+ %1 = lshr i32 -1, %a
+ ret i32 %1
+}
+
define i32 @sra(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sra:
; RV32I: # %bb.0:
@@ -279,6 +331,23 @@ define i32 @sra(i32 %a, i32 %b) nounwind {
ret i32 %1
}
+define i32 @sra_negative_constant_lhs(i32 %a) nounwind {
+; RV32I-LABEL: sra_negative_constant_lhs:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a1, 524288
+; RV32I-NEXT: sra a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: sra_negative_constant_lhs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, 1
+; RV64I-NEXT: slli a1, a1, 31
+; RV64I-NEXT: sraw a0, a1, a0
+; RV64I-NEXT: ret
+ %1 = ashr i32 2147483648, %a
+ ret i32 %1
+}
+
define i32 @or(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: or:
; RV32I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
index 804c5465b14d..cce4b50e8a32 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
@@ -226,6 +226,45 @@ define void @rol_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
ret void
}
+define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
+; RV64I-LABEL: rol_i32_neg_constant_rhs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a1, a0
+; RV64I-NEXT: addi a2, zero, 1
+; RV64I-NEXT: slli a2, a2, 32
+; RV64I-NEXT: addi a2, a2, -2
+; RV64I-NEXT: srlw a1, a2, a1
+; RV64I-NEXT: sllw a0, a2, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: rol_i32_neg_constant_rhs:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi a1, zero, 1
+; RV64IB-NEXT: slli a1, a1, 32
+; RV64IB-NEXT: addi a1, a1, -2
+; RV64IB-NEXT: rolw a0, a1, a0
+; RV64IB-NEXT: ret
+;
+; RV64IBB-LABEL: rol_i32_neg_constant_rhs:
+; RV64IBB: # %bb.0:
+; RV64IBB-NEXT: addi a1, zero, 1
+; RV64IBB-NEXT: slli a1, a1, 32
+; RV64IBB-NEXT: addi a1, a1, -2
+; RV64IBB-NEXT: rolw a0, a1, a0
+; RV64IBB-NEXT: ret
+;
+; RV64IBP-LABEL: rol_i32_neg_constant_rhs:
+; RV64IBP: # %bb.0:
+; RV64IBP-NEXT: addi a1, zero, 1
+; RV64IBP-NEXT: slli a1, a1, 32
+; RV64IBP-NEXT: addi a1, a1, -2
+; RV64IBP-NEXT: rolw a0, a1, a0
+; RV64IBP-NEXT: ret
+ %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a)
+ ret i32 %1
+}
+
declare i64 @llvm.fshl.i64(i64, i64, i64)
define i64 @rol_i64(i64 %a, i64 %b) nounwind {
@@ -317,6 +356,45 @@ define void @ror_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
ret void
}
+define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
+; RV64I-LABEL: ror_i32_neg_constant_rhs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a1, a0
+; RV64I-NEXT: addi a2, zero, 1
+; RV64I-NEXT: slli a2, a2, 32
+; RV64I-NEXT: addi a2, a2, -2
+; RV64I-NEXT: sllw a1, a2, a1
+; RV64I-NEXT: srlw a0, a2, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: ror_i32_neg_constant_rhs:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi a1, zero, 1
+; RV64IB-NEXT: slli a1, a1, 32
+; RV64IB-NEXT: addi a1, a1, -2
+; RV64IB-NEXT: rorw a0, a1, a0
+; RV64IB-NEXT: ret
+;
+; RV64IBB-LABEL: ror_i32_neg_constant_rhs:
+; RV64IBB: # %bb.0:
+; RV64IBB-NEXT: addi a1, zero, 1
+; RV64IBB-NEXT: slli a1, a1, 32
+; RV64IBB-NEXT: addi a1, a1, -2
+; RV64IBB-NEXT: rorw a0, a1, a0
+; RV64IBB-NEXT: ret
+;
+; RV64IBP-LABEL: ror_i32_neg_constant_rhs:
+; RV64IBP: # %bb.0:
+; RV64IBP-NEXT: addi a1, zero, 1
+; RV64IBP-NEXT: slli a1, a1, 32
+; RV64IBP-NEXT: addi a1, a1, -2
+; RV64IBP-NEXT: rorw a0, a1, a0
+; RV64IBP-NEXT: ret
+ %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a)
+ ret i32 %1
+}
+
declare i64 @llvm.fshr.i64(i64, i64, i64)
define i64 @ror_i64(i64 %a, i64 %b) nounwind {
More information about the llvm-commits
mailing list