[llvm] 9e8ecce - [DAGCombine] Transform `shl X, cttz(Y)` to `mul (Y & -Y), X` if cttz is unsupported (#85066)
via llvm-commits
llvm-commits at lists.llvm.org
Wed May 29 03:26:58 PDT 2024
Author: Yingwei Zheng
Date: 2024-05-29T18:26:54+08:00
New Revision: 9e8ecce88ef65a2953db8071746720dd78bd1632
URL: https://github.com/llvm/llvm-project/commit/9e8ecce88ef65a2953db8071746720dd78bd1632
DIFF: https://github.com/llvm/llvm-project/commit/9e8ecce88ef65a2953db8071746720dd78bd1632.diff
LOG: [DAGCombine] Transform `shl X, cttz(Y)` to `mul (Y & -Y), X` if cttz is unsupported (#85066)
This patch folds `shl X, cttz(Y)` to `mul (Y & -Y), X` if cttz is
unsupported by the target.
Alive2: https://alive2.llvm.org/ce/z/AtLN5Y
Fixes https://github.com/llvm/llvm-project/issues/84763.
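The identity behind the fold: for nonzero `Y`, `Y & -Y` isolates the lowest set
bit of `Y`, which is exactly `1 << cttz(Y)`, so multiplying `X` by it matches
the shift. A minimal standalone sketch (not part of the patch) that checks this
on i32 values, using the GCC/Clang builtin `__builtin_ctz`:

```cpp
// Checks shl X, cttz(Y) == mul (Y & -Y), X for nonzero Y.
// __builtin_ctz is undefined for Y == 0, mirroring cttz_zero_undef.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t X = 0x12345678u;
  for (uint32_t Y = 1; Y < (1u << 16); ++Y) {
    uint32_t Shift = static_cast<uint32_t>(__builtin_ctz(Y)); // cttz(Y)
    uint32_t LowBit = Y & (0u - Y);                           // Y & -Y
    assert(X << Shift == LowBit * X);
  }
}
```

For `Y == 0` the original shift amount is the bit width (or poison for
`cttz_zero_undef`), so producing `mul 0, X` instead is a refinement; the Alive2
link above carries the formal proof.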
Added:
llvm/test/CodeGen/RISCV/shl-cttz.ll
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 2f4fdf5208d07..42e861e61201c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10107,6 +10107,18 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
if (SDValue NewSHL = visitShiftByConstant(N))
return NewSHL;
+ // fold (shl X, cttz(Y)) -> (mul (Y & -Y), X) if cttz is unsupported on the
+ // target.
+ if ((N1.getOpcode() == ISD::CTTZ || N1.getOpcode() == ISD::CTTZ_ZERO_UNDEF) &&
+ N1.hasOneUse() && !TLI.isOperationLegalOrCustom(ISD::CTTZ, VT) &&
+ TLI.isOperationLegalOrCustom(ISD::MUL, VT)) {
+ SDValue Y = N1.getOperand(0);
+ SDLoc DL(N);
+ SDValue NegY = DAG.getNegative(Y, DL, VT);
+ SDValue And = DAG.getNode(ISD::AND, DL, VT, Y, NegY);
+ return DAG.getNode(ISD::MUL, DL, VT, And, N0);
+ }
+
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll
new file mode 100644
index 0000000000000..0eeb8b04c7e5d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll
@@ -0,0 +1,807 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m < %s \
+; RUN: | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb < %s \
+; RUN: | FileCheck %s -check-prefix=RV32ZBB
+; RUN: llc -mtriple=riscv64 -mattr=+m < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I,RV64IILLEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBILLEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m -riscv-experimental-rv64-legal-i32 < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I,RV64ILEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -riscv-experimental-rv64-legal-i32 < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBLEGALI32
+
+define i8 @shl_cttz_i8(i8 %x, i8 %y) {
+; RV32I-LABEL: shl_cttz_i8:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi a2, a1, -1
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: andi a2, a2, 85
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: andi a2, a1, 51
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: andi a1, a1, 51
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: andi a1, a1, 15
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i8:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64IILLEGALI32-LABEL: shl_cttz_i8:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: addi a2, a1, -1
+; RV64IILLEGALI32-NEXT: not a1, a1
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: srli a2, a1, 1
+; RV64IILLEGALI32-NEXT: andi a2, a2, 85
+; RV64IILLEGALI32-NEXT: subw a1, a1, a2
+; RV64IILLEGALI32-NEXT: andi a2, a1, 51
+; RV64IILLEGALI32-NEXT: srli a1, a1, 2
+; RV64IILLEGALI32-NEXT: andi a1, a1, 51
+; RV64IILLEGALI32-NEXT: add a1, a2, a1
+; RV64IILLEGALI32-NEXT: srli a2, a1, 4
+; RV64IILLEGALI32-NEXT: add a1, a1, a2
+; RV64IILLEGALI32-NEXT: andi a1, a1, 15
+; RV64IILLEGALI32-NEXT: sll a0, a0, a1
+; RV64IILLEGALI32-NEXT: ret
+;
+; RV64ZBBILLEGALI32-LABEL: shl_cttz_i8:
+; RV64ZBBILLEGALI32: # %bb.0: # %entry
+; RV64ZBBILLEGALI32-NEXT: ctz a1, a1
+; RV64ZBBILLEGALI32-NEXT: sll a0, a0, a1
+; RV64ZBBILLEGALI32-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_i8:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: addi a2, a1, -1
+; RV64ILEGALI32-NEXT: not a1, a1
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: srliw a2, a1, 1
+; RV64ILEGALI32-NEXT: andi a2, a2, 85
+; RV64ILEGALI32-NEXT: subw a1, a1, a2
+; RV64ILEGALI32-NEXT: andi a2, a1, 51
+; RV64ILEGALI32-NEXT: srliw a1, a1, 2
+; RV64ILEGALI32-NEXT: andi a1, a1, 51
+; RV64ILEGALI32-NEXT: add a1, a2, a1
+; RV64ILEGALI32-NEXT: srliw a2, a1, 4
+; RV64ILEGALI32-NEXT: add a1, a1, a2
+; RV64ILEGALI32-NEXT: andi a1, a1, 15
+; RV64ILEGALI32-NEXT: sllw a0, a0, a1
+; RV64ILEGALI32-NEXT: ret
+;
+; RV64ZBBLEGALI32-LABEL: shl_cttz_i8:
+; RV64ZBBLEGALI32: # %bb.0: # %entry
+; RV64ZBBLEGALI32-NEXT: ctzw a1, a1
+; RV64ZBBLEGALI32-NEXT: sllw a0, a0, a1
+; RV64ZBBLEGALI32-NEXT: ret
+entry:
+ %cttz = call i8 @llvm.cttz.i8(i8 %y, i1 true)
+ %res = shl i8 %x, %cttz
+ ret i8 %res
+}
+
+define i8 @shl_cttz_constant_i8(i8 %y) {
+; RV32I-LABEL: shl_cttz_constant_i8:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: andi a1, a1, 85
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: andi a1, a0, 51
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: andi a0, a0, 51
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: andi a0, a0, 15
+; RV32I-NEXT: li a1, 4
+; RV32I-NEXT: sll a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i8:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a0, a0
+; RV32ZBB-NEXT: li a1, 4
+; RV32ZBB-NEXT: sll a0, a1, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64IILLEGALI32-LABEL: shl_cttz_constant_i8:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: addi a1, a0, -1
+; RV64IILLEGALI32-NEXT: not a0, a0
+; RV64IILLEGALI32-NEXT: and a0, a0, a1
+; RV64IILLEGALI32-NEXT: srli a1, a0, 1
+; RV64IILLEGALI32-NEXT: andi a1, a1, 85
+; RV64IILLEGALI32-NEXT: subw a0, a0, a1
+; RV64IILLEGALI32-NEXT: andi a1, a0, 51
+; RV64IILLEGALI32-NEXT: srli a0, a0, 2
+; RV64IILLEGALI32-NEXT: andi a0, a0, 51
+; RV64IILLEGALI32-NEXT: add a0, a1, a0
+; RV64IILLEGALI32-NEXT: srli a1, a0, 4
+; RV64IILLEGALI32-NEXT: add a0, a0, a1
+; RV64IILLEGALI32-NEXT: andi a0, a0, 15
+; RV64IILLEGALI32-NEXT: li a1, 4
+; RV64IILLEGALI32-NEXT: sll a0, a1, a0
+; RV64IILLEGALI32-NEXT: ret
+;
+; RV64ZBBILLEGALI32-LABEL: shl_cttz_constant_i8:
+; RV64ZBBILLEGALI32: # %bb.0: # %entry
+; RV64ZBBILLEGALI32-NEXT: ctz a0, a0
+; RV64ZBBILLEGALI32-NEXT: li a1, 4
+; RV64ZBBILLEGALI32-NEXT: sll a0, a1, a0
+; RV64ZBBILLEGALI32-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_constant_i8:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: addi a1, a0, -1
+; RV64ILEGALI32-NEXT: not a0, a0
+; RV64ILEGALI32-NEXT: and a0, a0, a1
+; RV64ILEGALI32-NEXT: srliw a1, a0, 1
+; RV64ILEGALI32-NEXT: andi a1, a1, 85
+; RV64ILEGALI32-NEXT: subw a0, a0, a1
+; RV64ILEGALI32-NEXT: andi a1, a0, 51
+; RV64ILEGALI32-NEXT: srliw a0, a0, 2
+; RV64ILEGALI32-NEXT: andi a0, a0, 51
+; RV64ILEGALI32-NEXT: add a0, a1, a0
+; RV64ILEGALI32-NEXT: srliw a1, a0, 4
+; RV64ILEGALI32-NEXT: add a0, a0, a1
+; RV64ILEGALI32-NEXT: andi a0, a0, 15
+; RV64ILEGALI32-NEXT: li a1, 4
+; RV64ILEGALI32-NEXT: sllw a0, a1, a0
+; RV64ILEGALI32-NEXT: ret
+;
+; RV64ZBBLEGALI32-LABEL: shl_cttz_constant_i8:
+; RV64ZBBLEGALI32: # %bb.0: # %entry
+; RV64ZBBLEGALI32-NEXT: ctzw a0, a0
+; RV64ZBBLEGALI32-NEXT: li a1, 4
+; RV64ZBBLEGALI32-NEXT: sllw a0, a1, a0
+; RV64ZBBLEGALI32-NEXT: ret
+entry:
+ %cttz = call i8 @llvm.cttz.i8(i8 %y, i1 true)
+ %res = shl i8 4, %cttz
+ ret i8 %res
+}
+
+define i16 @shl_cttz_i16(i16 %x, i16 %y) {
+; RV32I-LABEL: shl_cttz_i16:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi a2, a1, -1
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: lui a3, 5
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: lui a2, 3
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a3, a1, a2
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: andi a2, a1, 15
+; RV32I-NEXT: slli a1, a1, 20
+; RV32I-NEXT: srli a1, a1, 28
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i16:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64IILLEGALI32-LABEL: shl_cttz_i16:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: addi a2, a1, -1
+; RV64IILLEGALI32-NEXT: not a1, a1
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: srli a2, a1, 1
+; RV64IILLEGALI32-NEXT: lui a3, 5
+; RV64IILLEGALI32-NEXT: addiw a3, a3, 1365
+; RV64IILLEGALI32-NEXT: and a2, a2, a3
+; RV64IILLEGALI32-NEXT: sub a1, a1, a2
+; RV64IILLEGALI32-NEXT: lui a2, 3
+; RV64IILLEGALI32-NEXT: addiw a2, a2, 819
+; RV64IILLEGALI32-NEXT: and a3, a1, a2
+; RV64IILLEGALI32-NEXT: srli a1, a1, 2
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: add a1, a3, a1
+; RV64IILLEGALI32-NEXT: srli a2, a1, 4
+; RV64IILLEGALI32-NEXT: add a1, a1, a2
+; RV64IILLEGALI32-NEXT: andi a2, a1, 15
+; RV64IILLEGALI32-NEXT: slli a1, a1, 52
+; RV64IILLEGALI32-NEXT: srli a1, a1, 60
+; RV64IILLEGALI32-NEXT: add a1, a2, a1
+; RV64IILLEGALI32-NEXT: sll a0, a0, a1
+; RV64IILLEGALI32-NEXT: ret
+;
+; RV64ZBBILLEGALI32-LABEL: shl_cttz_i16:
+; RV64ZBBILLEGALI32: # %bb.0: # %entry
+; RV64ZBBILLEGALI32-NEXT: ctz a1, a1
+; RV64ZBBILLEGALI32-NEXT: sll a0, a0, a1
+; RV64ZBBILLEGALI32-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_i16:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: addi a2, a1, -1
+; RV64ILEGALI32-NEXT: not a1, a1
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: srliw a2, a1, 1
+; RV64ILEGALI32-NEXT: lui a3, 5
+; RV64ILEGALI32-NEXT: addi a3, a3, 1365
+; RV64ILEGALI32-NEXT: and a2, a2, a3
+; RV64ILEGALI32-NEXT: subw a1, a1, a2
+; RV64ILEGALI32-NEXT: lui a2, 3
+; RV64ILEGALI32-NEXT: addi a2, a2, 819
+; RV64ILEGALI32-NEXT: and a3, a1, a2
+; RV64ILEGALI32-NEXT: srliw a1, a1, 2
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: add a1, a3, a1
+; RV64ILEGALI32-NEXT: srliw a2, a1, 4
+; RV64ILEGALI32-NEXT: add a1, a1, a2
+; RV64ILEGALI32-NEXT: andi a2, a1, 15
+; RV64ILEGALI32-NEXT: slli a1, a1, 52
+; RV64ILEGALI32-NEXT: srli a1, a1, 60
+; RV64ILEGALI32-NEXT: add a1, a2, a1
+; RV64ILEGALI32-NEXT: sllw a0, a0, a1
+; RV64ILEGALI32-NEXT: ret
+;
+; RV64ZBBLEGALI32-LABEL: shl_cttz_i16:
+; RV64ZBBLEGALI32: # %bb.0: # %entry
+; RV64ZBBLEGALI32-NEXT: ctzw a1, a1
+; RV64ZBBLEGALI32-NEXT: sllw a0, a0, a1
+; RV64ZBBLEGALI32-NEXT: ret
+entry:
+ %cttz = call i16 @llvm.cttz.i16(i16 %y, i1 true)
+ %res = shl i16 %x, %cttz
+ ret i16 %res
+}
+
+define i16 @shl_cttz_constant_i16(i16 %y) {
+; RV32I-LABEL: shl_cttz_constant_i16:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: lui a2, 5
+; RV32I-NEXT: addi a2, a2, 1365
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 3
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: andi a1, a0, 15
+; RV32I-NEXT: slli a0, a0, 20
+; RV32I-NEXT: srli a0, a0, 28
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: li a1, 4
+; RV32I-NEXT: sll a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i16:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a0, a0
+; RV32ZBB-NEXT: li a1, 4
+; RV32ZBB-NEXT: sll a0, a1, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64IILLEGALI32-LABEL: shl_cttz_constant_i16:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: addi a1, a0, -1
+; RV64IILLEGALI32-NEXT: not a0, a0
+; RV64IILLEGALI32-NEXT: and a0, a0, a1
+; RV64IILLEGALI32-NEXT: srli a1, a0, 1
+; RV64IILLEGALI32-NEXT: lui a2, 5
+; RV64IILLEGALI32-NEXT: addiw a2, a2, 1365
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: sub a0, a0, a1
+; RV64IILLEGALI32-NEXT: lui a1, 3
+; RV64IILLEGALI32-NEXT: addiw a1, a1, 819
+; RV64IILLEGALI32-NEXT: and a2, a0, a1
+; RV64IILLEGALI32-NEXT: srli a0, a0, 2
+; RV64IILLEGALI32-NEXT: and a0, a0, a1
+; RV64IILLEGALI32-NEXT: add a0, a2, a0
+; RV64IILLEGALI32-NEXT: srli a1, a0, 4
+; RV64IILLEGALI32-NEXT: add a0, a0, a1
+; RV64IILLEGALI32-NEXT: andi a1, a0, 15
+; RV64IILLEGALI32-NEXT: slli a0, a0, 52
+; RV64IILLEGALI32-NEXT: srli a0, a0, 60
+; RV64IILLEGALI32-NEXT: add a0, a1, a0
+; RV64IILLEGALI32-NEXT: li a1, 4
+; RV64IILLEGALI32-NEXT: sll a0, a1, a0
+; RV64IILLEGALI32-NEXT: ret
+;
+; RV64ZBBILLEGALI32-LABEL: shl_cttz_constant_i16:
+; RV64ZBBILLEGALI32: # %bb.0: # %entry
+; RV64ZBBILLEGALI32-NEXT: ctz a0, a0
+; RV64ZBBILLEGALI32-NEXT: li a1, 4
+; RV64ZBBILLEGALI32-NEXT: sll a0, a1, a0
+; RV64ZBBILLEGALI32-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_constant_i16:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: addi a1, a0, -1
+; RV64ILEGALI32-NEXT: not a0, a0
+; RV64ILEGALI32-NEXT: and a0, a0, a1
+; RV64ILEGALI32-NEXT: srliw a1, a0, 1
+; RV64ILEGALI32-NEXT: lui a2, 5
+; RV64ILEGALI32-NEXT: addi a2, a2, 1365
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: subw a0, a0, a1
+; RV64ILEGALI32-NEXT: lui a1, 3
+; RV64ILEGALI32-NEXT: addi a1, a1, 819
+; RV64ILEGALI32-NEXT: and a2, a0, a1
+; RV64ILEGALI32-NEXT: srliw a0, a0, 2
+; RV64ILEGALI32-NEXT: and a0, a0, a1
+; RV64ILEGALI32-NEXT: add a0, a2, a0
+; RV64ILEGALI32-NEXT: srliw a1, a0, 4
+; RV64ILEGALI32-NEXT: add a0, a0, a1
+; RV64ILEGALI32-NEXT: andi a1, a0, 15
+; RV64ILEGALI32-NEXT: slli a0, a0, 52
+; RV64ILEGALI32-NEXT: srli a0, a0, 60
+; RV64ILEGALI32-NEXT: add a0, a1, a0
+; RV64ILEGALI32-NEXT: li a1, 4
+; RV64ILEGALI32-NEXT: sllw a0, a1, a0
+; RV64ILEGALI32-NEXT: ret
+;
+; RV64ZBBLEGALI32-LABEL: shl_cttz_constant_i16:
+; RV64ZBBLEGALI32: # %bb.0: # %entry
+; RV64ZBBLEGALI32-NEXT: ctzw a0, a0
+; RV64ZBBLEGALI32-NEXT: li a1, 4
+; RV64ZBBLEGALI32-NEXT: sllw a0, a1, a0
+; RV64ZBBLEGALI32-NEXT: ret
+entry:
+ %cttz = call i16 @llvm.cttz.i16(i16 %y, i1 true)
+ %res = shl i16 4, %cttz
+ ret i16 %res
+}
+
+define i32 @shl_cttz_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_i32:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: neg a2, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: mul a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i32:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: lui a2, 30667
+; RV64I-NEXT: addi a2, a2, 1329
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 27
+; RV64I-NEXT: lui a2, %hi(.LCPI4_0)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI4_0)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sllw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_i32:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctzw a1, a1
+; RV64ZBB-NEXT: sllw a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+ %res = shl i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_i32_zero_is_defined(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_i32_zero_is_defined:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: beqz a1, .LBB5_2
+; RV32I-NEXT: # %bb.1: # %cond.false
+; RV32I-NEXT: neg a2, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: lui a2, 30667
+; RV32I-NEXT: addi a2, a2, 1329
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 27
+; RV32I-NEXT: lui a2, %hi(.LCPI5_0)
+; RV32I-NEXT: addi a2, a2, %lo(.LCPI5_0)
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB5_2:
+; RV32I-NEXT: li a1, 32
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i32_zero_is_defined:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_i32_zero_is_defined:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: sext.w a2, a1
+; RV64I-NEXT: beqz a2, .LBB5_2
+; RV64I-NEXT: # %bb.1: # %cond.false
+; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: lui a2, 30667
+; RV64I-NEXT: addi a2, a2, 1329
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 27
+; RV64I-NEXT: lui a2, %hi(.LCPI5_0)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI5_0)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sllw a0, a0, a1
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB5_2:
+; RV64I-NEXT: li a1, 32
+; RV64I-NEXT: sllw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_i32_zero_is_defined:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctzw a1, a1
+; RV64ZBB-NEXT: sllw a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 false)
+ %res = shl i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_constant_i32(i32 %y) {
+; RV32I-LABEL: shl_cttz_constant_i32:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: neg a1, a0
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a0, a0, 2
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i32:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a0, a0
+; RV32ZBB-NEXT: li a1, 4
+; RV32ZBB-NEXT: sll a0, a1, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_constant_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: lui a1, 30667
+; RV64I-NEXT: addi a1, a1, 1329
+; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: srliw a0, a0, 27
+; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
+; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lbu a0, 0(a0)
+; RV64I-NEXT: li a1, 4
+; RV64I-NEXT: sllw a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_constant_i32:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctzw a0, a0
+; RV64ZBB-NEXT: li a1, 4
+; RV64ZBB-NEXT: sllw a0, a1, a0
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+ %res = shl i32 4, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_multiuse_i32:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: neg a2, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: lui a2, 30667
+; RV32I-NEXT: addi a2, a2, 1329
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 27
+; RV32I-NEXT: lui a2, %hi(.LCPI7_0)
+; RV32I-NEXT: addi a2, a2, %lo(.LCPI7_0)
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: lbu s0, 0(a1)
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call use32
+; RV32I-NEXT: sll a0, s1, s0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_multiuse_i32:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: addi sp, sp, -16
+; RV32ZBB-NEXT: .cfi_def_cfa_offset 16
+; RV32ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: .cfi_offset ra, -4
+; RV32ZBB-NEXT: .cfi_offset s0, -8
+; RV32ZBB-NEXT: .cfi_offset s1, -12
+; RV32ZBB-NEXT: mv s0, a0
+; RV32ZBB-NEXT: ctz s1, a1
+; RV32ZBB-NEXT: mv a0, s1
+; RV32ZBB-NEXT: call use32
+; RV32ZBB-NEXT: sll a0, s0, s1
+; RV32ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: addi sp, sp, 16
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_multiuse_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: .cfi_def_cfa_offset 32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: lui a2, 30667
+; RV64I-NEXT: addi a2, a2, 1329
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 27
+; RV64I-NEXT: lui a2, %hi(.LCPI7_0)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI7_0)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu s0, 0(a1)
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call use32
+; RV64I-NEXT: sllw a0, s1, s0
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_multiuse_i32:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: addi sp, sp, -32
+; RV64ZBB-NEXT: .cfi_def_cfa_offset 32
+; RV64ZBB-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: .cfi_offset ra, -8
+; RV64ZBB-NEXT: .cfi_offset s0, -16
+; RV64ZBB-NEXT: .cfi_offset s1, -24
+; RV64ZBB-NEXT: mv s0, a0
+; RV64ZBB-NEXT: ctzw s1, a1
+; RV64ZBB-NEXT: mv a0, s1
+; RV64ZBB-NEXT: call use32
+; RV64ZBB-NEXT: sllw a0, s0, s1
+; RV64ZBB-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: addi sp, sp, 32
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+ call void @use32(i32 %cttz)
+ %res = shl i32 %x, %cttz
+ ret i32 %res
+}
+
+define i64 @shl_cttz_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_i64:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: lui a4, 30667
+; RV32I-NEXT: addi a5, a4, 1329
+; RV32I-NEXT: lui a4, %hi(.LCPI8_0)
+; RV32I-NEXT: addi a4, a4, %lo(.LCPI8_0)
+; RV32I-NEXT: bnez a2, .LBB8_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: neg a2, a3
+; RV32I-NEXT: and a2, a3, a2
+; RV32I-NEXT: mul a2, a2, a5
+; RV32I-NEXT: srli a2, a2, 27
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: lbu a2, 0(a2)
+; RV32I-NEXT: addi a4, a2, 32
+; RV32I-NEXT: j .LBB8_3
+; RV32I-NEXT: .LBB8_2:
+; RV32I-NEXT: neg a3, a2
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: mul a2, a2, a5
+; RV32I-NEXT: srli a2, a2, 27
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: lbu a4, 0(a2)
+; RV32I-NEXT: .LBB8_3: # %entry
+; RV32I-NEXT: addi a3, a4, -32
+; RV32I-NEXT: sll a2, a0, a4
+; RV32I-NEXT: bltz a3, .LBB8_5
+; RV32I-NEXT: # %bb.4: # %entry
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: j .LBB8_6
+; RV32I-NEXT: .LBB8_5:
+; RV32I-NEXT: sll a1, a1, a4
+; RV32I-NEXT: not a4, a4
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: srl a0, a0, a4
+; RV32I-NEXT: or a1, a1, a0
+; RV32I-NEXT: .LBB8_6: # %entry
+; RV32I-NEXT: srai a0, a3, 31
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i64:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: bnez a2, .LBB8_2
+; RV32ZBB-NEXT: # %bb.1: # %entry
+; RV32ZBB-NEXT: ctz a2, a3
+; RV32ZBB-NEXT: addi a4, a2, 32
+; RV32ZBB-NEXT: j .LBB8_3
+; RV32ZBB-NEXT: .LBB8_2:
+; RV32ZBB-NEXT: ctz a4, a2
+; RV32ZBB-NEXT: .LBB8_3: # %entry
+; RV32ZBB-NEXT: addi a3, a4, -32
+; RV32ZBB-NEXT: sll a2, a0, a4
+; RV32ZBB-NEXT: bltz a3, .LBB8_5
+; RV32ZBB-NEXT: # %bb.4: # %entry
+; RV32ZBB-NEXT: mv a1, a2
+; RV32ZBB-NEXT: j .LBB8_6
+; RV32ZBB-NEXT: .LBB8_5:
+; RV32ZBB-NEXT: sll a1, a1, a4
+; RV32ZBB-NEXT: not a4, a4
+; RV32ZBB-NEXT: srli a0, a0, 1
+; RV32ZBB-NEXT: srl a0, a0, a4
+; RV32ZBB-NEXT: or a1, a1, a0
+; RV32ZBB-NEXT: .LBB8_6: # %entry
+; RV32ZBB-NEXT: srai a0, a3, 31
+; RV32ZBB-NEXT: and a0, a0, a2
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_i64:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: neg a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: mul a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_i64:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a1, a1
+; RV64ZBB-NEXT: sll a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+ %res = shl i64 %x, %cttz
+ ret i64 %res
+}
+
+define i64 @shl_cttz_constant_i64(i64 %y) {
+; RV32I-LABEL: shl_cttz_constant_i64:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: lui a2, 30667
+; RV32I-NEXT: addi a3, a2, 1329
+; RV32I-NEXT: lui a2, %hi(.LCPI9_0)
+; RV32I-NEXT: addi a2, a2, %lo(.LCPI9_0)
+; RV32I-NEXT: bnez a0, .LBB9_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: neg a0, a1
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: mul a0, a0, a3
+; RV32I-NEXT: srli a0, a0, 27
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lbu a0, 0(a0)
+; RV32I-NEXT: addi a1, a0, 32
+; RV32I-NEXT: j .LBB9_3
+; RV32I-NEXT: .LBB9_2:
+; RV32I-NEXT: neg a1, a0
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: mul a0, a0, a3
+; RV32I-NEXT: srli a0, a0, 27
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lbu a1, 0(a0)
+; RV32I-NEXT: .LBB9_3: # %entry
+; RV32I-NEXT: li a0, 4
+; RV32I-NEXT: addi a2, a1, -32
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: bltz a2, .LBB9_5
+; RV32I-NEXT: # %bb.4: # %entry
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: j .LBB9_6
+; RV32I-NEXT: .LBB9_5:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: li a3, 2
+; RV32I-NEXT: srl a1, a3, a1
+; RV32I-NEXT: .LBB9_6: # %entry
+; RV32I-NEXT: srai a2, a2, 31
+; RV32I-NEXT: and a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i64:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: bnez a0, .LBB9_2
+; RV32ZBB-NEXT: # %bb.1: # %entry
+; RV32ZBB-NEXT: ctz a0, a1
+; RV32ZBB-NEXT: addi a1, a0, 32
+; RV32ZBB-NEXT: j .LBB9_3
+; RV32ZBB-NEXT: .LBB9_2:
+; RV32ZBB-NEXT: ctz a1, a0
+; RV32ZBB-NEXT: .LBB9_3: # %entry
+; RV32ZBB-NEXT: li a0, 4
+; RV32ZBB-NEXT: addi a2, a1, -32
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: bltz a2, .LBB9_5
+; RV32ZBB-NEXT: # %bb.4: # %entry
+; RV32ZBB-NEXT: mv a1, a0
+; RV32ZBB-NEXT: j .LBB9_6
+; RV32ZBB-NEXT: .LBB9_5:
+; RV32ZBB-NEXT: not a1, a1
+; RV32ZBB-NEXT: li a3, 2
+; RV32ZBB-NEXT: srl a1, a3, a1
+; RV32ZBB-NEXT: .LBB9_6: # %entry
+; RV32ZBB-NEXT: srai a2, a2, 31
+; RV32ZBB-NEXT: and a0, a2, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_constant_i64:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: neg a1, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_constant_i64:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a0, a0
+; RV64ZBB-NEXT: li a1, 4
+; RV64ZBB-NEXT: sll a0, a1, a0
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+ %res = shl i64 4, %cttz
+ ret i64 %res
+}
+
+declare void @use32(i32 signext)