[llvm] [CodeGenPrepare] Transform `shl X, cttz(Y)` to `mul (Y & -Y), X` if cttz is unsupported (PR #85066)
Yingwei Zheng via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 21 07:42:34 PDT 2024
https://github.com/dtcxzyw updated https://github.com/llvm/llvm-project/pull/85066
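For context on the transform: `Y & -Y` isolates the lowest set bit of Y, so for nonzero Y we have `Y & -Y == 1 << cttz(Y)` and hence `X << cttz(Y) == X * (Y & -Y)`. (The cttz calls in the tests pass `i1 true`, so Y == 0 is poison and need not be handled.) A minimal LLVM IR sketch of the rewrite, hand-written to mirror the IRBuilder calls in patch 2 below — the %neg/%lowbit names are illustrative, not taken from the patch:

    ; before
    %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
    %res  = shl i32 %x, %cttz

    ; after
    %neg    = sub i32 0, %y       ; CreateNeg
    %lowbit = and i32 %y, %neg    ; Y & -Y == 1 << cttz(Y) for Y != 0
    %res    = mul i32 %lowbit, %x

The rewrite fires only when CTTZ is neither legal nor custom for the value type but MUL is, so targets without a ctz instruction (here, RISC-V without Zbb) avoid the expensive cttz expansion entirely.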
>From 0a519f4a595335f01af9525a748a696d525dda17 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Thu, 21 Mar 2024 22:32:25 +0800
Subject: [PATCH 1/2] [CodeGenPrepare] Add pre-commit tests. NFC.
---
llvm/test/CodeGen/RISCV/shl-cttz.ll | 1233 +++++++++++++++++++++++++++
1 file changed, 1233 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/shl-cttz.ll
diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll
new file mode 100644
index 00000000000000..56aa4ee2d96899
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll
@@ -0,0 +1,1233 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32ZBB
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I,RV64IILLEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBILLEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I,RV64ILEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBLEGALI32
+
+define i8 @shl_cttz_i8(i8 %x, i8 %y) {
+; RV32I-LABEL: shl_cttz_i8:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi a2, a1, -1
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: andi a2, a2, 85
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: andi a2, a1, 51
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: andi a1, a1, 51
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: andi a1, a1, 15
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i8:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64IILLEGALI32-LABEL: shl_cttz_i8:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: addi a2, a1, -1
+; RV64IILLEGALI32-NEXT: not a1, a1
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: srli a2, a1, 1
+; RV64IILLEGALI32-NEXT: andi a2, a2, 85
+; RV64IILLEGALI32-NEXT: subw a1, a1, a2
+; RV64IILLEGALI32-NEXT: andi a2, a1, 51
+; RV64IILLEGALI32-NEXT: srli a1, a1, 2
+; RV64IILLEGALI32-NEXT: andi a1, a1, 51
+; RV64IILLEGALI32-NEXT: add a1, a2, a1
+; RV64IILLEGALI32-NEXT: srli a2, a1, 4
+; RV64IILLEGALI32-NEXT: add a1, a1, a2
+; RV64IILLEGALI32-NEXT: andi a1, a1, 15
+; RV64IILLEGALI32-NEXT: sll a0, a0, a1
+; RV64IILLEGALI32-NEXT: ret
+;
+; RV64ZBBILLEGALI32-LABEL: shl_cttz_i8:
+; RV64ZBBILLEGALI32: # %bb.0: # %entry
+; RV64ZBBILLEGALI32-NEXT: ctz a1, a1
+; RV64ZBBILLEGALI32-NEXT: sll a0, a0, a1
+; RV64ZBBILLEGALI32-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_i8:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: addi a2, a1, -1
+; RV64ILEGALI32-NEXT: not a1, a1
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: srliw a2, a1, 1
+; RV64ILEGALI32-NEXT: andi a2, a2, 85
+; RV64ILEGALI32-NEXT: subw a1, a1, a2
+; RV64ILEGALI32-NEXT: andi a2, a1, 51
+; RV64ILEGALI32-NEXT: srliw a1, a1, 2
+; RV64ILEGALI32-NEXT: andi a1, a1, 51
+; RV64ILEGALI32-NEXT: add a1, a2, a1
+; RV64ILEGALI32-NEXT: srliw a2, a1, 4
+; RV64ILEGALI32-NEXT: add a1, a1, a2
+; RV64ILEGALI32-NEXT: andi a1, a1, 15
+; RV64ILEGALI32-NEXT: sllw a0, a0, a1
+; RV64ILEGALI32-NEXT: ret
+;
+; RV64ZBBLEGALI32-LABEL: shl_cttz_i8:
+; RV64ZBBLEGALI32: # %bb.0: # %entry
+; RV64ZBBLEGALI32-NEXT: ctzw a1, a1
+; RV64ZBBLEGALI32-NEXT: sllw a0, a0, a1
+; RV64ZBBLEGALI32-NEXT: ret
+entry:
+ %cttz = call i8 @llvm.cttz.i8(i8 %y, i1 true)
+ %res = shl i8 %x, %cttz
+ ret i8 %res
+}
+
+define i8 @shl_cttz_constant_i8(i8 %y) {
+; RV32I-LABEL: shl_cttz_constant_i8:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: andi a1, a1, 85
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: andi a1, a0, 51
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: andi a0, a0, 51
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: andi a0, a0, 15
+; RV32I-NEXT: li a1, 4
+; RV32I-NEXT: sll a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i8:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a0, a0
+; RV32ZBB-NEXT: li a1, 4
+; RV32ZBB-NEXT: sll a0, a1, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64IILLEGALI32-LABEL: shl_cttz_constant_i8:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: addi a1, a0, -1
+; RV64IILLEGALI32-NEXT: not a0, a0
+; RV64IILLEGALI32-NEXT: and a0, a0, a1
+; RV64IILLEGALI32-NEXT: srli a1, a0, 1
+; RV64IILLEGALI32-NEXT: andi a1, a1, 85
+; RV64IILLEGALI32-NEXT: subw a0, a0, a1
+; RV64IILLEGALI32-NEXT: andi a1, a0, 51
+; RV64IILLEGALI32-NEXT: srli a0, a0, 2
+; RV64IILLEGALI32-NEXT: andi a0, a0, 51
+; RV64IILLEGALI32-NEXT: add a0, a1, a0
+; RV64IILLEGALI32-NEXT: srli a1, a0, 4
+; RV64IILLEGALI32-NEXT: add a0, a0, a1
+; RV64IILLEGALI32-NEXT: andi a0, a0, 15
+; RV64IILLEGALI32-NEXT: li a1, 4
+; RV64IILLEGALI32-NEXT: sll a0, a1, a0
+; RV64IILLEGALI32-NEXT: ret
+;
+; RV64ZBBILLEGALI32-LABEL: shl_cttz_constant_i8:
+; RV64ZBBILLEGALI32: # %bb.0: # %entry
+; RV64ZBBILLEGALI32-NEXT: ctz a0, a0
+; RV64ZBBILLEGALI32-NEXT: li a1, 4
+; RV64ZBBILLEGALI32-NEXT: sll a0, a1, a0
+; RV64ZBBILLEGALI32-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_constant_i8:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: addi a1, a0, -1
+; RV64ILEGALI32-NEXT: not a0, a0
+; RV64ILEGALI32-NEXT: and a0, a0, a1
+; RV64ILEGALI32-NEXT: srliw a1, a0, 1
+; RV64ILEGALI32-NEXT: andi a1, a1, 85
+; RV64ILEGALI32-NEXT: subw a0, a0, a1
+; RV64ILEGALI32-NEXT: andi a1, a0, 51
+; RV64ILEGALI32-NEXT: srliw a0, a0, 2
+; RV64ILEGALI32-NEXT: andi a0, a0, 51
+; RV64ILEGALI32-NEXT: add a0, a1, a0
+; RV64ILEGALI32-NEXT: srliw a1, a0, 4
+; RV64ILEGALI32-NEXT: add a0, a0, a1
+; RV64ILEGALI32-NEXT: andi a0, a0, 15
+; RV64ILEGALI32-NEXT: li a1, 4
+; RV64ILEGALI32-NEXT: sllw a0, a1, a0
+; RV64ILEGALI32-NEXT: ret
+;
+; RV64ZBBLEGALI32-LABEL: shl_cttz_constant_i8:
+; RV64ZBBLEGALI32: # %bb.0: # %entry
+; RV64ZBBLEGALI32-NEXT: ctzw a0, a0
+; RV64ZBBLEGALI32-NEXT: li a1, 4
+; RV64ZBBLEGALI32-NEXT: sllw a0, a1, a0
+; RV64ZBBLEGALI32-NEXT: ret
+entry:
+ %cttz = call i8 @llvm.cttz.i8(i8 %y, i1 true)
+ %res = shl i8 4, %cttz
+ ret i8 %res
+}
+
+define i16 @shl_cttz_i16(i16 %x, i16 %y) {
+; RV32I-LABEL: shl_cttz_i16:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi a2, a1, -1
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: lui a3, 5
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: lui a2, 3
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a3, a1, a2
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: andi a2, a1, 15
+; RV32I-NEXT: slli a1, a1, 20
+; RV32I-NEXT: srli a1, a1, 28
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i16:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64IILLEGALI32-LABEL: shl_cttz_i16:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: addi a2, a1, -1
+; RV64IILLEGALI32-NEXT: not a1, a1
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: srli a2, a1, 1
+; RV64IILLEGALI32-NEXT: lui a3, 5
+; RV64IILLEGALI32-NEXT: addiw a3, a3, 1365
+; RV64IILLEGALI32-NEXT: and a2, a2, a3
+; RV64IILLEGALI32-NEXT: sub a1, a1, a2
+; RV64IILLEGALI32-NEXT: lui a2, 3
+; RV64IILLEGALI32-NEXT: addiw a2, a2, 819
+; RV64IILLEGALI32-NEXT: and a3, a1, a2
+; RV64IILLEGALI32-NEXT: srli a1, a1, 2
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: add a1, a3, a1
+; RV64IILLEGALI32-NEXT: srli a2, a1, 4
+; RV64IILLEGALI32-NEXT: add a1, a1, a2
+; RV64IILLEGALI32-NEXT: andi a2, a1, 15
+; RV64IILLEGALI32-NEXT: slli a1, a1, 52
+; RV64IILLEGALI32-NEXT: srli a1, a1, 60
+; RV64IILLEGALI32-NEXT: add a1, a2, a1
+; RV64IILLEGALI32-NEXT: sll a0, a0, a1
+; RV64IILLEGALI32-NEXT: ret
+;
+; RV64ZBBILLEGALI32-LABEL: shl_cttz_i16:
+; RV64ZBBILLEGALI32: # %bb.0: # %entry
+; RV64ZBBILLEGALI32-NEXT: ctz a1, a1
+; RV64ZBBILLEGALI32-NEXT: sll a0, a0, a1
+; RV64ZBBILLEGALI32-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_i16:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: addi a2, a1, -1
+; RV64ILEGALI32-NEXT: not a1, a1
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: srliw a2, a1, 1
+; RV64ILEGALI32-NEXT: lui a3, 5
+; RV64ILEGALI32-NEXT: addi a3, a3, 1365
+; RV64ILEGALI32-NEXT: and a2, a2, a3
+; RV64ILEGALI32-NEXT: subw a1, a1, a2
+; RV64ILEGALI32-NEXT: lui a2, 3
+; RV64ILEGALI32-NEXT: addi a2, a2, 819
+; RV64ILEGALI32-NEXT: and a3, a1, a2
+; RV64ILEGALI32-NEXT: srliw a1, a1, 2
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: add a1, a3, a1
+; RV64ILEGALI32-NEXT: srliw a2, a1, 4
+; RV64ILEGALI32-NEXT: add a1, a1, a2
+; RV64ILEGALI32-NEXT: andi a2, a1, 15
+; RV64ILEGALI32-NEXT: slli a1, a1, 52
+; RV64ILEGALI32-NEXT: srli a1, a1, 60
+; RV64ILEGALI32-NEXT: add a1, a2, a1
+; RV64ILEGALI32-NEXT: sllw a0, a0, a1
+; RV64ILEGALI32-NEXT: ret
+;
+; RV64ZBBLEGALI32-LABEL: shl_cttz_i16:
+; RV64ZBBLEGALI32: # %bb.0: # %entry
+; RV64ZBBLEGALI32-NEXT: ctzw a1, a1
+; RV64ZBBLEGALI32-NEXT: sllw a0, a0, a1
+; RV64ZBBLEGALI32-NEXT: ret
+entry:
+ %cttz = call i16 @llvm.cttz.i16(i16 %y, i1 true)
+ %res = shl i16 %x, %cttz
+ ret i16 %res
+}
+
+define i16 @shl_cttz_constant_i16(i16 %y) {
+; RV32I-LABEL: shl_cttz_constant_i16:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: lui a2, 5
+; RV32I-NEXT: addi a2, a2, 1365
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 3
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: andi a1, a0, 15
+; RV32I-NEXT: slli a0, a0, 20
+; RV32I-NEXT: srli a0, a0, 28
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: li a1, 4
+; RV32I-NEXT: sll a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i16:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a0, a0
+; RV32ZBB-NEXT: li a1, 4
+; RV32ZBB-NEXT: sll a0, a1, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64IILLEGALI32-LABEL: shl_cttz_constant_i16:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: addi a1, a0, -1
+; RV64IILLEGALI32-NEXT: not a0, a0
+; RV64IILLEGALI32-NEXT: and a0, a0, a1
+; RV64IILLEGALI32-NEXT: srli a1, a0, 1
+; RV64IILLEGALI32-NEXT: lui a2, 5
+; RV64IILLEGALI32-NEXT: addiw a2, a2, 1365
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: sub a0, a0, a1
+; RV64IILLEGALI32-NEXT: lui a1, 3
+; RV64IILLEGALI32-NEXT: addiw a1, a1, 819
+; RV64IILLEGALI32-NEXT: and a2, a0, a1
+; RV64IILLEGALI32-NEXT: srli a0, a0, 2
+; RV64IILLEGALI32-NEXT: and a0, a0, a1
+; RV64IILLEGALI32-NEXT: add a0, a2, a0
+; RV64IILLEGALI32-NEXT: srli a1, a0, 4
+; RV64IILLEGALI32-NEXT: add a0, a0, a1
+; RV64IILLEGALI32-NEXT: andi a1, a0, 15
+; RV64IILLEGALI32-NEXT: slli a0, a0, 52
+; RV64IILLEGALI32-NEXT: srli a0, a0, 60
+; RV64IILLEGALI32-NEXT: add a0, a1, a0
+; RV64IILLEGALI32-NEXT: li a1, 4
+; RV64IILLEGALI32-NEXT: sll a0, a1, a0
+; RV64IILLEGALI32-NEXT: ret
+;
+; RV64ZBBILLEGALI32-LABEL: shl_cttz_constant_i16:
+; RV64ZBBILLEGALI32: # %bb.0: # %entry
+; RV64ZBBILLEGALI32-NEXT: ctz a0, a0
+; RV64ZBBILLEGALI32-NEXT: li a1, 4
+; RV64ZBBILLEGALI32-NEXT: sll a0, a1, a0
+; RV64ZBBILLEGALI32-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_constant_i16:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: addi a1, a0, -1
+; RV64ILEGALI32-NEXT: not a0, a0
+; RV64ILEGALI32-NEXT: and a0, a0, a1
+; RV64ILEGALI32-NEXT: srliw a1, a0, 1
+; RV64ILEGALI32-NEXT: lui a2, 5
+; RV64ILEGALI32-NEXT: addi a2, a2, 1365
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: subw a0, a0, a1
+; RV64ILEGALI32-NEXT: lui a1, 3
+; RV64ILEGALI32-NEXT: addi a1, a1, 819
+; RV64ILEGALI32-NEXT: and a2, a0, a1
+; RV64ILEGALI32-NEXT: srliw a0, a0, 2
+; RV64ILEGALI32-NEXT: and a0, a0, a1
+; RV64ILEGALI32-NEXT: add a0, a2, a0
+; RV64ILEGALI32-NEXT: srliw a1, a0, 4
+; RV64ILEGALI32-NEXT: add a0, a0, a1
+; RV64ILEGALI32-NEXT: andi a1, a0, 15
+; RV64ILEGALI32-NEXT: slli a0, a0, 52
+; RV64ILEGALI32-NEXT: srli a0, a0, 60
+; RV64ILEGALI32-NEXT: add a0, a1, a0
+; RV64ILEGALI32-NEXT: li a1, 4
+; RV64ILEGALI32-NEXT: sllw a0, a1, a0
+; RV64ILEGALI32-NEXT: ret
+;
+; RV64ZBBLEGALI32-LABEL: shl_cttz_constant_i16:
+; RV64ZBBLEGALI32: # %bb.0: # %entry
+; RV64ZBBLEGALI32-NEXT: ctzw a0, a0
+; RV64ZBBLEGALI32-NEXT: li a1, 4
+; RV64ZBBLEGALI32-NEXT: sllw a0, a1, a0
+; RV64ZBBLEGALI32-NEXT: ret
+entry:
+ %cttz = call i16 @llvm.cttz.i16(i16 %y, i1 true)
+ %res = shl i16 4, %cttz
+ ret i16 %res
+}
+
+define i32 @shl_cttz_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_i32:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: neg a2, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: lui a2, 30667
+; RV32I-NEXT: addi a2, a2, 1329
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 27
+; RV32I-NEXT: lui a2, %hi(.LCPI4_0)
+; RV32I-NEXT: addi a2, a2, %lo(.LCPI4_0)
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i32:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: lui a2, 30667
+; RV64I-NEXT: addi a2, a2, 1329
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 27
+; RV64I-NEXT: lui a2, %hi(.LCPI4_0)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI4_0)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sllw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_i32:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctzw a1, a1
+; RV64ZBB-NEXT: sllw a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+ %res = shl i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_constant_i32(i32 %y) {
+; RV32I-LABEL: shl_cttz_constant_i32:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: neg a1, a0
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 30667
+; RV32I-NEXT: addi a1, a1, 1329
+; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 27
+; RV32I-NEXT: lui a1, %hi(.LCPI5_0)
+; RV32I-NEXT: addi a1, a1, %lo(.LCPI5_0)
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: lbu a0, 0(a0)
+; RV32I-NEXT: li a1, 4
+; RV32I-NEXT: sll a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i32:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a0, a0
+; RV32ZBB-NEXT: li a1, 4
+; RV32ZBB-NEXT: sll a0, a1, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_constant_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: lui a1, 30667
+; RV64I-NEXT: addi a1, a1, 1329
+; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: srliw a0, a0, 27
+; RV64I-NEXT: lui a1, %hi(.LCPI5_0)
+; RV64I-NEXT: addi a1, a1, %lo(.LCPI5_0)
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lbu a0, 0(a0)
+; RV64I-NEXT: li a1, 4
+; RV64I-NEXT: sllw a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_constant_i32:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctzw a0, a0
+; RV64ZBB-NEXT: li a1, 4
+; RV64ZBB-NEXT: sllw a0, a1, a0
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+ %res = shl i32 4, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_nuw_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_nuw_i32:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: neg a2, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: lui a2, 30667
+; RV32I-NEXT: addi a2, a2, 1329
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 27
+; RV32I-NEXT: lui a2, %hi(.LCPI6_0)
+; RV32I-NEXT: addi a2, a2, %lo(.LCPI6_0)
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_nuw_i32:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_nuw_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: lui a2, 30667
+; RV64I-NEXT: addi a2, a2, 1329
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 27
+; RV64I-NEXT: lui a2, %hi(.LCPI6_0)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI6_0)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sllw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_nuw_i32:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctzw a1, a1
+; RV64ZBB-NEXT: sllw a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+ %res = shl nuw i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_nsw_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_nsw_i32:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: neg a2, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: lui a2, 30667
+; RV32I-NEXT: addi a2, a2, 1329
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 27
+; RV32I-NEXT: lui a2, %hi(.LCPI7_0)
+; RV32I-NEXT: addi a2, a2, %lo(.LCPI7_0)
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_nsw_i32:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: ctz a1, a1
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_nsw_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: lui a2, 30667
+; RV64I-NEXT: addi a2, a2, 1329
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 27
+; RV64I-NEXT: lui a2, %hi(.LCPI7_0)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI7_0)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sllw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_nsw_i32:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctzw a1, a1
+; RV64ZBB-NEXT: sllw a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+ %res = shl nsw i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_multiuse_i32:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: neg a2, a1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: lui a2, 30667
+; RV32I-NEXT: addi a2, a2, 1329
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 27
+; RV32I-NEXT: lui a2, %hi(.LCPI8_0)
+; RV32I-NEXT: addi a2, a2, %lo(.LCPI8_0)
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: lbu s0, 0(a1)
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call use32
+; RV32I-NEXT: sll a0, s1, s0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_multiuse_i32:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: addi sp, sp, -16
+; RV32ZBB-NEXT: .cfi_def_cfa_offset 16
+; RV32ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: .cfi_offset ra, -4
+; RV32ZBB-NEXT: .cfi_offset s0, -8
+; RV32ZBB-NEXT: .cfi_offset s1, -12
+; RV32ZBB-NEXT: mv s0, a0
+; RV32ZBB-NEXT: ctz s1, a1
+; RV32ZBB-NEXT: mv a0, s1
+; RV32ZBB-NEXT: call use32
+; RV32ZBB-NEXT: sll a0, s0, s1
+; RV32ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: addi sp, sp, 16
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_multiuse_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: .cfi_def_cfa_offset 32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: lui a2, 30667
+; RV64I-NEXT: addi a2, a2, 1329
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 27
+; RV64I-NEXT: lui a2, %hi(.LCPI8_0)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI8_0)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu s0, 0(a1)
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call use32
+; RV64I-NEXT: sllw a0, s1, s0
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_multiuse_i32:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: addi sp, sp, -32
+; RV64ZBB-NEXT: .cfi_def_cfa_offset 32
+; RV64ZBB-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: .cfi_offset ra, -8
+; RV64ZBB-NEXT: .cfi_offset s0, -16
+; RV64ZBB-NEXT: .cfi_offset s1, -24
+; RV64ZBB-NEXT: mv s0, a0
+; RV64ZBB-NEXT: ctzw s1, a1
+; RV64ZBB-NEXT: mv a0, s1
+; RV64ZBB-NEXT: call use32
+; RV64ZBB-NEXT: sllw a0, s0, s1
+; RV64ZBB-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: addi sp, sp, 32
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+ call void @use32(i32 %cttz)
+ %res = shl i32 %x, %cttz
+ ret i32 %res
+}
+
+define i64 @shl_cttz_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_i64:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: lui a4, 30667
+; RV32I-NEXT: addi a5, a4, 1329
+; RV32I-NEXT: lui a4, %hi(.LCPI9_0)
+; RV32I-NEXT: addi a4, a4, %lo(.LCPI9_0)
+; RV32I-NEXT: bnez a2, .LBB9_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: neg a2, a3
+; RV32I-NEXT: and a2, a3, a2
+; RV32I-NEXT: mul a2, a2, a5
+; RV32I-NEXT: srli a2, a2, 27
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: lbu a2, 0(a2)
+; RV32I-NEXT: addi a4, a2, 32
+; RV32I-NEXT: j .LBB9_3
+; RV32I-NEXT: .LBB9_2:
+; RV32I-NEXT: neg a3, a2
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: mul a2, a2, a5
+; RV32I-NEXT: srli a2, a2, 27
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: lbu a4, 0(a2)
+; RV32I-NEXT: .LBB9_3: # %entry
+; RV32I-NEXT: addi a3, a4, -32
+; RV32I-NEXT: sll a2, a0, a4
+; RV32I-NEXT: bltz a3, .LBB9_5
+; RV32I-NEXT: # %bb.4: # %entry
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: j .LBB9_6
+; RV32I-NEXT: .LBB9_5:
+; RV32I-NEXT: sll a1, a1, a4
+; RV32I-NEXT: not a4, a4
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: srl a0, a0, a4
+; RV32I-NEXT: or a1, a1, a0
+; RV32I-NEXT: .LBB9_6: # %entry
+; RV32I-NEXT: srai a0, a3, 31
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_i64:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: bnez a2, .LBB9_2
+; RV32ZBB-NEXT: # %bb.1: # %entry
+; RV32ZBB-NEXT: ctz a2, a3
+; RV32ZBB-NEXT: addi a4, a2, 32
+; RV32ZBB-NEXT: j .LBB9_3
+; RV32ZBB-NEXT: .LBB9_2:
+; RV32ZBB-NEXT: ctz a4, a2
+; RV32ZBB-NEXT: .LBB9_3: # %entry
+; RV32ZBB-NEXT: addi a3, a4, -32
+; RV32ZBB-NEXT: sll a2, a0, a4
+; RV32ZBB-NEXT: bltz a3, .LBB9_5
+; RV32ZBB-NEXT: # %bb.4: # %entry
+; RV32ZBB-NEXT: mv a1, a2
+; RV32ZBB-NEXT: j .LBB9_6
+; RV32ZBB-NEXT: .LBB9_5:
+; RV32ZBB-NEXT: sll a1, a1, a4
+; RV32ZBB-NEXT: not a4, a4
+; RV32ZBB-NEXT: srli a0, a0, 1
+; RV32ZBB-NEXT: srl a0, a0, a4
+; RV32ZBB-NEXT: or a1, a1, a0
+; RV32ZBB-NEXT: .LBB9_6: # %entry
+; RV32ZBB-NEXT: srai a0, a3, 31
+; RV32ZBB-NEXT: and a0, a0, a2
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_i64:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: lui a2, %hi(.LCPI9_0)
+; RV64I-NEXT: ld a2, %lo(.LCPI9_0)(a2)
+; RV64I-NEXT: neg a3, a1
+; RV64I-NEXT: and a1, a1, a3
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srli a1, a1, 58
+; RV64I-NEXT: lui a2, %hi(.LCPI9_1)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI9_1)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sll a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_i64:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a1, a1
+; RV64ZBB-NEXT: sll a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+ %res = shl i64 %x, %cttz
+ ret i64 %res
+}
+
+define i64 @shl_cttz_constant_i64(i64 %y) {
+; RV32I-LABEL: shl_cttz_constant_i64:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: lui a2, 30667
+; RV32I-NEXT: addi a3, a2, 1329
+; RV32I-NEXT: lui a2, %hi(.LCPI10_0)
+; RV32I-NEXT: addi a2, a2, %lo(.LCPI10_0)
+; RV32I-NEXT: bnez a0, .LBB10_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: neg a0, a1
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: mul a0, a0, a3
+; RV32I-NEXT: srli a0, a0, 27
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lbu a0, 0(a0)
+; RV32I-NEXT: addi a1, a0, 32
+; RV32I-NEXT: j .LBB10_3
+; RV32I-NEXT: .LBB10_2:
+; RV32I-NEXT: neg a1, a0
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: mul a0, a0, a3
+; RV32I-NEXT: srli a0, a0, 27
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lbu a1, 0(a0)
+; RV32I-NEXT: .LBB10_3: # %entry
+; RV32I-NEXT: li a0, 4
+; RV32I-NEXT: addi a2, a1, -32
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: bltz a2, .LBB10_5
+; RV32I-NEXT: # %bb.4: # %entry
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: j .LBB10_6
+; RV32I-NEXT: .LBB10_5:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: li a3, 2
+; RV32I-NEXT: srl a1, a3, a1
+; RV32I-NEXT: .LBB10_6: # %entry
+; RV32I-NEXT: srai a2, a2, 31
+; RV32I-NEXT: and a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i64:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: bnez a0, .LBB10_2
+; RV32ZBB-NEXT: # %bb.1: # %entry
+; RV32ZBB-NEXT: ctz a0, a1
+; RV32ZBB-NEXT: addi a1, a0, 32
+; RV32ZBB-NEXT: j .LBB10_3
+; RV32ZBB-NEXT: .LBB10_2:
+; RV32ZBB-NEXT: ctz a1, a0
+; RV32ZBB-NEXT: .LBB10_3: # %entry
+; RV32ZBB-NEXT: li a0, 4
+; RV32ZBB-NEXT: addi a2, a1, -32
+; RV32ZBB-NEXT: sll a0, a0, a1
+; RV32ZBB-NEXT: bltz a2, .LBB10_5
+; RV32ZBB-NEXT: # %bb.4: # %entry
+; RV32ZBB-NEXT: mv a1, a0
+; RV32ZBB-NEXT: j .LBB10_6
+; RV32ZBB-NEXT: .LBB10_5:
+; RV32ZBB-NEXT: not a1, a1
+; RV32ZBB-NEXT: li a3, 2
+; RV32ZBB-NEXT: srl a1, a3, a1
+; RV32ZBB-NEXT: .LBB10_6: # %entry
+; RV32ZBB-NEXT: srai a2, a2, 31
+; RV32ZBB-NEXT: and a0, a2, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_constant_i64:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
+; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1)
+; RV64I-NEXT: neg a2, a0
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 58
+; RV64I-NEXT: lui a1, %hi(.LCPI10_1)
+; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1)
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lbu a0, 0(a0)
+; RV64I-NEXT: li a1, 4
+; RV64I-NEXT: sll a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_constant_i64:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a0, a0
+; RV64ZBB-NEXT: li a1, 4
+; RV64ZBB-NEXT: sll a0, a1, a0
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+ %res = shl i64 4, %cttz
+ ret i64 %res
+}
+
+define i64 @shl_cttz_nuw_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_nuw_i64:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: lui a4, 30667
+; RV32I-NEXT: addi a5, a4, 1329
+; RV32I-NEXT: lui a4, %hi(.LCPI11_0)
+; RV32I-NEXT: addi a4, a4, %lo(.LCPI11_0)
+; RV32I-NEXT: bnez a2, .LBB11_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: neg a2, a3
+; RV32I-NEXT: and a2, a3, a2
+; RV32I-NEXT: mul a2, a2, a5
+; RV32I-NEXT: srli a2, a2, 27
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: lbu a2, 0(a2)
+; RV32I-NEXT: addi a4, a2, 32
+; RV32I-NEXT: j .LBB11_3
+; RV32I-NEXT: .LBB11_2:
+; RV32I-NEXT: neg a3, a2
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: mul a2, a2, a5
+; RV32I-NEXT: srli a2, a2, 27
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: lbu a4, 0(a2)
+; RV32I-NEXT: .LBB11_3: # %entry
+; RV32I-NEXT: addi a3, a4, -32
+; RV32I-NEXT: sll a2, a0, a4
+; RV32I-NEXT: bltz a3, .LBB11_5
+; RV32I-NEXT: # %bb.4: # %entry
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: j .LBB11_6
+; RV32I-NEXT: .LBB11_5:
+; RV32I-NEXT: sll a1, a1, a4
+; RV32I-NEXT: not a4, a4
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: srl a0, a0, a4
+; RV32I-NEXT: or a1, a1, a0
+; RV32I-NEXT: .LBB11_6: # %entry
+; RV32I-NEXT: srai a0, a3, 31
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_nuw_i64:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: bnez a2, .LBB11_2
+; RV32ZBB-NEXT: # %bb.1: # %entry
+; RV32ZBB-NEXT: ctz a2, a3
+; RV32ZBB-NEXT: addi a4, a2, 32
+; RV32ZBB-NEXT: j .LBB11_3
+; RV32ZBB-NEXT: .LBB11_2:
+; RV32ZBB-NEXT: ctz a4, a2
+; RV32ZBB-NEXT: .LBB11_3: # %entry
+; RV32ZBB-NEXT: addi a3, a4, -32
+; RV32ZBB-NEXT: sll a2, a0, a4
+; RV32ZBB-NEXT: bltz a3, .LBB11_5
+; RV32ZBB-NEXT: # %bb.4: # %entry
+; RV32ZBB-NEXT: mv a1, a2
+; RV32ZBB-NEXT: j .LBB11_6
+; RV32ZBB-NEXT: .LBB11_5:
+; RV32ZBB-NEXT: sll a1, a1, a4
+; RV32ZBB-NEXT: not a4, a4
+; RV32ZBB-NEXT: srli a0, a0, 1
+; RV32ZBB-NEXT: srl a0, a0, a4
+; RV32ZBB-NEXT: or a1, a1, a0
+; RV32ZBB-NEXT: .LBB11_6: # %entry
+; RV32ZBB-NEXT: srai a0, a3, 31
+; RV32ZBB-NEXT: and a0, a0, a2
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_nuw_i64:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: lui a2, %hi(.LCPI11_0)
+; RV64I-NEXT: ld a2, %lo(.LCPI11_0)(a2)
+; RV64I-NEXT: neg a3, a1
+; RV64I-NEXT: and a1, a1, a3
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srli a1, a1, 58
+; RV64I-NEXT: lui a2, %hi(.LCPI11_1)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI11_1)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sll a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_nuw_i64:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a1, a1
+; RV64ZBB-NEXT: sll a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+ %res = shl nuw i64 %x, %cttz
+ ret i64 %res
+}
+
+define i64 @shl_cttz_nsw_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_nsw_i64:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: lui a4, 30667
+; RV32I-NEXT: addi a5, a4, 1329
+; RV32I-NEXT: lui a4, %hi(.LCPI12_0)
+; RV32I-NEXT: addi a4, a4, %lo(.LCPI12_0)
+; RV32I-NEXT: bnez a2, .LBB12_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: neg a2, a3
+; RV32I-NEXT: and a2, a3, a2
+; RV32I-NEXT: mul a2, a2, a5
+; RV32I-NEXT: srli a2, a2, 27
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: lbu a2, 0(a2)
+; RV32I-NEXT: addi a4, a2, 32
+; RV32I-NEXT: j .LBB12_3
+; RV32I-NEXT: .LBB12_2:
+; RV32I-NEXT: neg a3, a2
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: mul a2, a2, a5
+; RV32I-NEXT: srli a2, a2, 27
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: lbu a4, 0(a2)
+; RV32I-NEXT: .LBB12_3: # %entry
+; RV32I-NEXT: addi a3, a4, -32
+; RV32I-NEXT: sll a2, a0, a4
+; RV32I-NEXT: bltz a3, .LBB12_5
+; RV32I-NEXT: # %bb.4: # %entry
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: j .LBB12_6
+; RV32I-NEXT: .LBB12_5:
+; RV32I-NEXT: sll a1, a1, a4
+; RV32I-NEXT: not a4, a4
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: srl a0, a0, a4
+; RV32I-NEXT: or a1, a1, a0
+; RV32I-NEXT: .LBB12_6: # %entry
+; RV32I-NEXT: srai a0, a3, 31
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_nsw_i64:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: bnez a2, .LBB12_2
+; RV32ZBB-NEXT: # %bb.1: # %entry
+; RV32ZBB-NEXT: ctz a2, a3
+; RV32ZBB-NEXT: addi a4, a2, 32
+; RV32ZBB-NEXT: j .LBB12_3
+; RV32ZBB-NEXT: .LBB12_2:
+; RV32ZBB-NEXT: ctz a4, a2
+; RV32ZBB-NEXT: .LBB12_3: # %entry
+; RV32ZBB-NEXT: addi a3, a4, -32
+; RV32ZBB-NEXT: sll a2, a0, a4
+; RV32ZBB-NEXT: bltz a3, .LBB12_5
+; RV32ZBB-NEXT: # %bb.4: # %entry
+; RV32ZBB-NEXT: mv a1, a2
+; RV32ZBB-NEXT: j .LBB12_6
+; RV32ZBB-NEXT: .LBB12_5:
+; RV32ZBB-NEXT: sll a1, a1, a4
+; RV32ZBB-NEXT: not a4, a4
+; RV32ZBB-NEXT: srli a0, a0, 1
+; RV32ZBB-NEXT: srl a0, a0, a4
+; RV32ZBB-NEXT: or a1, a1, a0
+; RV32ZBB-NEXT: .LBB12_6: # %entry
+; RV32ZBB-NEXT: srai a0, a3, 31
+; RV32ZBB-NEXT: and a0, a0, a2
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_nsw_i64:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: lui a2, %hi(.LCPI12_0)
+; RV64I-NEXT: ld a2, %lo(.LCPI12_0)(a2)
+; RV64I-NEXT: neg a3, a1
+; RV64I-NEXT: and a1, a1, a3
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srli a1, a1, 58
+; RV64I-NEXT: lui a2, %hi(.LCPI12_1)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI12_1)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sll a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_nsw_i64:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: ctz a1, a1
+; RV64ZBB-NEXT: sll a0, a0, a1
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+ %res = shl nsw i64 %x, %cttz
+ ret i64 %res
+}
+
+define i64 @shl_cttz_multiuse_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_multiuse_i64:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: mv s1, a1
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: lui a0, 30667
+; RV32I-NEXT: addi a1, a0, 1329
+; RV32I-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32I-NEXT: addi a0, a0, %lo(.LCPI13_0)
+; RV32I-NEXT: bnez a2, .LBB13_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: neg a2, a3
+; RV32I-NEXT: and a2, a3, a2
+; RV32I-NEXT: mul a1, a2, a1
+; RV32I-NEXT: srli a1, a1, 27
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lbu a0, 0(a0)
+; RV32I-NEXT: addi s2, a0, 32
+; RV32I-NEXT: j .LBB13_3
+; RV32I-NEXT: .LBB13_2:
+; RV32I-NEXT: neg a3, a2
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: mul a1, a2, a1
+; RV32I-NEXT: srli a1, a1, 27
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lbu s2, 0(a0)
+; RV32I-NEXT: .LBB13_3: # %entry
+; RV32I-NEXT: mv a0, s2
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: call use64
+; RV32I-NEXT: addi a2, s2, -32
+; RV32I-NEXT: sll a0, s0, s2
+; RV32I-NEXT: bltz a2, .LBB13_5
+; RV32I-NEXT: # %bb.4: # %entry
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: j .LBB13_6
+; RV32I-NEXT: .LBB13_5:
+; RV32I-NEXT: sll a1, s1, s2
+; RV32I-NEXT: not a3, s2
+; RV32I-NEXT: srli s0, s0, 1
+; RV32I-NEXT: srl a3, s0, a3
+; RV32I-NEXT: or a1, a1, a3
+; RV32I-NEXT: .LBB13_6: # %entry
+; RV32I-NEXT: srai a2, a2, 31
+; RV32I-NEXT: and a0, a2, a0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32ZBB-LABEL: shl_cttz_multiuse_i64:
+; RV32ZBB: # %bb.0: # %entry
+; RV32ZBB-NEXT: addi sp, sp, -16
+; RV32ZBB-NEXT: .cfi_def_cfa_offset 16
+; RV32ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: .cfi_offset ra, -4
+; RV32ZBB-NEXT: .cfi_offset s0, -8
+; RV32ZBB-NEXT: .cfi_offset s1, -12
+; RV32ZBB-NEXT: .cfi_offset s2, -16
+; RV32ZBB-NEXT: mv s1, a1
+; RV32ZBB-NEXT: mv s0, a0
+; RV32ZBB-NEXT: bnez a2, .LBB13_2
+; RV32ZBB-NEXT: # %bb.1: # %entry
+; RV32ZBB-NEXT: ctz a0, a3
+; RV32ZBB-NEXT: addi s2, a0, 32
+; RV32ZBB-NEXT: j .LBB13_3
+; RV32ZBB-NEXT: .LBB13_2:
+; RV32ZBB-NEXT: ctz s2, a2
+; RV32ZBB-NEXT: .LBB13_3: # %entry
+; RV32ZBB-NEXT: mv a0, s2
+; RV32ZBB-NEXT: li a1, 0
+; RV32ZBB-NEXT: call use64
+; RV32ZBB-NEXT: addi a2, s2, -32
+; RV32ZBB-NEXT: sll a0, s0, s2
+; RV32ZBB-NEXT: bltz a2, .LBB13_5
+; RV32ZBB-NEXT: # %bb.4: # %entry
+; RV32ZBB-NEXT: mv a1, a0
+; RV32ZBB-NEXT: j .LBB13_6
+; RV32ZBB-NEXT: .LBB13_5:
+; RV32ZBB-NEXT: sll a1, s1, s2
+; RV32ZBB-NEXT: not a3, s2
+; RV32ZBB-NEXT: srli s0, s0, 1
+; RV32ZBB-NEXT: srl a3, s0, a3
+; RV32ZBB-NEXT: or a1, a1, a3
+; RV32ZBB-NEXT: .LBB13_6: # %entry
+; RV32ZBB-NEXT: srai a2, a2, 31
+; RV32ZBB-NEXT: and a0, a2, a0
+; RV32ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: addi sp, sp, 16
+; RV32ZBB-NEXT: ret
+;
+; RV64I-LABEL: shl_cttz_multiuse_i64:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: .cfi_def_cfa_offset 32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: lui a2, %hi(.LCPI13_0)
+; RV64I-NEXT: ld a2, %lo(.LCPI13_0)(a2)
+; RV64I-NEXT: neg a3, a1
+; RV64I-NEXT: and a1, a1, a3
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: srli a1, a1, 58
+; RV64I-NEXT: lui a2, %hi(.LCPI13_1)
+; RV64I-NEXT: addi a2, a2, %lo(.LCPI13_1)
+; RV64I-NEXT: add a1, a2, a1
+; RV64I-NEXT: lbu s0, 0(a1)
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call use64
+; RV64I-NEXT: sll a0, s1, s0
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: shl_cttz_multiuse_i64:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: addi sp, sp, -32
+; RV64ZBB-NEXT: .cfi_def_cfa_offset 32
+; RV64ZBB-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT: .cfi_offset ra, -8
+; RV64ZBB-NEXT: .cfi_offset s0, -16
+; RV64ZBB-NEXT: .cfi_offset s1, -24
+; RV64ZBB-NEXT: mv s0, a0
+; RV64ZBB-NEXT: ctz s1, a1
+; RV64ZBB-NEXT: mv a0, s1
+; RV64ZBB-NEXT: call use64
+; RV64ZBB-NEXT: sll a0, s0, s1
+; RV64ZBB-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT: addi sp, sp, 32
+; RV64ZBB-NEXT: ret
+entry:
+ %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+ call void @use64(i64 %cttz)
+ %res = shl i64 %x, %cttz
+ ret i64 %res
+}
+
+declare void @use32(i32 signext)
+declare void @use64(i64)
>From 43e36d8f990eb7835ba9aad9afedb437ea1c6e97 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Thu, 21 Mar 2024 22:33:38 +0800
Subject: [PATCH 2/2] [CodeGenPrepare] Transform shl X, cttz(Y) to mul (Y & -Y), X if cttz is unsupported
---
llvm/lib/CodeGen/CodeGenPrepare.cpp | 24 ++-
llvm/test/CodeGen/RISCV/shl-cttz.ll | 240 ++++++++++++----------------
2 files changed, 124 insertions(+), 140 deletions(-)
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 9f99bb7e693f7e..f67c48243646da 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -8433,7 +8433,29 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
return true;
switch (I->getOpcode()) {
- case Instruction::Shl:
+ case Instruction::Shl: {
+ // shl X, cttz(Y) -> mul (Y & -Y), X if cttz is unsupported on the target.
+ Value *Y;
+ if (match(I->getOperand(1),
+ m_OneUse(m_Intrinsic<Intrinsic::cttz>(m_Value(Y))))) {
+ EVT VT = TLI->getValueType(*DL, Y->getType());
+ if (!TLI->isOperationLegalOrCustom(ISD::CTTZ, VT) &&
+ TLI->isOperationLegalOrCustom(ISD::MUL, VT)) {
+ IRBuilder<> Builder(I);
+ Value *NegY = Builder.CreateNeg(Y);
+ Value *Power2 = Builder.CreateAnd(Y, NegY);
+ Value *New = Builder.CreateMul(Power2, I->getOperand(0), "",
+ /*HasNUW=*/I->hasNoUnsignedWrap(),
+ /*HasNSW=*/false);
+ replaceAllUsesWith(I, New, FreshBBs, IsHugeFunc);
+ RecursivelyDeleteTriviallyDeadInstructions(
+ I, TLInfo, nullptr,
+ [&](Value *V) { removeAllAssertingVHReferences(V); });
+ return true;
+ }
+ }
+ }
+ [[fallthrough]];
case Instruction::LShr:
case Instruction::AShr:
return optimizeShiftInst(cast<BinaryOperator>(I));
diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll
index 56aa4ee2d96899..1c416ed381fb5a 100644
--- a/llvm/test/CodeGen/RISCV/shl-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll
@@ -383,15 +383,7 @@ define i32 @shl_cttz_i32(i32 %x, i32 %y) {
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: neg a2, a1
; RV32I-NEXT: and a1, a1, a2
-; RV32I-NEXT: lui a2, 30667
-; RV32I-NEXT: addi a2, a2, 1329
-; RV32I-NEXT: mul a1, a1, a2
-; RV32I-NEXT: srli a1, a1, 27
-; RV32I-NEXT: lui a2, %hi(.LCPI4_0)
-; RV32I-NEXT: addi a2, a2, %lo(.LCPI4_0)
-; RV32I-NEXT: add a1, a2, a1
-; RV32I-NEXT: lbu a1, 0(a1)
-; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: mul a0, a1, a0
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: shl_cttz_i32:
@@ -400,26 +392,33 @@ define i32 @shl_cttz_i32(i32 %x, i32 %y) {
; RV32ZBB-NEXT: sll a0, a0, a1
; RV32ZBB-NEXT: ret
;
-; RV64I-LABEL: shl_cttz_i32:
-; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a2, a1
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: lui a2, 30667
-; RV64I-NEXT: addi a2, a2, 1329
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: srliw a1, a1, 27
-; RV64I-NEXT: lui a2, %hi(.LCPI4_0)
-; RV64I-NEXT: addi a2, a2, %lo(.LCPI4_0)
-; RV64I-NEXT: add a1, a2, a1
-; RV64I-NEXT: lbu a1, 0(a1)
-; RV64I-NEXT: sllw a0, a0, a1
-; RV64I-NEXT: ret
+; RV64IILLEGALI32-LABEL: shl_cttz_i32:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: negw a2, a1
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: lui a2, 30667
+; RV64IILLEGALI32-NEXT: addi a2, a2, 1329
+; RV64IILLEGALI32-NEXT: mul a1, a1, a2
+; RV64IILLEGALI32-NEXT: srliw a1, a1, 27
+; RV64IILLEGALI32-NEXT: lui a2, %hi(.LCPI4_0)
+; RV64IILLEGALI32-NEXT: addi a2, a2, %lo(.LCPI4_0)
+; RV64IILLEGALI32-NEXT: add a1, a2, a1
+; RV64IILLEGALI32-NEXT: lbu a1, 0(a1)
+; RV64IILLEGALI32-NEXT: sllw a0, a0, a1
+; RV64IILLEGALI32-NEXT: ret
;
; RV64ZBB-LABEL: shl_cttz_i32:
; RV64ZBB: # %bb.0: # %entry
; RV64ZBB-NEXT: ctzw a1, a1
; RV64ZBB-NEXT: sllw a0, a0, a1
; RV64ZBB-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_i32:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: negw a2, a1
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: mulw a0, a1, a0
+; RV64ILEGALI32-NEXT: ret
entry:
%cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
%res = shl i32 %x, %cttz
@@ -431,16 +430,7 @@ define i32 @shl_cttz_constant_i32(i32 %y) {
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 30667
-; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: mul a0, a0, a1
-; RV32I-NEXT: srli a0, a0, 27
-; RV32I-NEXT: lui a1, %hi(.LCPI5_0)
-; RV32I-NEXT: addi a1, a1, %lo(.LCPI5_0)
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: lbu a0, 0(a0)
-; RV32I-NEXT: li a1, 4
-; RV32I-NEXT: sll a0, a1, a0
+; RV32I-NEXT: slli a0, a0, 2
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: shl_cttz_constant_i32:
@@ -450,21 +440,21 @@ define i32 @shl_cttz_constant_i32(i32 %y) {
; RV32ZBB-NEXT: sll a0, a1, a0
; RV32ZBB-NEXT: ret
;
-; RV64I-LABEL: shl_cttz_constant_i32:
-; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a1, a0
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 30667
-; RV64I-NEXT: addi a1, a1, 1329
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: srliw a0, a0, 27
-; RV64I-NEXT: lui a1, %hi(.LCPI5_0)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI5_0)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: li a1, 4
-; RV64I-NEXT: sllw a0, a1, a0
-; RV64I-NEXT: ret
+; RV64IILLEGALI32-LABEL: shl_cttz_constant_i32:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: negw a1, a0
+; RV64IILLEGALI32-NEXT: and a0, a0, a1
+; RV64IILLEGALI32-NEXT: lui a1, 30667
+; RV64IILLEGALI32-NEXT: addi a1, a1, 1329
+; RV64IILLEGALI32-NEXT: mul a0, a0, a1
+; RV64IILLEGALI32-NEXT: srliw a0, a0, 27
+; RV64IILLEGALI32-NEXT: lui a1, %hi(.LCPI5_0)
+; RV64IILLEGALI32-NEXT: addi a1, a1, %lo(.LCPI5_0)
+; RV64IILLEGALI32-NEXT: add a0, a1, a0
+; RV64IILLEGALI32-NEXT: lbu a0, 0(a0)
+; RV64IILLEGALI32-NEXT: li a1, 4
+; RV64IILLEGALI32-NEXT: sllw a0, a1, a0
+; RV64IILLEGALI32-NEXT: ret
;
; RV64ZBB-LABEL: shl_cttz_constant_i32:
; RV64ZBB: # %bb.0: # %entry
@@ -472,6 +462,13 @@ define i32 @shl_cttz_constant_i32(i32 %y) {
; RV64ZBB-NEXT: li a1, 4
; RV64ZBB-NEXT: sllw a0, a1, a0
; RV64ZBB-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_constant_i32:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: negw a1, a0
+; RV64ILEGALI32-NEXT: and a0, a0, a1
+; RV64ILEGALI32-NEXT: slliw a0, a0, 2
+; RV64ILEGALI32-NEXT: ret
entry:
%cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
%res = shl i32 4, %cttz
@@ -483,15 +480,7 @@ define i32 @shl_cttz_nuw_i32(i32 %x, i32 %y) {
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: neg a2, a1
; RV32I-NEXT: and a1, a1, a2
-; RV32I-NEXT: lui a2, 30667
-; RV32I-NEXT: addi a2, a2, 1329
-; RV32I-NEXT: mul a1, a1, a2
-; RV32I-NEXT: srli a1, a1, 27
-; RV32I-NEXT: lui a2, %hi(.LCPI6_0)
-; RV32I-NEXT: addi a2, a2, %lo(.LCPI6_0)
-; RV32I-NEXT: add a1, a2, a1
-; RV32I-NEXT: lbu a1, 0(a1)
-; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: mul a0, a1, a0
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: shl_cttz_nuw_i32:
@@ -500,26 +489,33 @@ define i32 @shl_cttz_nuw_i32(i32 %x, i32 %y) {
; RV32ZBB-NEXT: sll a0, a0, a1
; RV32ZBB-NEXT: ret
;
-; RV64I-LABEL: shl_cttz_nuw_i32:
-; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a2, a1
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: lui a2, 30667
-; RV64I-NEXT: addi a2, a2, 1329
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: srliw a1, a1, 27
-; RV64I-NEXT: lui a2, %hi(.LCPI6_0)
-; RV64I-NEXT: addi a2, a2, %lo(.LCPI6_0)
-; RV64I-NEXT: add a1, a2, a1
-; RV64I-NEXT: lbu a1, 0(a1)
-; RV64I-NEXT: sllw a0, a0, a1
-; RV64I-NEXT: ret
+; RV64IILLEGALI32-LABEL: shl_cttz_nuw_i32:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: negw a2, a1
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: lui a2, 30667
+; RV64IILLEGALI32-NEXT: addi a2, a2, 1329
+; RV64IILLEGALI32-NEXT: mul a1, a1, a2
+; RV64IILLEGALI32-NEXT: srliw a1, a1, 27
+; RV64IILLEGALI32-NEXT: lui a2, %hi(.LCPI6_0)
+; RV64IILLEGALI32-NEXT: addi a2, a2, %lo(.LCPI6_0)
+; RV64IILLEGALI32-NEXT: add a1, a2, a1
+; RV64IILLEGALI32-NEXT: lbu a1, 0(a1)
+; RV64IILLEGALI32-NEXT: sllw a0, a0, a1
+; RV64IILLEGALI32-NEXT: ret
;
; RV64ZBB-LABEL: shl_cttz_nuw_i32:
; RV64ZBB: # %bb.0: # %entry
; RV64ZBB-NEXT: ctzw a1, a1
; RV64ZBB-NEXT: sllw a0, a0, a1
; RV64ZBB-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_nuw_i32:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: negw a2, a1
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: mulw a0, a1, a0
+; RV64ILEGALI32-NEXT: ret
entry:
%cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
%res = shl nuw i32 %x, %cttz
@@ -531,15 +527,7 @@ define i32 @shl_cttz_nsw_i32(i32 %x, i32 %y) {
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: neg a2, a1
; RV32I-NEXT: and a1, a1, a2
-; RV32I-NEXT: lui a2, 30667
-; RV32I-NEXT: addi a2, a2, 1329
-; RV32I-NEXT: mul a1, a1, a2
-; RV32I-NEXT: srli a1, a1, 27
-; RV32I-NEXT: lui a2, %hi(.LCPI7_0)
-; RV32I-NEXT: addi a2, a2, %lo(.LCPI7_0)
-; RV32I-NEXT: add a1, a2, a1
-; RV32I-NEXT: lbu a1, 0(a1)
-; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: mul a0, a1, a0
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: shl_cttz_nsw_i32:
@@ -548,26 +536,33 @@ define i32 @shl_cttz_nsw_i32(i32 %x, i32 %y) {
; RV32ZBB-NEXT: sll a0, a0, a1
; RV32ZBB-NEXT: ret
;
-; RV64I-LABEL: shl_cttz_nsw_i32:
-; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a2, a1
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: lui a2, 30667
-; RV64I-NEXT: addi a2, a2, 1329
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: srliw a1, a1, 27
-; RV64I-NEXT: lui a2, %hi(.LCPI7_0)
-; RV64I-NEXT: addi a2, a2, %lo(.LCPI7_0)
-; RV64I-NEXT: add a1, a2, a1
-; RV64I-NEXT: lbu a1, 0(a1)
-; RV64I-NEXT: sllw a0, a0, a1
-; RV64I-NEXT: ret
+; RV64IILLEGALI32-LABEL: shl_cttz_nsw_i32:
+; RV64IILLEGALI32: # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT: negw a2, a1
+; RV64IILLEGALI32-NEXT: and a1, a1, a2
+; RV64IILLEGALI32-NEXT: lui a2, 30667
+; RV64IILLEGALI32-NEXT: addi a2, a2, 1329
+; RV64IILLEGALI32-NEXT: mul a1, a1, a2
+; RV64IILLEGALI32-NEXT: srliw a1, a1, 27
+; RV64IILLEGALI32-NEXT: lui a2, %hi(.LCPI7_0)
+; RV64IILLEGALI32-NEXT: addi a2, a2, %lo(.LCPI7_0)
+; RV64IILLEGALI32-NEXT: add a1, a2, a1
+; RV64IILLEGALI32-NEXT: lbu a1, 0(a1)
+; RV64IILLEGALI32-NEXT: sllw a0, a0, a1
+; RV64IILLEGALI32-NEXT: ret
;
; RV64ZBB-LABEL: shl_cttz_nsw_i32:
; RV64ZBB: # %bb.0: # %entry
; RV64ZBB-NEXT: ctzw a1, a1
; RV64ZBB-NEXT: sllw a0, a0, a1
; RV64ZBB-NEXT: ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_nsw_i32:
+; RV64ILEGALI32: # %bb.0: # %entry
+; RV64ILEGALI32-NEXT: negw a2, a1
+; RV64ILEGALI32-NEXT: and a1, a1, a2
+; RV64ILEGALI32-NEXT: mulw a0, a1, a0
+; RV64ILEGALI32-NEXT: ret
entry:
%cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
%res = shl nsw i32 %x, %cttz
@@ -754,17 +749,9 @@ define i64 @shl_cttz_i64(i64 %x, i64 %y) {
;
; RV64I-LABEL: shl_cttz_i64:
; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: lui a2, %hi(.LCPI9_0)
-; RV64I-NEXT: ld a2, %lo(.LCPI9_0)(a2)
-; RV64I-NEXT: neg a3, a1
-; RV64I-NEXT: and a1, a1, a3
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: srli a1, a1, 58
-; RV64I-NEXT: lui a2, %hi(.LCPI9_1)
-; RV64I-NEXT: addi a2, a2, %lo(.LCPI9_1)
-; RV64I-NEXT: add a1, a2, a1
-; RV64I-NEXT: lbu a1, 0(a1)
-; RV64I-NEXT: sll a0, a0, a1
+; RV64I-NEXT: neg a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: mul a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: shl_cttz_i64:
@@ -847,18 +834,9 @@ define i64 @shl_cttz_constant_i64(i64 %y) {
;
; RV64I-LABEL: shl_cttz_constant_i64:
; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
-; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT: neg a2, a0
-; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: mul a0, a0, a1
-; RV64I-NEXT: srli a0, a0, 58
-; RV64I-NEXT: lui a1, %hi(.LCPI10_1)
-; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1)
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: lbu a0, 0(a0)
-; RV64I-NEXT: li a1, 4
-; RV64I-NEXT: sll a0, a1, a0
+; RV64I-NEXT: neg a1, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: slli a0, a0, 2
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: shl_cttz_constant_i64:
@@ -944,17 +922,9 @@ define i64 @shl_cttz_nuw_i64(i64 %x, i64 %y) {
;
; RV64I-LABEL: shl_cttz_nuw_i64:
; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: lui a2, %hi(.LCPI11_0)
-; RV64I-NEXT: ld a2, %lo(.LCPI11_0)(a2)
-; RV64I-NEXT: neg a3, a1
-; RV64I-NEXT: and a1, a1, a3
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: srli a1, a1, 58
-; RV64I-NEXT: lui a2, %hi(.LCPI11_1)
-; RV64I-NEXT: addi a2, a2, %lo(.LCPI11_1)
-; RV64I-NEXT: add a1, a2, a1
-; RV64I-NEXT: lbu a1, 0(a1)
-; RV64I-NEXT: sll a0, a0, a1
+; RV64I-NEXT: neg a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: mul a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: shl_cttz_nuw_i64:
@@ -1039,17 +1009,9 @@ define i64 @shl_cttz_nsw_i64(i64 %x, i64 %y) {
;
; RV64I-LABEL: shl_cttz_nsw_i64:
; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: lui a2, %hi(.LCPI12_0)
-; RV64I-NEXT: ld a2, %lo(.LCPI12_0)(a2)
-; RV64I-NEXT: neg a3, a1
-; RV64I-NEXT: and a1, a1, a3
-; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: srli a1, a1, 58
-; RV64I-NEXT: lui a2, %hi(.LCPI12_1)
-; RV64I-NEXT: addi a2, a2, %lo(.LCPI12_1)
-; RV64I-NEXT: add a1, a2, a1
-; RV64I-NEXT: lbu a1, 0(a1)
-; RV64I-NEXT: sll a0, a0, a1
+; RV64I-NEXT: neg a2, a1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: mul a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: shl_cttz_nsw_i64: