[llvm] [CodeGenPrepare] Transform `shl X, cttz(Y)` to `mul (Y & -Y), X` if cttz is unsupported (PR #85066)

Yingwei Zheng via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 13 05:11:52 PDT 2024


https://github.com/dtcxzyw created https://github.com/llvm/llvm-project/pull/85066

This patch folds `shl X, cttz(Y)` into `mul (Y & -Y), X` when cttz is unsupported by the target.
Alive2: https://alive2.llvm.org/ce/z/AtLN5Y
Fixes https://github.com/llvm/llvm-project/issues/84763.
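
To illustrate, here is the rewrite at the IR level (a hand-written sketch mirroring the Alive2 proof above; the `src`/`tgt` names are just for exposition and are not part of the patch):

    ; Before: the shift amount goes through cttz, which targets without a
    ; native count-trailing-zeros instruction expand into a de Bruijn
    ; multiply plus table lookup.
    define i32 @src(i32 %x, i32 %y) {
      %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
      %res = shl i32 %x, %cttz
      ret i32 %res
    }

    ; After: Y & -Y isolates the lowest set bit of Y, i.e. 1 << cttz(Y)
    ; for nonzero Y, so the shift collapses to neg/and/mul.
    define i32 @tgt(i32 %x, i32 %y) {
      %neg = sub i32 0, %y
      %pow2 = and i32 %y, %neg
      %res = mul i32 %pow2, %x
      ret i32 %res
    }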


From 8555432833b9ecc1c16919bf68981de84f33adef Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Wed, 13 Mar 2024 19:55:22 +0800
Subject: [PATCH 1/2] [CodeGenPrepare] Add pre-commit tests for PR84763. NFC.

---
 llvm/test/CodeGen/RISCV/shl-cttz.ll | 872 ++++++++++++++++++++++++++++
 1 file changed, 872 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/shl-cttz.ll

diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll
new file mode 100644
index 00000000000000..b71b5e505f43a1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll
@@ -0,0 +1,872 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBB
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64I,RV64IILLEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBILLEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64I,RV64ILEGALI32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBLEGALI32
+
+define i32 @shl_cttz_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_i32:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    neg a2, a1
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    lui a2, 30667
+; RV32I-NEXT:    addi a2, a2, 1329
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 27
+; RV32I-NEXT:    lui a2, %hi(.LCPI0_0)
+; RV32I-NEXT:    addi a2, a2, %lo(.LCPI0_0)
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    lbu a1, 0(a1)
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_i32:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    ctz a1, a1
+; RV32ZBB-NEXT:    sll a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_i32:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    negw a2, a1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    lui a2, 30667
+; RV64I-NEXT:    addi a2, a2, 1329
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI0_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI0_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_i32:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    ctzw a1, a1
+; RV64ZBB-NEXT:    sllw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+  %res = shl i32 %x, %cttz
+  ret i32 %res
+}
+
+define i32 @shl_cttz_constant_i32(i32 %y) {
+; RV32I-LABEL: shl_cttz_constant_i32:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    neg a1, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 30667
+; RV32I-NEXT:    addi a1, a1, 1329
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    lui a1, %hi(.LCPI1_0)
+; RV32I-NEXT:    addi a1, a1, %lo(.LCPI1_0)
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    li a1, 4
+; RV32I-NEXT:    sll a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i32:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    li a1, 4
+; RV32ZBB-NEXT:    sll a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_constant_i32:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, 30667
+; RV64I-NEXT:    addi a1, a1, 1329
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    srliw a0, a0, 27
+; RV64I-NEXT:    lui a1, %hi(.LCPI1_0)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI1_0)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    li a1, 4
+; RV64I-NEXT:    sllw a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_constant_i32:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    ctzw a0, a0
+; RV64ZBB-NEXT:    li a1, 4
+; RV64ZBB-NEXT:    sllw a0, a1, a0
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+  %res = shl i32 4, %cttz
+  ret i32 %res
+}
+
+define i32 @shl_cttz_nuw_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_nuw_i32:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    neg a2, a1
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    lui a2, 30667
+; RV32I-NEXT:    addi a2, a2, 1329
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 27
+; RV32I-NEXT:    lui a2, %hi(.LCPI2_0)
+; RV32I-NEXT:    addi a2, a2, %lo(.LCPI2_0)
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    lbu a1, 0(a1)
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_nuw_i32:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    ctz a1, a1
+; RV32ZBB-NEXT:    sll a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_nuw_i32:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    negw a2, a1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    lui a2, 30667
+; RV64I-NEXT:    addi a2, a2, 1329
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI2_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI2_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_nuw_i32:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    ctzw a1, a1
+; RV64ZBB-NEXT:    sllw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+  %res = shl nuw i32 %x, %cttz
+  ret i32 %res
+}
+
+define i32 @shl_cttz_nsw_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_nsw_i32:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    neg a2, a1
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    lui a2, 30667
+; RV32I-NEXT:    addi a2, a2, 1329
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 27
+; RV32I-NEXT:    lui a2, %hi(.LCPI3_0)
+; RV32I-NEXT:    addi a2, a2, %lo(.LCPI3_0)
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    lbu a1, 0(a1)
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_nsw_i32:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    ctz a1, a1
+; RV32ZBB-NEXT:    sll a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_nsw_i32:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    negw a2, a1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    lui a2, 30667
+; RV64I-NEXT:    addi a2, a2, 1329
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI3_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI3_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_nsw_i32:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    ctzw a1, a1
+; RV64ZBB-NEXT:    sllw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+  %res = shl nsw i32 %x, %cttz
+  ret i32 %res
+}
+
+define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) {
+; RV32I-LABEL: shl_cttz_multiuse_i32:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    .cfi_offset s0, -8
+; RV32I-NEXT:    .cfi_offset s1, -12
+; RV32I-NEXT:    neg a2, a1
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    lui a2, 30667
+; RV32I-NEXT:    addi a2, a2, 1329
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 27
+; RV32I-NEXT:    lui a2, %hi(.LCPI4_0)
+; RV32I-NEXT:    addi a2, a2, %lo(.LCPI4_0)
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    lbu s0, 0(a1)
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call use32
+; RV32I-NEXT:    sll a0, s1, s0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_multiuse_i32:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    .cfi_def_cfa_offset 16
+; RV32ZBB-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    .cfi_offset ra, -4
+; RV32ZBB-NEXT:    .cfi_offset s0, -8
+; RV32ZBB-NEXT:    .cfi_offset s1, -12
+; RV32ZBB-NEXT:    mv s0, a0
+; RV32ZBB-NEXT:    ctz s1, a1
+; RV32ZBB-NEXT:    mv a0, s1
+; RV32ZBB-NEXT:    call use32
+; RV32ZBB-NEXT:    sll a0, s0, s1
+; RV32ZBB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_multiuse_i32:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    .cfi_def_cfa_offset 32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    .cfi_offset s0, -16
+; RV64I-NEXT:    .cfi_offset s1, -24
+; RV64I-NEXT:    negw a2, a1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    lui a2, 30667
+; RV64I-NEXT:    addi a2, a2, 1329
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI4_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI4_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu s0, 0(a1)
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call use32
+; RV64I-NEXT:    sllw a0, s1, s0
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_multiuse_i32:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    addi sp, sp, -32
+; RV64ZBB-NEXT:    .cfi_def_cfa_offset 32
+; RV64ZBB-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT:    .cfi_offset ra, -8
+; RV64ZBB-NEXT:    .cfi_offset s0, -16
+; RV64ZBB-NEXT:    .cfi_offset s1, -24
+; RV64ZBB-NEXT:    mv s0, a0
+; RV64ZBB-NEXT:    ctzw s1, a1
+; RV64ZBB-NEXT:    mv a0, s1
+; RV64ZBB-NEXT:    call use32
+; RV64ZBB-NEXT:    sllw a0, s0, s1
+; RV64ZBB-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT:    addi sp, sp, 32
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
+  call void @use32(i32 %cttz)
+  %res = shl i32 %x, %cttz
+  ret i32 %res
+}
+
+define i64 @shl_cttz_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_i64:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a4, 30667
+; RV32I-NEXT:    addi a5, a4, 1329
+; RV32I-NEXT:    lui a4, %hi(.LCPI5_0)
+; RV32I-NEXT:    addi a4, a4, %lo(.LCPI5_0)
+; RV32I-NEXT:    bnez a2, .LBB5_2
+; RV32I-NEXT:  # %bb.1: # %entry
+; RV32I-NEXT:    neg a2, a3
+; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    mul a2, a2, a5
+; RV32I-NEXT:    srli a2, a2, 27
+; RV32I-NEXT:    add a2, a4, a2
+; RV32I-NEXT:    lbu a2, 0(a2)
+; RV32I-NEXT:    addi a4, a2, 32
+; RV32I-NEXT:    j .LBB5_3
+; RV32I-NEXT:  .LBB5_2:
+; RV32I-NEXT:    neg a3, a2
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    mul a2, a2, a5
+; RV32I-NEXT:    srli a2, a2, 27
+; RV32I-NEXT:    add a2, a4, a2
+; RV32I-NEXT:    lbu a4, 0(a2)
+; RV32I-NEXT:  .LBB5_3: # %entry
+; RV32I-NEXT:    addi a3, a4, -32
+; RV32I-NEXT:    sll a2, a0, a4
+; RV32I-NEXT:    bltz a3, .LBB5_5
+; RV32I-NEXT:  # %bb.4: # %entry
+; RV32I-NEXT:    mv a1, a2
+; RV32I-NEXT:    j .LBB5_6
+; RV32I-NEXT:  .LBB5_5:
+; RV32I-NEXT:    sll a1, a1, a4
+; RV32I-NEXT:    not a4, a4
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    srl a0, a0, a4
+; RV32I-NEXT:    or a1, a1, a0
+; RV32I-NEXT:  .LBB5_6: # %entry
+; RV32I-NEXT:    srai a0, a3, 31
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_i64:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    bnez a2, .LBB5_2
+; RV32ZBB-NEXT:  # %bb.1: # %entry
+; RV32ZBB-NEXT:    ctz a2, a3
+; RV32ZBB-NEXT:    addi a4, a2, 32
+; RV32ZBB-NEXT:    j .LBB5_3
+; RV32ZBB-NEXT:  .LBB5_2:
+; RV32ZBB-NEXT:    ctz a4, a2
+; RV32ZBB-NEXT:  .LBB5_3: # %entry
+; RV32ZBB-NEXT:    addi a3, a4, -32
+; RV32ZBB-NEXT:    sll a2, a0, a4
+; RV32ZBB-NEXT:    bltz a3, .LBB5_5
+; RV32ZBB-NEXT:  # %bb.4: # %entry
+; RV32ZBB-NEXT:    mv a1, a2
+; RV32ZBB-NEXT:    j .LBB5_6
+; RV32ZBB-NEXT:  .LBB5_5:
+; RV32ZBB-NEXT:    sll a1, a1, a4
+; RV32ZBB-NEXT:    not a4, a4
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    srl a0, a0, a4
+; RV32ZBB-NEXT:    or a1, a1, a0
+; RV32ZBB-NEXT:  .LBB5_6: # %entry
+; RV32ZBB-NEXT:    srai a0, a3, 31
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_i64:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a2, %hi(.LCPI5_0)
+; RV64I-NEXT:    ld a2, %lo(.LCPI5_0)(a2)
+; RV64I-NEXT:    neg a3, a1
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 58
+; RV64I-NEXT:    lui a2, %hi(.LCPI5_1)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI5_1)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_i64:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    ctz a1, a1
+; RV64ZBB-NEXT:    sll a0, a0, a1
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+  %res = shl i64 %x, %cttz
+  ret i64 %res
+}
+
+define i64 @shl_cttz_constant_i64(i64 %y) {
+; RV32I-LABEL: shl_cttz_constant_i64:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a2, 30667
+; RV32I-NEXT:    addi a3, a2, 1329
+; RV32I-NEXT:    lui a2, %hi(.LCPI6_0)
+; RV32I-NEXT:    addi a2, a2, %lo(.LCPI6_0)
+; RV32I-NEXT:    bnez a0, .LBB6_2
+; RV32I-NEXT:  # %bb.1: # %entry
+; RV32I-NEXT:    neg a0, a1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    mul a0, a0, a3
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    addi a1, a0, 32
+; RV32I-NEXT:    j .LBB6_3
+; RV32I-NEXT:  .LBB6_2:
+; RV32I-NEXT:    neg a1, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    mul a0, a0, a3
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    lbu a1, 0(a0)
+; RV32I-NEXT:  .LBB6_3: # %entry
+; RV32I-NEXT:    li a0, 4
+; RV32I-NEXT:    addi a2, a1, -32
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    bltz a2, .LBB6_5
+; RV32I-NEXT:  # %bb.4: # %entry
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    j .LBB6_6
+; RV32I-NEXT:  .LBB6_5:
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    srl a1, a3, a1
+; RV32I-NEXT:  .LBB6_6: # %entry
+; RV32I-NEXT:    srai a2, a2, 31
+; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_constant_i64:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    bnez a0, .LBB6_2
+; RV32ZBB-NEXT:  # %bb.1: # %entry
+; RV32ZBB-NEXT:    ctz a0, a1
+; RV32ZBB-NEXT:    addi a1, a0, 32
+; RV32ZBB-NEXT:    j .LBB6_3
+; RV32ZBB-NEXT:  .LBB6_2:
+; RV32ZBB-NEXT:    ctz a1, a0
+; RV32ZBB-NEXT:  .LBB6_3: # %entry
+; RV32ZBB-NEXT:    li a0, 4
+; RV32ZBB-NEXT:    addi a2, a1, -32
+; RV32ZBB-NEXT:    sll a0, a0, a1
+; RV32ZBB-NEXT:    bltz a2, .LBB6_5
+; RV32ZBB-NEXT:  # %bb.4: # %entry
+; RV32ZBB-NEXT:    mv a1, a0
+; RV32ZBB-NEXT:    j .LBB6_6
+; RV32ZBB-NEXT:  .LBB6_5:
+; RV32ZBB-NEXT:    not a1, a1
+; RV32ZBB-NEXT:    li a3, 2
+; RV32ZBB-NEXT:    srl a1, a3, a1
+; RV32ZBB-NEXT:  .LBB6_6: # %entry
+; RV32ZBB-NEXT:    srai a2, a2, 31
+; RV32ZBB-NEXT:    and a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_constant_i64:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI6_0)(a1)
+; RV64I-NEXT:    neg a2, a0
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    srli a0, a0, 58
+; RV64I-NEXT:    lui a1, %hi(.LCPI6_1)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_1)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    li a1, 4
+; RV64I-NEXT:    sll a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_constant_i64:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    li a1, 4
+; RV64ZBB-NEXT:    sll a0, a1, a0
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+  %res = shl i64 4, %cttz
+  ret i64 %res
+}
+
+define i64 @shl_cttz_nuw_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_nuw_i64:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a4, 30667
+; RV32I-NEXT:    addi a5, a4, 1329
+; RV32I-NEXT:    lui a4, %hi(.LCPI7_0)
+; RV32I-NEXT:    addi a4, a4, %lo(.LCPI7_0)
+; RV32I-NEXT:    bnez a2, .LBB7_2
+; RV32I-NEXT:  # %bb.1: # %entry
+; RV32I-NEXT:    neg a2, a3
+; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    mul a2, a2, a5
+; RV32I-NEXT:    srli a2, a2, 27
+; RV32I-NEXT:    add a2, a4, a2
+; RV32I-NEXT:    lbu a2, 0(a2)
+; RV32I-NEXT:    addi a4, a2, 32
+; RV32I-NEXT:    j .LBB7_3
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    neg a3, a2
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    mul a2, a2, a5
+; RV32I-NEXT:    srli a2, a2, 27
+; RV32I-NEXT:    add a2, a4, a2
+; RV32I-NEXT:    lbu a4, 0(a2)
+; RV32I-NEXT:  .LBB7_3: # %entry
+; RV32I-NEXT:    addi a3, a4, -32
+; RV32I-NEXT:    sll a2, a0, a4
+; RV32I-NEXT:    bltz a3, .LBB7_5
+; RV32I-NEXT:  # %bb.4: # %entry
+; RV32I-NEXT:    mv a1, a2
+; RV32I-NEXT:    j .LBB7_6
+; RV32I-NEXT:  .LBB7_5:
+; RV32I-NEXT:    sll a1, a1, a4
+; RV32I-NEXT:    not a4, a4
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    srl a0, a0, a4
+; RV32I-NEXT:    or a1, a1, a0
+; RV32I-NEXT:  .LBB7_6: # %entry
+; RV32I-NEXT:    srai a0, a3, 31
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_nuw_i64:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    bnez a2, .LBB7_2
+; RV32ZBB-NEXT:  # %bb.1: # %entry
+; RV32ZBB-NEXT:    ctz a2, a3
+; RV32ZBB-NEXT:    addi a4, a2, 32
+; RV32ZBB-NEXT:    j .LBB7_3
+; RV32ZBB-NEXT:  .LBB7_2:
+; RV32ZBB-NEXT:    ctz a4, a2
+; RV32ZBB-NEXT:  .LBB7_3: # %entry
+; RV32ZBB-NEXT:    addi a3, a4, -32
+; RV32ZBB-NEXT:    sll a2, a0, a4
+; RV32ZBB-NEXT:    bltz a3, .LBB7_5
+; RV32ZBB-NEXT:  # %bb.4: # %entry
+; RV32ZBB-NEXT:    mv a1, a2
+; RV32ZBB-NEXT:    j .LBB7_6
+; RV32ZBB-NEXT:  .LBB7_5:
+; RV32ZBB-NEXT:    sll a1, a1, a4
+; RV32ZBB-NEXT:    not a4, a4
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    srl a0, a0, a4
+; RV32ZBB-NEXT:    or a1, a1, a0
+; RV32ZBB-NEXT:  .LBB7_6: # %entry
+; RV32ZBB-NEXT:    srai a0, a3, 31
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_nuw_i64:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a2, %hi(.LCPI7_0)
+; RV64I-NEXT:    ld a2, %lo(.LCPI7_0)(a2)
+; RV64I-NEXT:    neg a3, a1
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 58
+; RV64I-NEXT:    lui a2, %hi(.LCPI7_1)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI7_1)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_nuw_i64:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    ctz a1, a1
+; RV64ZBB-NEXT:    sll a0, a0, a1
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+  %res = shl nuw i64 %x, %cttz
+  ret i64 %res
+}
+
+define i64 @shl_cttz_nsw_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_nsw_i64:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lui a4, 30667
+; RV32I-NEXT:    addi a5, a4, 1329
+; RV32I-NEXT:    lui a4, %hi(.LCPI8_0)
+; RV32I-NEXT:    addi a4, a4, %lo(.LCPI8_0)
+; RV32I-NEXT:    bnez a2, .LBB8_2
+; RV32I-NEXT:  # %bb.1: # %entry
+; RV32I-NEXT:    neg a2, a3
+; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    mul a2, a2, a5
+; RV32I-NEXT:    srli a2, a2, 27
+; RV32I-NEXT:    add a2, a4, a2
+; RV32I-NEXT:    lbu a2, 0(a2)
+; RV32I-NEXT:    addi a4, a2, 32
+; RV32I-NEXT:    j .LBB8_3
+; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    neg a3, a2
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    mul a2, a2, a5
+; RV32I-NEXT:    srli a2, a2, 27
+; RV32I-NEXT:    add a2, a4, a2
+; RV32I-NEXT:    lbu a4, 0(a2)
+; RV32I-NEXT:  .LBB8_3: # %entry
+; RV32I-NEXT:    addi a3, a4, -32
+; RV32I-NEXT:    sll a2, a0, a4
+; RV32I-NEXT:    bltz a3, .LBB8_5
+; RV32I-NEXT:  # %bb.4: # %entry
+; RV32I-NEXT:    mv a1, a2
+; RV32I-NEXT:    j .LBB8_6
+; RV32I-NEXT:  .LBB8_5:
+; RV32I-NEXT:    sll a1, a1, a4
+; RV32I-NEXT:    not a4, a4
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    srl a0, a0, a4
+; RV32I-NEXT:    or a1, a1, a0
+; RV32I-NEXT:  .LBB8_6: # %entry
+; RV32I-NEXT:    srai a0, a3, 31
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_nsw_i64:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    bnez a2, .LBB8_2
+; RV32ZBB-NEXT:  # %bb.1: # %entry
+; RV32ZBB-NEXT:    ctz a2, a3
+; RV32ZBB-NEXT:    addi a4, a2, 32
+; RV32ZBB-NEXT:    j .LBB8_3
+; RV32ZBB-NEXT:  .LBB8_2:
+; RV32ZBB-NEXT:    ctz a4, a2
+; RV32ZBB-NEXT:  .LBB8_3: # %entry
+; RV32ZBB-NEXT:    addi a3, a4, -32
+; RV32ZBB-NEXT:    sll a2, a0, a4
+; RV32ZBB-NEXT:    bltz a3, .LBB8_5
+; RV32ZBB-NEXT:  # %bb.4: # %entry
+; RV32ZBB-NEXT:    mv a1, a2
+; RV32ZBB-NEXT:    j .LBB8_6
+; RV32ZBB-NEXT:  .LBB8_5:
+; RV32ZBB-NEXT:    sll a1, a1, a4
+; RV32ZBB-NEXT:    not a4, a4
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    srl a0, a0, a4
+; RV32ZBB-NEXT:    or a1, a1, a0
+; RV32ZBB-NEXT:  .LBB8_6: # %entry
+; RV32ZBB-NEXT:    srai a0, a3, 31
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_nsw_i64:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lui a2, %hi(.LCPI8_0)
+; RV64I-NEXT:    ld a2, %lo(.LCPI8_0)(a2)
+; RV64I-NEXT:    neg a3, a1
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 58
+; RV64I-NEXT:    lui a2, %hi(.LCPI8_1)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI8_1)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_nsw_i64:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    ctz a1, a1
+; RV64ZBB-NEXT:    sll a0, a0, a1
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+  %res = shl nsw i64 %x, %cttz
+  ret i64 %res
+}
+
+define i64 @shl_cttz_multiuse_i64(i64 %x, i64 %y) {
+; RV32I-LABEL: shl_cttz_multiuse_i64:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    .cfi_offset s0, -8
+; RV32I-NEXT:    .cfi_offset s1, -12
+; RV32I-NEXT:    .cfi_offset s2, -16
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    lui a0, 30667
+; RV32I-NEXT:    addi a1, a0, 1329
+; RV32I-NEXT:    lui a0, %hi(.LCPI9_0)
+; RV32I-NEXT:    addi a0, a0, %lo(.LCPI9_0)
+; RV32I-NEXT:    bnez a2, .LBB9_2
+; RV32I-NEXT:  # %bb.1: # %entry
+; RV32I-NEXT:    neg a2, a3
+; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    mul a1, a2, a1
+; RV32I-NEXT:    srli a1, a1, 27
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    addi s2, a0, 32
+; RV32I-NEXT:    j .LBB9_3
+; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    neg a3, a2
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    mul a1, a2, a1
+; RV32I-NEXT:    srli a1, a1, 27
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lbu s2, 0(a0)
+; RV32I-NEXT:  .LBB9_3: # %entry
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call use64
+; RV32I-NEXT:    addi a2, s2, -32
+; RV32I-NEXT:    sll a0, s0, s2
+; RV32I-NEXT:    bltz a2, .LBB9_5
+; RV32I-NEXT:  # %bb.4: # %entry
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    j .LBB9_6
+; RV32I-NEXT:  .LBB9_5:
+; RV32I-NEXT:    sll a1, s1, s2
+; RV32I-NEXT:    not a3, s2
+; RV32I-NEXT:    srli s0, s0, 1
+; RV32I-NEXT:    srl a3, s0, a3
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:  .LBB9_6: # %entry
+; RV32I-NEXT:    srai a2, a2, 31
+; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: shl_cttz_multiuse_i64:
+; RV32ZBB:       # %bb.0: # %entry
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    .cfi_def_cfa_offset 16
+; RV32ZBB-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    .cfi_offset ra, -4
+; RV32ZBB-NEXT:    .cfi_offset s0, -8
+; RV32ZBB-NEXT:    .cfi_offset s1, -12
+; RV32ZBB-NEXT:    .cfi_offset s2, -16
+; RV32ZBB-NEXT:    mv s1, a1
+; RV32ZBB-NEXT:    mv s0, a0
+; RV32ZBB-NEXT:    bnez a2, .LBB9_2
+; RV32ZBB-NEXT:  # %bb.1: # %entry
+; RV32ZBB-NEXT:    ctz a0, a3
+; RV32ZBB-NEXT:    addi s2, a0, 32
+; RV32ZBB-NEXT:    j .LBB9_3
+; RV32ZBB-NEXT:  .LBB9_2:
+; RV32ZBB-NEXT:    ctz s2, a2
+; RV32ZBB-NEXT:  .LBB9_3: # %entry
+; RV32ZBB-NEXT:    mv a0, s2
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    call use64
+; RV32ZBB-NEXT:    addi a2, s2, -32
+; RV32ZBB-NEXT:    sll a0, s0, s2
+; RV32ZBB-NEXT:    bltz a2, .LBB9_5
+; RV32ZBB-NEXT:  # %bb.4: # %entry
+; RV32ZBB-NEXT:    mv a1, a0
+; RV32ZBB-NEXT:    j .LBB9_6
+; RV32ZBB-NEXT:  .LBB9_5:
+; RV32ZBB-NEXT:    sll a1, s1, s2
+; RV32ZBB-NEXT:    not a3, s2
+; RV32ZBB-NEXT:    srli s0, s0, 1
+; RV32ZBB-NEXT:    srl a3, s0, a3
+; RV32ZBB-NEXT:    or a1, a1, a3
+; RV32ZBB-NEXT:  .LBB9_6: # %entry
+; RV32ZBB-NEXT:    srai a2, a2, 31
+; RV32ZBB-NEXT:    and a0, a2, a0
+; RV32ZBB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: shl_cttz_multiuse_i64:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    .cfi_def_cfa_offset 32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    .cfi_offset s0, -16
+; RV64I-NEXT:    .cfi_offset s1, -24
+; RV64I-NEXT:    lui a2, %hi(.LCPI9_0)
+; RV64I-NEXT:    ld a2, %lo(.LCPI9_0)(a2)
+; RV64I-NEXT:    neg a3, a1
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 58
+; RV64I-NEXT:    lui a2, %hi(.LCPI9_1)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI9_1)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu s0, 0(a1)
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call use64
+; RV64I-NEXT:    sll a0, s1, s0
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: shl_cttz_multiuse_i64:
+; RV64ZBB:       # %bb.0: # %entry
+; RV64ZBB-NEXT:    addi sp, sp, -32
+; RV64ZBB-NEXT:    .cfi_def_cfa_offset 32
+; RV64ZBB-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZBB-NEXT:    .cfi_offset ra, -8
+; RV64ZBB-NEXT:    .cfi_offset s0, -16
+; RV64ZBB-NEXT:    .cfi_offset s1, -24
+; RV64ZBB-NEXT:    mv s0, a0
+; RV64ZBB-NEXT:    ctz s1, a1
+; RV64ZBB-NEXT:    mv a0, s1
+; RV64ZBB-NEXT:    call use64
+; RV64ZBB-NEXT:    sll a0, s0, s1
+; RV64ZBB-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT:    addi sp, sp, 32
+; RV64ZBB-NEXT:    ret
+entry:
+  %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true)
+  call void @use64(i64 %cttz)
+  %res = shl i64 %x, %cttz
+  ret i64 %res
+}
+
+declare void @use32(i32 signext)
+declare void @use64(i64)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV64IILLEGALI32: {{.*}}
+; RV64ILEGALI32: {{.*}}
+; RV64ZBBILLEGALI32: {{.*}}
+; RV64ZBBLEGALI32: {{.*}}

From 370a2a7e42ec057f6d5efd89ecec44f582fe5676 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Wed, 13 Mar 2024 19:58:15 +0800
Subject: [PATCH 2/2] [CodeGenPrepare] Transform `shl X, cttz(Y)` to `mul (Y &
 -Y), X` if cttz is unsupported

---
 llvm/lib/CodeGen/CodeGenPrepare.cpp |  24 ++-
 llvm/test/CodeGen/RISCV/shl-cttz.ll | 242 ++++++++++++----------------
 2 files changed, 124 insertions(+), 142 deletions(-)

diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 36f6cc83be2c3f..47a8839c394347 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -8396,7 +8396,29 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
     return true;
 
   switch (I->getOpcode()) {
-  case Instruction::Shl:
+  case Instruction::Shl: {
+    // shl X, cttz(Y) -> mul (Y & -Y), X if cttz is unsupported on the target.
+    Value *Y;
+    if (match(I->getOperand(1),
+              m_OneUse(m_Intrinsic<Intrinsic::cttz>(m_Value(Y))))) {
+      EVT VT = TLI->getValueType(*DL, Y->getType());
+      if (!TLI->isOperationLegalOrCustom(ISD::CTTZ, VT) &&
+          TLI->isOperationLegalOrCustom(ISD::MUL, VT)) {
+        IRBuilder<> Builder(I);
+        Value *NegY = Builder.CreateNeg(Y);
+        Value *Power2 = Builder.CreateAnd(Y, NegY);
+        Value *New = Builder.CreateMul(Power2, I->getOperand(0), "",
+                                       /*HasNUW=*/I->hasNoUnsignedWrap(),
+                                       /*HasNSW=*/false);
+        replaceAllUsesWith(I, New, FreshBBs, IsHugeFunc);
+        RecursivelyDeleteTriviallyDeadInstructions(
+            I, TLInfo, nullptr,
+            [&](Value *V) { removeAllAssertingVHReferences(V); });
+        return true;
+      }
+    }
+  }
+  [[fallthrough]];
   case Instruction::LShr:
   case Instruction::AShr:
     return optimizeShiftInst(cast<BinaryOperator>(I));
diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll
index b71b5e505f43a1..d8d50e5868405c 100644
--- a/llvm/test/CodeGen/RISCV/shl-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll
@@ -17,15 +17,7 @@ define i32 @shl_cttz_i32(i32 %x, i32 %y) {
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    neg a2, a1
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    lui a2, 30667
-; RV32I-NEXT:    addi a2, a2, 1329
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    srli a1, a1, 27
-; RV32I-NEXT:    lui a2, %hi(.LCPI0_0)
-; RV32I-NEXT:    addi a2, a2, %lo(.LCPI0_0)
-; RV32I-NEXT:    add a1, a2, a1
-; RV32I-NEXT:    lbu a1, 0(a1)
-; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    mul a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: shl_cttz_i32:
@@ -34,26 +26,33 @@ define i32 @shl_cttz_i32(i32 %x, i32 %y) {
 ; RV32ZBB-NEXT:    sll a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
-; RV64I-LABEL: shl_cttz_i32:
-; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    negw a2, a1
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    lui a2, 30667
-; RV64I-NEXT:    addi a2, a2, 1329
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    srliw a1, a1, 27
-; RV64I-NEXT:    lui a2, %hi(.LCPI0_0)
-; RV64I-NEXT:    addi a2, a2, %lo(.LCPI0_0)
-; RV64I-NEXT:    add a1, a2, a1
-; RV64I-NEXT:    lbu a1, 0(a1)
-; RV64I-NEXT:    sllw a0, a0, a1
-; RV64I-NEXT:    ret
+; RV64IILLEGALI32-LABEL: shl_cttz_i32:
+; RV64IILLEGALI32:       # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT:    negw a2, a1
+; RV64IILLEGALI32-NEXT:    and a1, a1, a2
+; RV64IILLEGALI32-NEXT:    lui a2, 30667
+; RV64IILLEGALI32-NEXT:    addi a2, a2, 1329
+; RV64IILLEGALI32-NEXT:    mul a1, a1, a2
+; RV64IILLEGALI32-NEXT:    srliw a1, a1, 27
+; RV64IILLEGALI32-NEXT:    lui a2, %hi(.LCPI0_0)
+; RV64IILLEGALI32-NEXT:    addi a2, a2, %lo(.LCPI0_0)
+; RV64IILLEGALI32-NEXT:    add a1, a2, a1
+; RV64IILLEGALI32-NEXT:    lbu a1, 0(a1)
+; RV64IILLEGALI32-NEXT:    sllw a0, a0, a1
+; RV64IILLEGALI32-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_i32:
 ; RV64ZBB:       # %bb.0: # %entry
 ; RV64ZBB-NEXT:    ctzw a1, a1
 ; RV64ZBB-NEXT:    sllw a0, a0, a1
 ; RV64ZBB-NEXT:    ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_i32:
+; RV64ILEGALI32:       # %bb.0: # %entry
+; RV64ILEGALI32-NEXT:    negw a2, a1
+; RV64ILEGALI32-NEXT:    and a1, a1, a2
+; RV64ILEGALI32-NEXT:    mulw a0, a1, a0
+; RV64ILEGALI32-NEXT:    ret
 entry:
   %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
   %res = shl i32 %x, %cttz
@@ -65,16 +64,7 @@ define i32 @shl_cttz_constant_i32(i32 %y) {
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    neg a1, a0
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 30667
-; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    mul a0, a0, a1
-; RV32I-NEXT:    srli a0, a0, 27
-; RV32I-NEXT:    lui a1, %hi(.LCPI1_0)
-; RV32I-NEXT:    addi a1, a1, %lo(.LCPI1_0)
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    lbu a0, 0(a0)
-; RV32I-NEXT:    li a1, 4
-; RV32I-NEXT:    sll a0, a1, a0
+; RV32I-NEXT:    slli a0, a0, 2
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: shl_cttz_constant_i32:
@@ -84,21 +74,21 @@ define i32 @shl_cttz_constant_i32(i32 %y) {
 ; RV32ZBB-NEXT:    sll a0, a1, a0
 ; RV32ZBB-NEXT:    ret
 ;
-; RV64I-LABEL: shl_cttz_constant_i32:
-; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    negw a1, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addi a1, a1, 1329
-; RV64I-NEXT:    mul a0, a0, a1
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI1_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI1_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    li a1, 4
-; RV64I-NEXT:    sllw a0, a1, a0
-; RV64I-NEXT:    ret
+; RV64IILLEGALI32-LABEL: shl_cttz_constant_i32:
+; RV64IILLEGALI32:       # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT:    negw a1, a0
+; RV64IILLEGALI32-NEXT:    and a0, a0, a1
+; RV64IILLEGALI32-NEXT:    lui a1, 30667
+; RV64IILLEGALI32-NEXT:    addi a1, a1, 1329
+; RV64IILLEGALI32-NEXT:    mul a0, a0, a1
+; RV64IILLEGALI32-NEXT:    srliw a0, a0, 27
+; RV64IILLEGALI32-NEXT:    lui a1, %hi(.LCPI1_0)
+; RV64IILLEGALI32-NEXT:    addi a1, a1, %lo(.LCPI1_0)
+; RV64IILLEGALI32-NEXT:    add a0, a1, a0
+; RV64IILLEGALI32-NEXT:    lbu a0, 0(a0)
+; RV64IILLEGALI32-NEXT:    li a1, 4
+; RV64IILLEGALI32-NEXT:    sllw a0, a1, a0
+; RV64IILLEGALI32-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_constant_i32:
 ; RV64ZBB:       # %bb.0: # %entry
@@ -106,6 +96,13 @@ define i32 @shl_cttz_constant_i32(i32 %y) {
 ; RV64ZBB-NEXT:    li a1, 4
 ; RV64ZBB-NEXT:    sllw a0, a1, a0
 ; RV64ZBB-NEXT:    ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_constant_i32:
+; RV64ILEGALI32:       # %bb.0: # %entry
+; RV64ILEGALI32-NEXT:    negw a1, a0
+; RV64ILEGALI32-NEXT:    and a0, a0, a1
+; RV64ILEGALI32-NEXT:    slliw a0, a0, 2
+; RV64ILEGALI32-NEXT:    ret
 entry:
   %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
   %res = shl i32 4, %cttz
@@ -117,15 +114,7 @@ define i32 @shl_cttz_nuw_i32(i32 %x, i32 %y) {
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    neg a2, a1
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    lui a2, 30667
-; RV32I-NEXT:    addi a2, a2, 1329
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    srli a1, a1, 27
-; RV32I-NEXT:    lui a2, %hi(.LCPI2_0)
-; RV32I-NEXT:    addi a2, a2, %lo(.LCPI2_0)
-; RV32I-NEXT:    add a1, a2, a1
-; RV32I-NEXT:    lbu a1, 0(a1)
-; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    mul a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: shl_cttz_nuw_i32:
@@ -134,26 +123,33 @@ define i32 @shl_cttz_nuw_i32(i32 %x, i32 %y) {
 ; RV32ZBB-NEXT:    sll a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
-; RV64I-LABEL: shl_cttz_nuw_i32:
-; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    negw a2, a1
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    lui a2, 30667
-; RV64I-NEXT:    addi a2, a2, 1329
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    srliw a1, a1, 27
-; RV64I-NEXT:    lui a2, %hi(.LCPI2_0)
-; RV64I-NEXT:    addi a2, a2, %lo(.LCPI2_0)
-; RV64I-NEXT:    add a1, a2, a1
-; RV64I-NEXT:    lbu a1, 0(a1)
-; RV64I-NEXT:    sllw a0, a0, a1
-; RV64I-NEXT:    ret
+; RV64IILLEGALI32-LABEL: shl_cttz_nuw_i32:
+; RV64IILLEGALI32:       # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT:    negw a2, a1
+; RV64IILLEGALI32-NEXT:    and a1, a1, a2
+; RV64IILLEGALI32-NEXT:    lui a2, 30667
+; RV64IILLEGALI32-NEXT:    addi a2, a2, 1329
+; RV64IILLEGALI32-NEXT:    mul a1, a1, a2
+; RV64IILLEGALI32-NEXT:    srliw a1, a1, 27
+; RV64IILLEGALI32-NEXT:    lui a2, %hi(.LCPI2_0)
+; RV64IILLEGALI32-NEXT:    addi a2, a2, %lo(.LCPI2_0)
+; RV64IILLEGALI32-NEXT:    add a1, a2, a1
+; RV64IILLEGALI32-NEXT:    lbu a1, 0(a1)
+; RV64IILLEGALI32-NEXT:    sllw a0, a0, a1
+; RV64IILLEGALI32-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_nuw_i32:
 ; RV64ZBB:       # %bb.0: # %entry
 ; RV64ZBB-NEXT:    ctzw a1, a1
 ; RV64ZBB-NEXT:    sllw a0, a0, a1
 ; RV64ZBB-NEXT:    ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_nuw_i32:
+; RV64ILEGALI32:       # %bb.0: # %entry
+; RV64ILEGALI32-NEXT:    negw a2, a1
+; RV64ILEGALI32-NEXT:    and a1, a1, a2
+; RV64ILEGALI32-NEXT:    mulw a0, a1, a0
+; RV64ILEGALI32-NEXT:    ret
 entry:
   %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
   %res = shl nuw i32 %x, %cttz
@@ -165,15 +161,7 @@ define i32 @shl_cttz_nsw_i32(i32 %x, i32 %y) {
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    neg a2, a1
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    lui a2, 30667
-; RV32I-NEXT:    addi a2, a2, 1329
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    srli a1, a1, 27
-; RV32I-NEXT:    lui a2, %hi(.LCPI3_0)
-; RV32I-NEXT:    addi a2, a2, %lo(.LCPI3_0)
-; RV32I-NEXT:    add a1, a2, a1
-; RV32I-NEXT:    lbu a1, 0(a1)
-; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    mul a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: shl_cttz_nsw_i32:
@@ -182,26 +170,33 @@ define i32 @shl_cttz_nsw_i32(i32 %x, i32 %y) {
 ; RV32ZBB-NEXT:    sll a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
-; RV64I-LABEL: shl_cttz_nsw_i32:
-; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    negw a2, a1
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    lui a2, 30667
-; RV64I-NEXT:    addi a2, a2, 1329
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    srliw a1, a1, 27
-; RV64I-NEXT:    lui a2, %hi(.LCPI3_0)
-; RV64I-NEXT:    addi a2, a2, %lo(.LCPI3_0)
-; RV64I-NEXT:    add a1, a2, a1
-; RV64I-NEXT:    lbu a1, 0(a1)
-; RV64I-NEXT:    sllw a0, a0, a1
-; RV64I-NEXT:    ret
+; RV64IILLEGALI32-LABEL: shl_cttz_nsw_i32:
+; RV64IILLEGALI32:       # %bb.0: # %entry
+; RV64IILLEGALI32-NEXT:    negw a2, a1
+; RV64IILLEGALI32-NEXT:    and a1, a1, a2
+; RV64IILLEGALI32-NEXT:    lui a2, 30667
+; RV64IILLEGALI32-NEXT:    addi a2, a2, 1329
+; RV64IILLEGALI32-NEXT:    mul a1, a1, a2
+; RV64IILLEGALI32-NEXT:    srliw a1, a1, 27
+; RV64IILLEGALI32-NEXT:    lui a2, %hi(.LCPI3_0)
+; RV64IILLEGALI32-NEXT:    addi a2, a2, %lo(.LCPI3_0)
+; RV64IILLEGALI32-NEXT:    add a1, a2, a1
+; RV64IILLEGALI32-NEXT:    lbu a1, 0(a1)
+; RV64IILLEGALI32-NEXT:    sllw a0, a0, a1
+; RV64IILLEGALI32-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_nsw_i32:
 ; RV64ZBB:       # %bb.0: # %entry
 ; RV64ZBB-NEXT:    ctzw a1, a1
 ; RV64ZBB-NEXT:    sllw a0, a0, a1
 ; RV64ZBB-NEXT:    ret
+;
+; RV64ILEGALI32-LABEL: shl_cttz_nsw_i32:
+; RV64ILEGALI32:       # %bb.0: # %entry
+; RV64ILEGALI32-NEXT:    negw a2, a1
+; RV64ILEGALI32-NEXT:    and a1, a1, a2
+; RV64ILEGALI32-NEXT:    mulw a0, a1, a0
+; RV64ILEGALI32-NEXT:    ret
 entry:
   %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
   %res = shl nsw i32 %x, %cttz
@@ -388,17 +383,9 @@ define i64 @shl_cttz_i64(i64 %x, i64 %y) {
 ;
 ; RV64I-LABEL: shl_cttz_i64:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    lui a2, %hi(.LCPI5_0)
-; RV64I-NEXT:    ld a2, %lo(.LCPI5_0)(a2)
-; RV64I-NEXT:    neg a3, a1
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    srli a1, a1, 58
-; RV64I-NEXT:    lui a2, %hi(.LCPI5_1)
-; RV64I-NEXT:    addi a2, a2, %lo(.LCPI5_1)
-; RV64I-NEXT:    add a1, a2, a1
-; RV64I-NEXT:    lbu a1, 0(a1)
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    neg a2, a1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    mul a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_i64:
@@ -481,18 +468,9 @@ define i64 @shl_cttz_constant_i64(i64 %y) {
 ;
 ; RV64I-LABEL: shl_cttz_constant_i64:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI6_0)(a1)
-; RV64I-NEXT:    neg a2, a0
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    mul a0, a0, a1
-; RV64I-NEXT:    srli a0, a0, 58
-; RV64I-NEXT:    lui a1, %hi(.LCPI6_1)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_1)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    li a1, 4
-; RV64I-NEXT:    sll a0, a1, a0
+; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 2
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_constant_i64:
@@ -578,17 +556,9 @@ define i64 @shl_cttz_nuw_i64(i64 %x, i64 %y) {
 ;
 ; RV64I-LABEL: shl_cttz_nuw_i64:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    lui a2, %hi(.LCPI7_0)
-; RV64I-NEXT:    ld a2, %lo(.LCPI7_0)(a2)
-; RV64I-NEXT:    neg a3, a1
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    srli a1, a1, 58
-; RV64I-NEXT:    lui a2, %hi(.LCPI7_1)
-; RV64I-NEXT:    addi a2, a2, %lo(.LCPI7_1)
-; RV64I-NEXT:    add a1, a2, a1
-; RV64I-NEXT:    lbu a1, 0(a1)
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    neg a2, a1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    mul a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_nuw_i64:
@@ -673,17 +643,9 @@ define i64 @shl_cttz_nsw_i64(i64 %x, i64 %y) {
 ;
 ; RV64I-LABEL: shl_cttz_nsw_i64:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    lui a2, %hi(.LCPI8_0)
-; RV64I-NEXT:    ld a2, %lo(.LCPI8_0)(a2)
-; RV64I-NEXT:    neg a3, a1
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    srli a1, a1, 58
-; RV64I-NEXT:    lui a2, %hi(.LCPI8_1)
-; RV64I-NEXT:    addi a2, a2, %lo(.LCPI8_1)
-; RV64I-NEXT:    add a1, a2, a1
-; RV64I-NEXT:    lbu a1, 0(a1)
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    neg a2, a1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    mul a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_nsw_i64:
@@ -866,7 +828,5 @@ entry:
 declare void @use32(i32 signext)
 declare void @use64(i64)
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV64IILLEGALI32: {{.*}}
-; RV64ILEGALI32: {{.*}}
 ; RV64ZBBILLEGALI32: {{.*}}
 ; RV64ZBBLEGALI32: {{.*}}
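
A note on the `HasNUW`/`HasNSW` arguments in the CodeGenPrepare hunk above:
`nuw` carries over from the `shl` to the new `mul` (for a power-of-two
multiplier, `X << k` and `X * (1 << k)` overflow unsigned under exactly the
same condition), while `nsw` is deliberately dropped. A small hand-written
example of why preserving it would be wrong (mine, not from the patch): when
`Y` has only bit 31 set, `cttz(Y) = 31` and `Y & -Y = INT_MIN`, and

    define i32 @src_nsw(i32 %x) {
      %res = shl nsw i32 %x, 31   ; %x = -1 yields INT_MIN without signed
      ret i32 %res                ; overflow, so this is not poison
    }

    define i32 @wrong_tgt(i32 %x) {
      %res = mul nsw i32 -2147483648, %x  ; %x = -1 overflows to 2^31,
      ret i32 %res                        ; which is poison
    }

so attaching `nsw` to the new `mul` would introduce poison that the original
`shl` did not have.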


