[llvm] 23e9b49 - [RISCV][GISel] Copy some Zba IR test cases from SelectionDAG. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 14 13:59:18 PST 2024
Author: Craig Topper
Date: 2024-11-14T13:47:49-08:00
New Revision: 23e9b49b88dc9b8be3edd2e46485d59e05f9f6ba
URL: https://github.com/llvm/llvm-project/commit/23e9b49b88dc9b8be3edd2e46485d59e05f9f6ba
DIFF: https://github.com/llvm/llvm-project/commit/23e9b49b88dc9b8be3edd2e46485d59e05f9f6ba.diff
LOG: [RISCV][GISel] Copy some Zba IR test cases from SelectionDAG. NFC
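For context, the Zba instructions these tests exercise fold a shift and an add
into a single instruction. A rough sketch of their semantics (this summary is
not part of the patch):

    sh1add rd, rs1, rs2     # rd = rs2 + (rs1 << 1)
    sh2add rd, rs1, rs2     # rd = rs2 + (rs1 << 2)
    sh3add rd, rs1, rs2     # rd = rs2 + (rs1 << 3)
    shNadd.uw rd, rs1, rs2  # rd = rs2 + (zext32(rs1) << N), RV64 only
    slli.uw rd, rs1, shamt  # rd = zext32(rs1) << shamt, RV64 only
    add.uw rd, rs1, rs2     # rd = rs2 + zext32(rs1); zext.w is add.uw with rs2 = x0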
Added:
llvm/test/CodeGen/RISCV/GlobalISel/rv32zba.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zba.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zba.ll
new file mode 100644
index 00000000000000..233491836152a9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zba.ll
@@ -0,0 +1,172 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV32I
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+m,+zba -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBA
+
+define signext i16 @sh1add(i64 %0, ptr %1) {
+; RV32I-LABEL: sh1add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lh a0, 0(a0)
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: sh1add:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: sh1add a0, a0, a2
+; RV32ZBA-NEXT: lh a0, 0(a0)
+; RV32ZBA-NEXT: ret
+ %3 = getelementptr inbounds i16, ptr %1, i64 %0
+ %4 = load i16, ptr %3
+ ret i16 %4
+}
+
+define i32 @sh2add(i64 %0, ptr %1) {
+; RV32I-LABEL: sh2add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 2
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: sh2add:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: sh2add a0, a0, a2
+; RV32ZBA-NEXT: lw a0, 0(a0)
+; RV32ZBA-NEXT: ret
+ %3 = getelementptr inbounds i32, ptr %1, i64 %0
+ %4 = load i32, ptr %3
+ ret i32 %4
+}
+
+define i64 @sh3add(i64 %0, ptr %1) {
+; RV32I-LABEL: sh3add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 3
+; RV32I-NEXT: add a2, a2, a0
+; RV32I-NEXT: lw a0, 0(a2)
+; RV32I-NEXT: lw a1, 4(a2)
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: sh3add:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: sh3add a1, a0, a2
+; RV32ZBA-NEXT: lw a0, 0(a1)
+; RV32ZBA-NEXT: lw a1, 4(a1)
+; RV32ZBA-NEXT: ret
+ %3 = getelementptr inbounds i64, ptr %1, i64 %0
+ %4 = load i64, ptr %3
+ ret i64 %4
+}
+
+define i32 @srli_1_sh2add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_1_sh2add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srli a1, a1, 1
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: srli_1_sh2add:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: srli a1, a1, 1
+; RV32ZBA-NEXT: sh2add a0, a1, a0
+; RV32ZBA-NEXT: lw a0, 0(a0)
+; RV32ZBA-NEXT: ret
+ %3 = lshr i32 %1, 1
+ %4 = getelementptr inbounds i32, ptr %0, i32 %3
+ %5 = load i32, ptr %4, align 4
+ ret i32 %5
+}
+
+define i64 @srli_2_sh3add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_2_sh3add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: add a1, a0, a1
+; RV32I-NEXT: lw a0, 0(a1)
+; RV32I-NEXT: lw a1, 4(a1)
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: srli_2_sh3add:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: srli a1, a1, 2
+; RV32ZBA-NEXT: sh3add a1, a1, a0
+; RV32ZBA-NEXT: lw a0, 0(a1)
+; RV32ZBA-NEXT: lw a1, 4(a1)
+; RV32ZBA-NEXT: ret
+ %3 = lshr i32 %1, 2
+ %4 = getelementptr inbounds i64, ptr %0, i32 %3
+ %5 = load i64, ptr %4, align 8
+ ret i64 %5
+}
+
+define signext i16 @srli_2_sh1add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_2_sh1add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lh a0, 0(a0)
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: srli_2_sh1add:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: srli a1, a1, 2
+; RV32ZBA-NEXT: sh1add a0, a1, a0
+; RV32ZBA-NEXT: lh a0, 0(a0)
+; RV32ZBA-NEXT: ret
+ %3 = lshr i32 %1, 2
+ %4 = getelementptr inbounds i16, ptr %0, i32 %3
+ %5 = load i16, ptr %4, align 2
+ ret i16 %5
+}
+
+define i32 @srli_3_sh2add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_3_sh2add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srli a1, a1, 3
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: srli_3_sh2add:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: srli a1, a1, 3
+; RV32ZBA-NEXT: sh2add a0, a1, a0
+; RV32ZBA-NEXT: lw a0, 0(a0)
+; RV32ZBA-NEXT: ret
+ %3 = lshr i32 %1, 3
+ %4 = getelementptr inbounds i32, ptr %0, i32 %3
+ %5 = load i32, ptr %4, align 4
+ ret i32 %5
+}
+
+define i64 @srli_4_sh3add(ptr %0, i32 %1) {
+; RV32I-LABEL: srli_4_sh3add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srli a1, a1, 4
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: add a1, a0, a1
+; RV32I-NEXT: lw a0, 0(a1)
+; RV32I-NEXT: lw a1, 4(a1)
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: srli_4_sh3add:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: srli a1, a1, 4
+; RV32ZBA-NEXT: sh3add a1, a1, a0
+; RV32ZBA-NEXT: lw a0, 0(a1)
+; RV32ZBA-NEXT: lw a1, 4(a1)
+; RV32ZBA-NEXT: ret
+ %3 = lshr i32 %1, 4
+ %4 = getelementptr inbounds i64, ptr %0, i32 %3
+ %5 = load i64, ptr %4, align 8
+ ret i64 %5
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
new file mode 100644
index 00000000000000..2bd0c78659b004
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
@@ -0,0 +1,1682 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV64I
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+m,+zba -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBANOZBB
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+m,+zba,+zbb -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB
+
+define i64 @slliuw(i64 %a) nounwind {
+; RV64I-LABEL: slliuw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 33
+; RV64I-NEXT: addi a1, a1, -2
+; RV64I-NEXT: slli a0, a0, 1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: slliuw:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 1
+; RV64ZBA-NEXT: srli a0, a0, 1
+; RV64ZBA-NEXT: slli.uw a0, a0, 1
+; RV64ZBA-NEXT: ret
+ %conv1 = shl i64 %a, 1
+ %shl = and i64 %conv1, 8589934590
+ ret i64 %shl
+}
+
+define i128 @slliuw_2(i32 signext %0, ptr %1) {
+; RV64I-LABEL: slliuw_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 28
+; RV64I-NEXT: add a1, a1, a0
+; RV64I-NEXT: ld a0, 0(a1)
+; RV64I-NEXT: ld a1, 8(a1)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: slliuw_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli.uw a0, a0, 4
+; RV64ZBA-NEXT: add a1, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a1)
+; RV64ZBA-NEXT: ld a1, 8(a1)
+; RV64ZBA-NEXT: ret
+ %3 = zext i32 %0 to i64
+ %4 = getelementptr inbounds i128, ptr %1, i64 %3
+ %5 = load i128, ptr %4
+ ret i128 %5
+}
+
+define i64 @adduw(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: adduw:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: add.uw a0, a1, a0
+; RV64ZBA-NEXT: ret
+ %and = and i64 %b, 4294967295
+ %add = add i64 %and, %a
+ ret i64 %add
+}
+
+define signext i8 @adduw_2(i32 signext %0, ptr %1) {
+; RV64I-LABEL: adduw_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lb a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: adduw_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: add.uw a0, a0, a1
+; RV64ZBA-NEXT: lb a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = zext i32 %0 to i64
+ %4 = getelementptr inbounds i8, ptr %1, i64 %3
+ %5 = load i8, ptr %4
+ ret i8 %5
+}
+
+define i64 @zextw_i64(i64 %a) nounwind {
+; RV64I-LABEL: zextw_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: zextw_i64:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: zext.w a0, a0
+; RV64ZBA-NEXT: ret
+ %and = and i64 %a, 4294967295
+ ret i64 %and
+}
+
+; This makes sure targetShrinkDemandedConstant changes the and immediate to
+; allow zext.w or slli+srli.
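+; (The later ori sets bit 0, so bit 0 of the and result is not demanded and the
+; 0xfffffffe mask may be widened to 0xffffffff, i.e. a plain zero extend.)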
+define i64 @zextw_demandedbits_i64(i64 %0) {
+; RV64I-LABEL: zextw_demandedbits_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: addi a1, a1, -2
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ori a0, a0, 1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: zextw_demandedbits_i64:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a1, -2
+; RV64ZBA-NEXT: zext.w a1, a1
+; RV64ZBA-NEXT: and a0, a0, a1
+; RV64ZBA-NEXT: ori a0, a0, 1
+; RV64ZBA-NEXT: ret
+ %2 = and i64 %0, 4294967294
+ %3 = or i64 %2, 1
+ ret i64 %3
+}
+
+define signext i16 @sh1add(i64 %0, ptr %1) {
+; RV64I-LABEL: sh1add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 1
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add a0, a0, a1
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = getelementptr inbounds i16, ptr %1, i64 %0
+ %4 = load i16, ptr %3
+ ret i16 %4
+}
+
+define signext i32 @sh2add(i64 %0, ptr %1) {
+; RV64I-LABEL: sh2add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add a0, a0, a1
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = getelementptr inbounds i32, ptr %1, i64 %0
+ %4 = load i32, ptr %3
+ ret i32 %4
+}
+
+define i64 @sh3add(i64 %0, ptr %1) {
+; RV64I-LABEL: sh3add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add a0, a0, a1
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = getelementptr inbounds i64, ptr %1, i64 %0
+ %4 = load i64, ptr %3
+ ret i64 %4
+}
+
+define signext i16 @sh1adduw(i32 signext %0, ptr %1) {
+; RV64I-LABEL: sh1adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 31
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1adduw:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = zext i32 %0 to i64
+ %4 = getelementptr inbounds i16, ptr %1, i64 %3
+ %5 = load i16, ptr %4
+ ret i16 %5
+}
+
+define i64 @sh1adduw_2(i64 %0, i64 %1) {
+; RV64I-LABEL: sh1adduw_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 33
+; RV64I-NEXT: addi a2, a2, -2
+; RV64I-NEXT: slli a0, a0, 1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1adduw_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 1
+; RV64ZBA-NEXT: srli a0, a0, 1
+; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 1
+ %4 = and i64 %3, 8589934590
+ %5 = add i64 %4, %1
+ ret i64 %5
+}
+
+define i64 @sh1adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh1adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 33
+; RV64I-NEXT: addi a2, a2, -2
+; RV64I-NEXT: slli a0, a0, 1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 1
+; RV64ZBA-NEXT: srli a0, a0, 1
+; RV64ZBA-NEXT: slli.uw a0, a0, 1
+; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 1
+ %4 = and i64 %3, 8589934590
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
+define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
+; RV64I-LABEL: sh2adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 30
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2adduw:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = zext i32 %0 to i64
+ %4 = getelementptr inbounds i32, ptr %1, i64 %3
+ %5 = load i32, ptr %4
+ ret i32 %5
+}
+
+define i64 @sh2adduw_2(i64 %0, i64 %1) {
+; RV64I-LABEL: sh2adduw_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 34
+; RV64I-NEXT: addi a2, a2, -4
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2adduw_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 2
+; RV64ZBA-NEXT: srli a0, a0, 2
+; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 2
+ %4 = and i64 %3, 17179869180
+ %5 = add i64 %4, %1
+ ret i64 %5
+}
+
+define i64 @sh2adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh2adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 34
+; RV64I-NEXT: addi a2, a2, -4
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 2
+; RV64ZBA-NEXT: srli a0, a0, 2
+; RV64ZBA-NEXT: slli.uw a0, a0, 2
+; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 2
+ %4 = and i64 %3, 17179869180
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
+define i64 @sh3adduw(i32 signext %0, ptr %1) {
+; RV64I-LABEL: sh3adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 29
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3adduw:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = zext i32 %0 to i64
+ %4 = getelementptr inbounds i64, ptr %1, i64 %3
+ %5 = load i64, ptr %4
+ ret i64 %5
+}
+
+define i64 @sh3adduw_2(i64 %0, i64 %1) {
+; RV64I-LABEL: sh3adduw_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 35
+; RV64I-NEXT: addi a2, a2, -8
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3adduw_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 3
+; RV64ZBA-NEXT: srli a0, a0, 3
+; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 3
+ %4 = and i64 %3, 34359738360
+ %5 = add i64 %4, %1
+ ret i64 %5
+}
+
+define i64 @sh3adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh3adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 35
+; RV64I-NEXT: addi a2, a2, -8
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 3
+; RV64ZBA-NEXT: srli a0, a0, 3
+; RV64ZBA-NEXT: slli.uw a0, a0, 3
+; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 3
+ %4 = and i64 %3, 34359738360
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
+; Make sure we use sext.b+srai+zext.w for Zba+Zbb.
+; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
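+; (A possible 3-shift sequence: slli a0, a0, 56; srai a0, a0, 63; srli a0, a0, 32.
+; The ashr by 9 leaves only copies of the sign bit, so the result is 0 or 0xffffffff.)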
+define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
+; RV64I-LABEL: sext_ashr_zext_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 56
+; RV64I-NEXT: srai a0, a0, 63
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64ZBANOZBB-LABEL: sext_ashr_zext_i8:
+; RV64ZBANOZBB: # %bb.0:
+; RV64ZBANOZBB-NEXT: slli a0, a0, 56
+; RV64ZBANOZBB-NEXT: srai a0, a0, 63
+; RV64ZBANOZBB-NEXT: zext.w a0, a0
+; RV64ZBANOZBB-NEXT: ret
+;
+; RV64ZBAZBB-LABEL: sext_ashr_zext_i8:
+; RV64ZBAZBB: # %bb.0:
+; RV64ZBAZBB-NEXT: sext.b a0, a0
+; RV64ZBAZBB-NEXT: srai a0, a0, 9
+; RV64ZBAZBB-NEXT: zext.w a0, a0
+; RV64ZBAZBB-NEXT: ret
+ %ext = sext i8 %a to i32
+ %1 = ashr i32 %ext, 9
+ ret i32 %1
+}
+
+define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add1:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add1:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: sh3add a1, a2, a1
+; RV64ZBA-NEXT: add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %shl
+ %add2 = add nsw i64 %add, %x
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add2:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add2:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %x
+ %add2 = add nsw i64 %add, %shl
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add3:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add3:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: sh3add a1, a2, a1
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %shl
+ %add2 = add nsw i64 %x, %add
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add4:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add4:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: sh3add a0, a2, a0
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %x, %shl
+ %add2 = add nsw i64 %add, %shl1
+ ret i64 %add2
+}
+
+; Make sure we use sext.h+srai+zext.w for Zba+Zbb.
+; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
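+; (A possible 3-shift sequence: slli a0, a0, 48; srai a0, a0, 25; srli a0, a0, 32.
+; This keeps the seven payload bits, the sign fill, and the 32-bit zero extension.)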
+define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
+; RV64I-LABEL: sext_ashr_zext_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srai a0, a0, 57
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64ZBANOZBB-LABEL: sext_ashr_zext_i16:
+; RV64ZBANOZBB: # %bb.0:
+; RV64ZBANOZBB-NEXT: slli a0, a0, 48
+; RV64ZBANOZBB-NEXT: srai a0, a0, 57
+; RV64ZBANOZBB-NEXT: zext.w a0, a0
+; RV64ZBANOZBB-NEXT: ret
+;
+; RV64ZBAZBB-LABEL: sext_ashr_zext_i16:
+; RV64ZBAZBB: # %bb.0:
+; RV64ZBAZBB-NEXT: sext.h a0, a0
+; RV64ZBAZBB-NEXT: srai a0, a0, 9
+; RV64ZBAZBB-NEXT: zext.w a0, a0
+; RV64ZBAZBB-NEXT: ret
+ %ext = sext i16 %a to i32
+ %1 = ashr i32 %ext, 9
+ ret i32 %1
+}
+
+; This is the IR you get from InstCombine if you take the difference of 2 pointers
+; and cast it to unsigned before using it as an index.
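+; (Roughly what C like "return base[(unsigned)(p - q)];" with "short *p, *q"
+; produces: the exact lshr divides the byte difference by the element size.)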
+define signext i16 @sh1adduw_ptrdiff(i64 %diff, ptr %baseptr) {
+; RV64I-LABEL: sh1adduw_ptrdiff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 31
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1adduw_ptrdiff:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a0, a0, 1
+; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %ptrdiff = lshr exact i64 %diff, 1
+ %cast = and i64 %ptrdiff, 4294967295
+ %ptr = getelementptr inbounds i16, ptr %baseptr, i64 %cast
+ %res = load i16, ptr %ptr
+ ret i16 %res
+}
+
+define signext i32 @sh2adduw_ptrdiff(i64 %diff, ptr %baseptr) {
+; RV64I-LABEL: sh2adduw_ptrdiff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a0, a0, 2
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 30
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2adduw_ptrdiff:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a0, a0, 2
+; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %ptrdiff = lshr exact i64 %diff, 2
+ %cast = and i64 %ptrdiff, 4294967295
+ %ptr = getelementptr inbounds i32, ptr %baseptr, i64 %cast
+ %res = load i32, ptr %ptr
+ ret i32 %res
+}
+
+define i64 @sh3adduw_ptrdiff(i64 %diff, ptr %baseptr) {
+; RV64I-LABEL: sh3adduw_ptrdiff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a0, a0, 3
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 29
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3adduw_ptrdiff:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a0, a0, 3
+; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %ptrdiff = lshr exact i64 %diff, 3
+ %cast = and i64 %ptrdiff, 4294967295
+ %ptr = getelementptr inbounds i64, ptr %baseptr, i64 %cast
+ %res = load i64, ptr %ptr
+ ret i64 %res
+}
+
+define signext i16 @srliw_1_sh1add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_1_sh1add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 1
+; RV64I-NEXT: slli a1, a1, 1
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_1_sh1add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 1
+; RV64ZBA-NEXT: sh1add a0, a1, a0
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 1
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i16, ptr %0, i64 %4
+ %6 = load i16, ptr %5, align 2
+ ret i16 %6
+}
+
+define i128 @slliuw_ptrdiff(i64 %diff, ptr %baseptr) {
+; RV64I-LABEL: slliuw_ptrdiff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a0, a0, 4
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 28
+; RV64I-NEXT: add a1, a1, a0
+; RV64I-NEXT: ld a0, 0(a1)
+; RV64I-NEXT: ld a1, 8(a1)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: slliuw_ptrdiff:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a0, a0, 4
+; RV64ZBA-NEXT: slli.uw a0, a0, 4
+; RV64ZBA-NEXT: add a1, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a1)
+; RV64ZBA-NEXT: ld a1, 8(a1)
+; RV64ZBA-NEXT: ret
+ %ptrdiff = lshr exact i64 %diff, 4
+ %cast = and i64 %ptrdiff, 4294967295
+ %ptr = getelementptr inbounds i128, ptr %baseptr, i64 %cast
+ %res = load i128, ptr %ptr
+ ret i128 %res
+}
+
+define signext i32 @srliw_2_sh2add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_2_sh2add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 2
+; RV64I-NEXT: slli a1, a1, 2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_2_sh2add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 2
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 2
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i32, ptr %0, i64 %4
+ %6 = load i32, ptr %5, align 4
+ ret i32 %6
+}
+
+define i64 @srliw_3_sh3add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_3_sh3add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 3
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_3_sh3add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 3
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 3
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i64, ptr %0, i64 %4
+ %6 = load i64, ptr %5, align 8
+ ret i64 %6
+}
+
+define signext i32 @srliw_1_sh2add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_1_sh2add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 1
+; RV64I-NEXT: slli a1, a1, 2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_1_sh2add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 1
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 1
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i32, ptr %0, i64 %4
+ %6 = load i32, ptr %5, align 4
+ ret i32 %6
+}
+
+define i64 @srliw_1_sh3add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_1_sh3add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 1
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_1_sh3add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 1
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 1
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i64, ptr %0, i64 %4
+ %6 = load i64, ptr %5, align 8
+ ret i64 %6
+}
+
+define i64 @srliw_2_sh3add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_2_sh3add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 2
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_2_sh3add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 2
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i64, ptr %0, i64 %4
+ %6 = load i64, ptr %5, align 8
+ ret i64 %6
+}
+
+define signext i16 @srliw_2_sh1add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_2_sh1add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 2
+; RV64I-NEXT: slli a1, a1, 1
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_2_sh1add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 2
+; RV64ZBA-NEXT: sh1add a0, a1, a0
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 2
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i16, ptr %0, i64 %4
+ %6 = load i16, ptr %5, align 2
+ ret i16 %6
+}
+
+
+define signext i32 @srliw_3_sh2add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_3_sh2add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 3
+; RV64I-NEXT: slli a1, a1, 2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_3_sh2add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 3
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 3
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i32, ptr %0, i64 %4
+ %6 = load i32, ptr %5, align 4
+ ret i32 %6
+}
+
+define i64 @srliw_4_sh3add(ptr %0, i32 signext %1) {
+; RV64I-LABEL: srliw_4_sh3add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a1, a1, 4
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srliw_4_sh3add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srliw a1, a1, 4
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i32 %1, 4
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i64, ptr %0, i64 %4
+ %6 = load i64, ptr %5, align 8
+ ret i64 %6
+}
+
+define signext i32 @srli_1_sh2add(ptr %0, i64 %1) {
+; RV64I-LABEL: srli_1_sh2add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a1, a1, 1
+; RV64I-NEXT: slli a1, a1, 2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_1_sh2add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a1, a1, 1
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i64 %1, 1
+ %4 = getelementptr inbounds i32, ptr %0, i64 %3
+ %5 = load i32, ptr %4, align 4
+ ret i32 %5
+}
+
+define i64 @srli_2_sh3add(ptr %0, i64 %1) {
+; RV64I-LABEL: srli_2_sh3add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_2_sh3add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a1, a1, 2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i64 %1, 2
+ %4 = getelementptr inbounds i64, ptr %0, i64 %3
+ %5 = load i64, ptr %4, align 8
+ ret i64 %5
+}
+
+define signext i16 @srli_2_sh1add(ptr %0, i64 %1) {
+; RV64I-LABEL: srli_2_sh1add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: slli a1, a1, 1
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_2_sh1add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a1, a1, 2
+; RV64ZBA-NEXT: sh1add a0, a1, a0
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i64 %1, 2
+ %4 = getelementptr inbounds i16, ptr %0, i64 %3
+ %5 = load i16, ptr %4, align 2
+ ret i16 %5
+}
+
+define signext i32 @srli_3_sh2add(ptr %0, i64 %1) {
+; RV64I-LABEL: srli_3_sh2add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a1, a1, 3
+; RV64I-NEXT: slli a1, a1, 2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_3_sh2add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a1, a1, 3
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i64 %1, 3
+ %4 = getelementptr inbounds i32, ptr %0, i64 %3
+ %5 = load i32, ptr %4, align 4
+ ret i32 %5
+}
+
+define i64 @srli_4_sh3add(ptr %0, i64 %1) {
+; RV64I-LABEL: srli_4_sh3add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a1, a1, 4
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_4_sh3add:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a1, a1, 4
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = lshr i64 %1, 4
+ %4 = getelementptr inbounds i64, ptr %0, i64 %3
+ %5 = load i64, ptr %4, align 8
+ ret i64 %5
+}
+
+define signext i16 @shl_2_sh1adduw(ptr %0, i32 signext %1) {
+; RV64I-LABEL: shl_2_sh1adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 2
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 31
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: shl_2_sh1adduw:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a1, a1, 2
+; RV64ZBA-NEXT: sh1add.uw a0, a1, a0
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = shl i32 %1, 2
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i16, ptr %0, i64 %4
+ %6 = load i16, ptr %5, align 2
+ ret i16 %6
+}
+
+define signext i32 @shl_16_sh2adduw(ptr %0, i32 signext %1) {
+; RV64I-LABEL: shl_16_sh2adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 30
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: shl_16_sh2adduw:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a1, a1, 16
+; RV64ZBA-NEXT: sh2add.uw a0, a1, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = shl i32 %1, 16
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i32, ptr %0, i64 %4
+ %6 = load i32, ptr %5, align 4
+ ret i32 %6
+}
+
+define i64 @shl_31_sh3adduw(ptr %0, i32 signext %1) {
+; RV64I-LABEL: shl_31_sh3adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 31
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 29
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: shl_31_sh3adduw:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a1, a1, 31
+; RV64ZBA-NEXT: sh3add.uw a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %3 = shl i32 %1, 31
+ %4 = zext i32 %3 to i64
+ %5 = getelementptr inbounds i64, ptr %0, i64 %4
+ %6 = load i64, ptr %5, align 8
+ ret i64 %6
+}
+
+define i64 @pack_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: pack_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: pack_i64:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: zext.w a0, a0
+; RV64ZBA-NEXT: slli a1, a1, 32
+; RV64ZBA-NEXT: or a0, a1, a0
+; RV64ZBA-NEXT: ret
+ %shl = and i64 %a, 4294967295
+ %shl1 = shl i64 %b, 32
+ %or = or i64 %shl1, %shl
+ ret i64 %or
+}
+
+define i64 @pack_i64_2(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: pack_i64_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: pack_i64_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: zext.w a0, a0
+; RV64ZBA-NEXT: zext.w a1, a1
+; RV64ZBA-NEXT: slli a1, a1, 32
+; RV64ZBA-NEXT: or a0, a1, a0
+; RV64ZBA-NEXT: ret
+ %zexta = zext i32 %a to i64
+ %zextb = zext i32 %b to i64
+ %shl1 = shl i64 %zextb, 32
+ %or = or i64 %shl1, %zexta
+ ret i64 %or
+}
+
+define i64 @pack_i64_disjoint(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: pack_i64_disjoint:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: pack_i64_disjoint:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: zext.w a0, a0
+; RV64ZBA-NEXT: or a0, a1, a0
+; RV64ZBA-NEXT: ret
+ %shl = and i64 %a, 4294967295
+ %or = or disjoint i64 %b, %shl
+ ret i64 %or
+}
+
+define i64 @pack_i64_disjoint_2(i32 signext %a, i64 %b) nounwind {
+; RV64I-LABEL: pack_i64_disjoint_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: pack_i64_disjoint_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: zext.w a0, a0
+; RV64ZBA-NEXT: or a0, a1, a0
+; RV64ZBA-NEXT: ret
+ %zexta = zext i32 %a to i64
+ %or = or disjoint i64 %b, %zexta
+ ret i64 %or
+}
+
+define i8 @array_index_sh1_sh0(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh1_sh0:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 1
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lbu a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh1_sh0:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add a0, a1, a0
+; RV64ZBA-NEXT: add a0, a0, a2
+; RV64ZBA-NEXT: lbu a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [2 x i8], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i8, ptr %a, align 1
+ ret i8 %b
+}
+
+define i16 @array_index_sh1_sh1(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh1_sh1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 1
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh1_sh1:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: sh1add a0, a2, a0
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [2 x i16], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i16, ptr %a, align 2
+ ret i16 %b
+}
+
+define i32 @array_index_sh1_sh2(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh1_sh2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 2
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh1_sh2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: sh2add a0, a2, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [2 x i32], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i32, ptr %a, align 4
+ ret i32 %b
+}
+
+define i64 @array_index_sh1_sh3(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh1_sh3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 4
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh1_sh3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [2 x i64], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i64, ptr %a, align 8
+ ret i64 %b
+}
+
+define i8 @array_index_sh2_sh0(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh2_sh0:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 2
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lbu a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh2_sh0:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: add a0, a0, a2
+; RV64ZBA-NEXT: lbu a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [4 x i8], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i8, ptr %a, align 1
+ ret i8 %b
+}
+
+define i16 @array_index_sh2_sh1(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh2_sh1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 1
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh2_sh1:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: sh1add a0, a2, a0
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [4 x i16], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i16, ptr %a, align 2
+ ret i16 %b
+}
+
+define i32 @array_index_sh2_sh2(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh2_sh2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 4
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 2
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh2_sh2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add a1, a1, a2
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [4 x i32], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i32, ptr %a, align 4
+ ret i32 %b
+}
+
+define i64 @array_index_sh2_sh3(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh2_sh3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 5
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh2_sh3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [4 x i64], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i64, ptr %a, align 8
+ ret i64 %b
+}
+
+define i8 @array_index_sh3_sh0(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh3_sh0:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: lbu a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh3_sh0:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: add a0, a0, a2
+; RV64ZBA-NEXT: lbu a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [8 x i8], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i8, ptr %a, align 1
+ ret i8 %b
+}
+
+define i16 @array_index_sh3_sh1(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh3_sh1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 4
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 1
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh3_sh1:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh1add a0, a1, a0
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [8 x i16], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i16, ptr %a, align 2
+ ret i16 %b
+}
+
+define i32 @array_index_sh3_sh2(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh3_sh2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 5
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 2
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh3_sh2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [8 x i32], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i32, ptr %a, align 4
+ ret i32 %b
+}
+
+define i64 @array_index_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh3_sh3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh3_sh3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [8 x i64], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i64, ptr %a, align 8
+ ret i64 %b
+}
+
+; Similar to above, but with a lshr on one of the indices. This requires
+; special handling during isel to form a shift pair.
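+; (The offset here is (idx1 >> 58) * 64 + idx2 * 8; the srli stays explicit and
+; the * 64 is split across the scales of the two sh3add instructions.)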
+define i64 @array_index_lshr_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_lshr_sh3_sh3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srli a1, a1, 58
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_lshr_sh3_sh3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a1, a1, 58
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %shr = lshr i64 %idx1, 58
+ %a = getelementptr inbounds [8 x i64], ptr %p, i64 %shr, i64 %idx2
+ %b = load i64, ptr %a, align 8
+ ret i64 %b
+}
+
+define i8 @array_index_sh4_sh0(ptr %p, i64 %idx1, i64 %idx2) {
+; CHECK-LABEL: array_index_sh4_sh0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: lbu a0, 0(a0)
+; CHECK-NEXT: ret
+ %a = getelementptr inbounds [16 x i8], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i8, ptr %a, align 1
+ ret i8 %b
+}
+
+define i16 @array_index_sh4_sh1(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh4_sh1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 5
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 1
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh4_sh1:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a1, a1, 5
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: sh1add a0, a2, a0
+; RV64ZBA-NEXT: lh a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [16 x i16], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i16, ptr %a, align 2
+ ret i16 %b
+}
+
+define i32 @array_index_sh4_sh2(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh4_sh2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 2
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh4_sh2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: sh2add a0, a2, a0
+; RV64ZBA-NEXT: lw a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [16 x i32], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i32, ptr %a, align 4
+ ret i32 %b
+}
+
+define i64 @array_index_sh4_sh3(ptr %p, i64 %idx1, i64 %idx2) {
+; RV64I-LABEL: array_index_sh4_sh3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 7
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: array_index_sh4_sh3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a1, a1, 7
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: sh3add a0, a2, a0
+; RV64ZBA-NEXT: ld a0, 0(a0)
+; RV64ZBA-NEXT: ret
+ %a = getelementptr inbounds [16 x i64], ptr %p, i64 %idx1, i64 %idx2
+ %b = load i64, ptr %a, align 8
+ ret i64 %b
+}
+
+define ptr @gep_lshr_i32(ptr %0, i64 %1) {
+; RV64I-LABEL: gep_lshr_i32:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: li a2, 80
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: gep_lshr_i32:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: srli a1, a1, 2
+; RV64ZBA-NEXT: zext.w a1, a1
+; RV64ZBA-NEXT: li a2, 80
+; RV64ZBA-NEXT: mul a1, a1, a2
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: ret
+entry:
+ %2 = lshr exact i64 %1, 2
+ %3 = and i64 %2, 4294967295
+ %5 = getelementptr [80 x i8], ptr %0, i64 %3
+ ret ptr %5
+}
+
+define i64 @srli_slliuw(i64 %1) {
+; RV64I-LABEL: srli_slliuw:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: srli a0, a0, 2
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 28
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_slliuw:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: srli a0, a0, 2
+; RV64ZBA-NEXT: slli.uw a0, a0, 4
+; RV64ZBA-NEXT: ret
+entry:
+ %2 = lshr exact i64 %1, 2
+ %3 = and i64 %2, 4294967295
+ %4 = shl i64 %3, 4
+ ret i64 %4
+}
+
+define i64 @srli_slliuw_canonical(i64 %0) {
+; RV64I-LABEL: srli_slliuw_canonical:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 36
+; RV64I-NEXT: addi a1, a1, -16
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_slliuw_canonical:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a0, a0, 2
+; RV64ZBA-NEXT: srli a0, a0, 4
+; RV64ZBA-NEXT: slli.uw a0, a0, 4
+; RV64ZBA-NEXT: ret
+entry:
+ %1 = shl i64 %0, 2
+ %2 = and i64 %1, 68719476720
+ ret i64 %2
+}
+
+; Make sure we don't accidentally use slli.uw with a shift of 32.
+define i64 @srli_slliuw_negative_test(i64 %0) {
+; CHECK-LABEL: srli_slliuw_negative_test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: srli a0, a0, 6
+; CHECK-NEXT: slli a0, a0, 32
+; CHECK-NEXT: ret
+entry:
+ %1 = lshr i64 %0, 6
+ %2 = shl i64 %1, 32
+ ret i64 %2
+}
+
+define i64 @srli_slli_i16(i64 %1) {
+; RV64I-LABEL: srli_slli_i16:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: lui a1, 16
+; RV64I-NEXT: addiw a1, a1, -1
+; RV64I-NEXT: srli a0, a0, 2
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: ret
+;
+; RV64ZBANOZBB-LABEL: srli_slli_i16:
+; RV64ZBANOZBB: # %bb.0: # %entry
+; RV64ZBANOZBB-NEXT: lui a1, 16
+; RV64ZBANOZBB-NEXT: addiw a1, a1, -1
+; RV64ZBANOZBB-NEXT: srli a0, a0, 2
+; RV64ZBANOZBB-NEXT: and a0, a0, a1
+; RV64ZBANOZBB-NEXT: slli a0, a0, 4
+; RV64ZBANOZBB-NEXT: ret
+;
+; RV64ZBAZBB-LABEL: srli_slli_i16:
+; RV64ZBAZBB: # %bb.0: # %entry
+; RV64ZBAZBB-NEXT: srli a0, a0, 2
+; RV64ZBAZBB-NEXT: zext.h a0, a0
+; RV64ZBAZBB-NEXT: slli a0, a0, 4
+; RV64ZBAZBB-NEXT: ret
+entry:
+ %2 = lshr exact i64 %1, 2
+ %3 = and i64 %2, 65535
+ %4 = shl i64 %3, 4
+ ret i64 %4
+}
+
+define i64 @srli_slliuw_2(i64 %1) {
+; RV64I-LABEL: srli_slliuw_2:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: srli a0, a0, 18
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 29
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_slliuw_2:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: srli a0, a0, 18
+; RV64ZBA-NEXT: slli.uw a0, a0, 3
+; RV64ZBA-NEXT: ret
+entry:
+ %2 = lshr i64 %1, 18
+ %3 = and i64 %2, 4294967295
+ %4 = shl i64 %3, 3
+ ret i64 %4
+}
+
+define i64 @srli_slliuw_canonical_2(i64 %0) {
+; RV64I-LABEL: srli_slliuw_canonical_2:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 35
+; RV64I-NEXT: addi a1, a1, -8
+; RV64I-NEXT: srli a0, a0, 15
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srli_slliuw_canonical_2:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: srli a0, a0, 15
+; RV64ZBA-NEXT: srli a0, a0, 3
+; RV64ZBA-NEXT: slli.uw a0, a0, 3
+; RV64ZBA-NEXT: ret
+entry:
+ %1 = lshr i64 %0, 15
+ %2 = and i64 %1, 34359738360
+ ret i64 %2
+}
+
+define ptr @srai_srli_sh3add(ptr %0, i64 %1) nounwind {
+; RV64I-LABEL: srai_srli_sh3add:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: srai a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 6
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: srai_srli_sh3add:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: srai a1, a1, 32
+; RV64ZBA-NEXT: srli a1, a1, 6
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %2 = ashr i64 %1, 32
+ %3 = lshr i64 %2, 6
+ %4 = getelementptr i64, ptr %0, i64 %3
+ ret ptr %4
+}
+
+define ptr @srai_srli_slli(ptr %0, i64 %1) nounwind {
+; CHECK-LABEL: srai_srli_slli:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: srai a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 6
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+ %2 = ashr i64 %1, 32
+ %3 = lshr i64 %2, 6
+ %4 = getelementptr i128, ptr %0, i64 %3
+ ret ptr %4
+}
+
+; Negative test to make sure the peephole added for srai_srli_slli and
+; srai_srli_sh3add doesn't break this.
+define i64 @srai_andi(i64 %x) nounwind {
+; CHECK-LABEL: srai_andi:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: srai a0, a0, 8
+; CHECK-NEXT: andi a0, a0, -8
+; CHECK-NEXT: ret
+entry:
+ %y = ashr i64 %x, 8
+ %z = and i64 %y, -8
+ ret i64 %z
+}
+
+; Negative test to make sure the peephole added for srai_srli_slli and
+; srai_srli_sh3add doesn't break this.
+define i64 @srai_lui_and(i64 %x) nounwind {
+; CHECK-LABEL: srai_lui_and:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a1, 1048574
+; CHECK-NEXT: srai a0, a0, 8
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+ %y = ashr i64 %x, 8
+ %z = and i64 %y, -8192
+ ret i64 %z
+}
+
+define i64 @add_u32simm32_zextw(i64 %x) nounwind {
+; RV64I-LABEL: add_u32simm32_zextw:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: addi a1, a1, -2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a1, a1, 1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: add_u32simm32_zextw:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: li a1, -2
+; RV64ZBA-NEXT: zext.w a1, a1
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: zext.w a0, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %add = add i64 %x, 4294967294
+ %and = and i64 %add, 4294967295
+ ret i64 %and
+}