[llvm] 450f0bd - [RISCV] Add additional tests for D121833. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 14 16:10:29 PDT 2022


Author: Craig Topper
Date: 2022-07-14T16:10:14-07:00
New Revision: 450f0bd17b749850c50ce925cb987a5329a656f0

URL: https://github.com/llvm/llvm-project/commit/450f0bd17b749850c50ce925cb987a5329a656f0
DIFF: https://github.com/llvm/llvm-project/commit/450f0bd17b749850c50ce925cb987a5329a656f0.diff

LOG: [RISCV] Add additional tests for D121833. NFC

Added: 
    llvm/test/CodeGen/RISCV/shl-demanded.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/shl-demanded.ll b/llvm/test/CodeGen/RISCV/shl-demanded.ll
new file mode 100644
index 000000000000..a902c39787f1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/shl-demanded.ll
@@ -0,0 +1,277 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+
+define void @sext_shl_trunc_same_size(i16 %x, i32 %y, i16* %res) {
+; RV32I-LABEL: sext_shl_trunc_same_size:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    sh a0, 0(a2)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_shl_trunc_same_size:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    sh a0, 0(a2)
+; RV64I-NEXT:    ret
+  %conv = sext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = trunc i32 %shl to i16
+  store i16 %t, i16* %res
+  ret void
+}
+
+define void @zext_shl_trunc_same_size(i16 %x, i32 %y, i16* %res) {
+; RV32I-LABEL: zext_shl_trunc_same_size:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    sh a0, 0(a2)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_shl_trunc_same_size:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    sh a0, 0(a2)
+; RV64I-NEXT:    ret
+  %conv = zext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = trunc i32 %shl to i16
+  store i16 %t, i16* %res
+  ret void
+}
+
+define void @sext_shl_trunc_smaller(i16 %x, i32 %y, i8* %res) {
+; RV32I-LABEL: sext_shl_trunc_smaller:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    sb a0, 0(a2)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_shl_trunc_smaller:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    sb a0, 0(a2)
+; RV64I-NEXT:    ret
+  %conv = sext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = trunc i32 %shl to i8
+  store i8 %t, i8* %res
+  ret void
+}
+
+define void @zext_shl_trunc_smaller(i16 %x, i32 %y, i8* %res) {
+; RV32I-LABEL: zext_shl_trunc_smaller:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    sb a0, 0(a2)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_shl_trunc_smaller:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    sb a0, 0(a2)
+; RV64I-NEXT:    ret
+  %conv = zext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = trunc i32 %shl to i8
+  store i8 %t, i8* %res
+  ret void
+}
+
+; negative test - demanding 1 high-bit too many to change the extend
+
+define signext i17 @sext_shl_trunc_larger(i16 %x, i32 %y) {
+; RV32I-LABEL: sext_shl_trunc_larger:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 15
+; RV32I-NEXT:    srai a0, a0, 15
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_shl_trunc_larger:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 47
+; RV64I-NEXT:    srai a0, a0, 47
+; RV64I-NEXT:    ret
+  %conv = sext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = trunc i32 %shl to i17
+  ret i17 %t
+}
+
+; negative test - demanding 1 high-bit too many to change the extend
+
+define zeroext i17 @zext_shl_trunc_larger(i16 %x, i32 %y) {
+; RV32I-LABEL: zext_shl_trunc_larger:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 15
+; RV32I-NEXT:    srli a0, a0, 15
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_shl_trunc_larger:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 47
+; RV64I-NEXT:    srli a0, a0, 47
+; RV64I-NEXT:    ret
+  %conv = zext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = trunc i32 %shl to i17
+  ret i17 %t
+}
+
+define i32 @sext_shl_mask(i16 %x, i32 %y) {
+; RV32I-LABEL: sext_shl_mask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_shl_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    ret
+  %conv = sext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = and i32 %shl, 65535
+  ret i32 %t
+}
+
+define i32 @zext_shl_mask(i16 %x, i32 %y) {
+; RV32I-LABEL: zext_shl_mask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_shl_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    ret
+  %conv = zext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = and i32 %shl, 65535
+  ret i32 %t
+}
+
+; negative test - demanding a bit that could change with sext
+
+define i32 @sext_shl_mask_higher(i16 %x, i32 %y) {
+; RV32I-LABEL: sext_shl_mask_higher:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_shl_mask_higher:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %conv = sext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = and i32 %shl, 65536
+  ret i32 %t
+}
+
+; negative test - demanding a bit that could change with zext
+
+define i32 @zext_shl_mask_higher(i16 %x, i32 %y) {
+; RV32I-LABEL: zext_shl_mask_higher:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_shl_mask_higher:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %conv = zext i16 %x to i32
+  %shl = shl i32 %conv, %y
+  %t = and i32 %shl, 65536
+  ret i32 %t
+}
+
+; May need some, but not all of the bits set by the 'or'.
+
+define i32 @set_shl_mask(i32 %x, i32 %y) {
+; RV32I-LABEL: set_shl_mask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 48
+; RV32I-NEXT:    addi a2, a2, 1
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: set_shl_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 48
+; RV64I-NEXT:    addiw a2, a2, 1
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %z = or i32 %x, 196609
+  %s = shl i32 %z, %y
+  %r = and i32 %s, 65536
+  ret i32 %r
+}


        


More information about the llvm-commits mailing list