[llvm] d0bafb5 - [RISCV] Add coverage for zext.w/h interaction with shift transforms

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu May 9 09:52:31 PDT 2024


Author: Philip Reames
Date: 2024-05-09T09:52:23-07:00
New Revision: d0bafb5435d5ebd90cdf965a9b35bdfa05dde23b

URL: https://github.com/llvm/llvm-project/commit/d0bafb5435d5ebd90cdf965a9b35bdfa05dde23b
DIFF: https://github.com/llvm/llvm-project/commit/d0bafb5435d5ebd90cdf965a9b35bdfa05dde23b.diff

LOG: [RISCV] Add coverage for zext.w/h interaction with shift transforms

Two cases where folding the and (which could be a zext.w) through
shifts in generic DAG results in net worse code quality, and one
negative case where keeping a zext.h would result in a longer
critical path.
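
For context, the problematic shape is the generic DAG combine that moves
the and across the surrounding shifts. A rough IR-level sketch of the
before/after forms for the srli_slliw case below (function names @before
and @after are illustrative only, not part of the commit):

  ; Before: shift right, zero-extend i32->i64 (the and), shift left.
  ; The and maps directly to a zext.w / slli.uw on RV64ZBA.
  define i64 @before(i64 %x) {
    %s = lshr i64 %x, 2
    %z = and i64 %s, 4294967295
    %r = shl i64 %z, 4
    ret i64 %r
  }

  ; After the fold: one net shift left by 2 with an adjusted mask.
  ; The mask 0xFFFFFFF F0 form (68719476720) no longer matches slli.uw
  ; and has to be materialized (li/slli/addi in the RV64I output below).
  define i64 @after(i64 %x) {
    %s = shl i64 %x, 2
    %r = and i64 %s, 68719476720
    ret i64 %r
  }

In the folded form the constant mask sits on the critical path only as a
single and, but costs three extra instructions to build on RV64I, whereas
the unfolded form maps to srli plus slli.uw on RV64ZBA.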

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv64zba.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 5931e0982a4a6..8fe221f2a297a 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -2853,3 +2853,66 @@ entry:
   ret i64 %6
 }
 
+define ptr @gep_lshr_i32(ptr %0, i64 %1) {
+; RV64I-LABEL: gep_lshr_i32:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    srli a1, a1, 2
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    slli a2, a2, 36
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    mulhu a1, a1, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: gep_lshr_i32:
+; RV64ZBA:       # %bb.0: # %entry
+; RV64ZBA-NEXT:    slli a1, a1, 2
+; RV64ZBA-NEXT:    srli a1, a1, 4
+; RV64ZBA-NEXT:    slli.uw a1, a1, 4
+; RV64ZBA-NEXT:    sh2add a1, a1, a1
+; RV64ZBA-NEXT:    add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+entry:
+  %2 = lshr exact i64 %1, 2
+  %3 = and i64 %2, 4294967295
+  %5 = getelementptr [80 x i8], ptr %0, i64 %3
+  ret ptr %5
+}
+
+define i64 @srli_slliw(i64 %1) {
+; RV64I-LABEL: srli_slliw:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 36
+; RV64I-NEXT:    addi a1, a1, -16
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: srli_slliw:
+; RV64ZBA:       # %bb.0: # %entry
+; RV64ZBA-NEXT:    slli a0, a0, 2
+; RV64ZBA-NEXT:    srli a0, a0, 4
+; RV64ZBA-NEXT:    slli.uw a0, a0, 4
+; RV64ZBA-NEXT:    ret
+entry:
+  %2 = lshr exact i64 %1, 2
+  %3 = and i64 %2, 4294967295
+  %4 = shl i64 %3, 4
+  ret i64 %4
+}
+
+define i64 @srli_slli_i16(i64 %1) {
+; CHECK-LABEL: srli_slli_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    lui a1, 256
+; CHECK-NEXT:    addiw a1, a1, -16
+; CHECK-NEXT:    and a0, a0, a1
+; CHECK-NEXT:    ret
+entry:
+  %2 = lshr exact i64 %1, 2
+  %3 = and i64 %2, 65535
+  %4 = shl i64 %3, 4
+  ret i64 %4
+}

More information about the llvm-commits mailing list