[llvm] [RISCV][GISel] Added GISelPredicateCode to LeadingOnes*Mask (PR #119886)

via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 13 06:35:42 PST 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-globalisel

Author: Luke Quinn (lquinn2015)

<details>
<summary>Changes</summary>



---
Full diff: https://github.com/llvm/llvm-project/pull/119886.diff


5 Files Affected:

- (modified) llvm/lib/Target/RISCV/RISCVInstrInfo.td (+20-2) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll (+7-9) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll (+2-3) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll (+3-4) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll (+6-16) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 2f0d9de42b4865..02827cea94fbc6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -493,7 +493,15 @@ def LeadingOnesMask : PatLeaf<(imm), [{
   if (!N->hasOneUse())
     return false;
   return !isInt<32>(N->getSExtValue()) && isMask_64(~N->getSExtValue());
-}], TrailingZeros>;
+}], TrailingZeros> {
+  let GISelPredicateCode = [{
+    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
+      return false;
+    const auto &MO = MI.getOperand(1);
+    return !isInt<32>(MO.getCImm()->getSExtValue()) && 
+            isMask_64(~MO.getCImm()->getSExtValue());
+  }];
+}
 
 def TrailingOnesMask : PatLeaf<(imm), [{
   if (!N->hasOneUse())
@@ -520,7 +528,17 @@ def LeadingOnesWMask : PatLeaf<(imm), [{
   int64_t Imm = N->getSExtValue();
   return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
          Imm != UINT64_C(0xffffffff);
-}], TrailingZeros>;
+}], TrailingZeros> {
+  let GISelPredicateCode = [{
+    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
+      return false;
+    const auto &MO = MI.getOperand(1);
+    int64_t Imm = MO.getCImm()->getSExtValue();
+    return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
+         Imm != UINT64_C(0xffffffff);
+  }];
+
+}
 
 //===----------------------------------------------------------------------===//
 // Instruction Formats
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
index 534fec21ce7c47..66eb4372aefadb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
@@ -169,11 +169,10 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 ;
 ; RV64I-LABEL: fsgnj_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srli a1, a1, 63
 ; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
@@ -1354,12 +1353,11 @@ define double @fsgnjx_f64(double %x, double %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    li a3, 1023
-; RV64I-NEXT:    slli a2, a2, 63
-; RV64I-NEXT:    slli a3, a3, 52
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a3
+; RV64I-NEXT:    li a2, 1023
+; RV64I-NEXT:    srli a0, a0, 63
+; RV64I-NEXT:    slli a2, a2, 52
+; RV64I-NEXT:    slli a0, a0, 63
+; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
index 8d77d41ab6b455..1469d49e210e08 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
@@ -721,11 +721,10 @@ define double @copysign_f64(double %a, double %b) nounwind {
 ;
 ; RV64I-LABEL: copysign_f64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srli a1, a1, 63
 ; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
index 2fc25fbb39bb9b..eb48c90e14f803 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
@@ -108,12 +108,11 @@ define fp128 @fabs(fp128 %x) {
 define fp128 @fcopysign(fp128 %x, fp128 %y) {
 ; CHECK-LABEL: fcopysign:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, -1
 ; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    slli a2, a2, 63
+; CHECK-NEXT:    srli a3, a3, 63
 ; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    and a2, a3, a2
-; CHECK-NEXT:    or a1, a1, a2
+; CHECK-NEXT:    slli a3, a3, 63
+; CHECK-NEXT:    or a1, a1, a3
 ; CHECK-NEXT:    ret
   %a = call fp128 @llvm.copysign.f128(fp128 %x, fp128 %y)
   ret fp128 %a
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
index 993ba19caa6b4b..736bb8fea599ec 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
@@ -105,22 +105,12 @@ define i64 @zextw_i64(i64 %a) nounwind {
 ; This makes sure targetShrinkDemandedConstant changes the and immmediate to
 ; allow zext.w or slli+srli.
 define i64 @zextw_demandedbits_i64(i64 %0) {
-; RV64I-LABEL: zextw_demandedbits_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 1
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    addi a1, a1, -2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    ori a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV64ZBA-LABEL: zextw_demandedbits_i64:
-; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    li a1, -2
-; RV64ZBA-NEXT:    zext.w a1, a1
-; RV64ZBA-NEXT:    and a0, a0, a1
-; RV64ZBA-NEXT:    ori a0, a0, 1
-; RV64ZBA-NEXT:    ret
+; CHECK-LABEL: zextw_demandedbits_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    srliw a0, a0, 1
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    ori a0, a0, 1
+; CHECK-NEXT:    ret
   %2 = and i64 %0, 4294967294
   %3 = or i64 %2, 1
   ret i64 %3

``````````

</details>


https://github.com/llvm/llvm-project/pull/119886


More information about the llvm-commits mailing list