[llvm] 111932d - [RISCV] Fix same mask vmerge peephole discarding false operand (#107827)

via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 9 07:45:48 PDT 2024


Author: Luke Lau
Date: 2024-09-09T22:45:44+08:00
New Revision: 111932d5cae0199d9c59669b37232a011f8b8757

URL: https://github.com/llvm/llvm-project/commit/111932d5cae0199d9c59669b37232a011f8b8757
DIFF: https://github.com/llvm/llvm-project/commit/111932d5cae0199d9c59669b37232a011f8b8757.diff

LOG: [RISCV] Fix same mask vmerge peephole discarding false operand (#107827)

This fixes the issue raised in
https://github.com/llvm/llvm-project/pull/106108#discussion_r1749677510

True's passthru needs to be equivalent to vmerge's false operand, but we
also allow true's passthru to be undef.

However, if it is undef then we need to replace it with false; otherwise
we end up discarding the false operand entirely.

The changes in fixed-vectors-strided-load-store-asm.ll undo the changes
in #106108 where we introduced this miscompile.
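
For illustration, a minimal MIR sketch of the pattern involved (adapted from
the doc comment in RISCVVectorPeephole.cpp and the test added below; register
names and the VL/SEW/policy immediates are illustrative). True is a masked
pseudo whose passthru is undef, and the vmerge uses the same mask:

    ; true's passthru is undef ($noreg); the vmerge's false operand is %false
    %true = PseudoVADD_VV_M1_MASK $noreg, %a, %b, $v0, 4, 5 /* e32 */, 0 /* tu, mu */
    %out = PseudoVMERGE_VVM_M1 %pt, %false, %true, $v0, 4, 5 /* e32 */

Before this patch the peephole converted the vmerge directly into

    %out = PseudoVMV_V_V %pt, %true, 4, 5 /* e32 */, 0 /* tu, mu */

so the masked-off elements, which should come from %false, were instead taken
from %true's undef passthru. With this patch, %true's undef passthru is first
rewritten to %false (provided %true is only used by the vmerge and %false's
definition can be made to dominate %true), giving roughly:

    %true = PseudoVADD_VV_M1_MASK %false, %a, %b, $v0, 4, 5 /* e32 */, 0 /* tu, mu */
    %out = PseudoVMV_V_V %pt, %true, 4, 5 /* e32 */, 0 /* tu, mu */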

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index 48ea2a01876177..1bad53f807546c 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -66,7 +66,7 @@ class RISCVVectorPeephole : public MachineFunctionPass {
   bool convertToWholeRegister(MachineInstr &MI) const;
   bool convertToUnmasked(MachineInstr &MI) const;
   bool convertAllOnesVMergeToVMv(MachineInstr &MI) const;
-  bool convertSameMaskVMergeToVMv(MachineInstr &MI) const;
+  bool convertSameMaskVMergeToVMv(MachineInstr &MI);
   bool foldUndefPassthruVMV_V_V(MachineInstr &MI);
   bool foldVMV_V_V(MachineInstr &MI);
 
@@ -396,7 +396,7 @@ bool RISCVVectorPeephole::convertAllOnesVMergeToVMv(MachineInstr &MI) const {
 /// ->
 /// %true = PseudoVADD_VV_M1_MASK %false, %x, %y, %mask, vl1, sew, policy
 /// %x = PseudoVMV_V_V %passthru, %true, vl2, sew, tu_mu
-bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) const {
+bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) {
   unsigned NewOpc = getVMV_V_VOpcodeForVMERGE_VVM(MI);
   if (!NewOpc)
     return false;
@@ -405,18 +405,24 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) const {
       !hasSameEEW(MI, *True))
     return false;
 
-  // True's passthru needs to be equivalent to False
-  Register TruePassthruReg = True->getOperand(1).getReg();
-  Register FalseReg = MI.getOperand(2).getReg();
-  if (TruePassthruReg != RISCV::NoRegister && TruePassthruReg != FalseReg)
-    return false;
-
   const MachineInstr *TrueV0Def = V0Defs.lookup(True);
   const MachineInstr *MIV0Def = V0Defs.lookup(&MI);
   assert(TrueV0Def && TrueV0Def->isCopy() && MIV0Def && MIV0Def->isCopy());
   if (TrueV0Def->getOperand(1).getReg() != MIV0Def->getOperand(1).getReg())
     return false;
 
+  // True's passthru needs to be equivalent to False
+  Register TruePassthruReg = True->getOperand(1).getReg();
+  Register FalseReg = MI.getOperand(2).getReg();
+  if (TruePassthruReg != FalseReg) {
+    // If True's passthru is undef see if we can change it to False
+    if (TruePassthruReg != RISCV::NoRegister ||
+        !MRI->hasOneUse(MI.getOperand(3).getReg()) ||
+        !ensureDominates(MI.getOperand(2), *True))
+      return false;
+    True->getOperand(1).setReg(MI.getOperand(2).getReg());
+  }
+
   MI.setDesc(TII->get(NewOpc));
   MI.removeOperand(2); // False operand
   MI.removeOperand(3); // Mask operand

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
index 569ada7949b1b5..e57b6a22dd6eab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
@@ -62,11 +62,12 @@ define void @gather_masked(ptr noalias nocapture %A, ptr noalias nocapture reado
 ; CHECK-NEXT:    li a4, 5
 ; CHECK-NEXT:  .LBB1_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m1, ta, ma
-; CHECK-NEXT:    vlse8.v v8, (a1), a4, v0.t
-; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vadd.vv v8, v9, v8
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a3, e8, m1, ta, mu
+; CHECK-NEXT:    vlse8.v v9, (a1), a4, v0.t
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vadd.vv v9, v10, v9
+; CHECK-NEXT:    vse8.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    addi a1, a1, 160
 ; CHECK-NEXT:    bne a0, a2, .LBB1_1
@@ -343,11 +344,12 @@ define void @scatter_masked(ptr noalias nocapture %A, ptr noalias nocapture read
 ; CHECK-NEXT:    li a4, 5
 ; CHECK-NEXT:  .LBB7_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m1, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vlse8.v v9, (a0), a4, v0.t
-; CHECK-NEXT:    vadd.vv v8, v9, v8
-; CHECK-NEXT:    vsse8.v v8, (a0), a4, v0.t
+; CHECK-NEXT:    vsetvli zero, a3, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a1)
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vlse8.v v10, (a0), a4, v0.t
+; CHECK-NEXT:    vadd.vv v9, v10, v9
+; CHECK-NEXT:    vsse8.v v9, (a0), a4, v0.t
 ; CHECK-NEXT:    addi a1, a1, 32
 ; CHECK-NEXT:    addi a0, a0, 160
 ; CHECK-NEXT:    bne a1, a2, .LBB7_1

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
index 875d4229bbc6e1..2383842bc49d97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
@@ -138,3 +138,24 @@ body: |
     %true:vrnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, $v0, 4, 4 /* e16 */, 0 /* tu, mu */
     $v0 = COPY %mask
     %x:vrnov0 = PseudoVMERGE_VVM_M1 %pt, %false, %true, $v0, 8, 5 /* e32 */
+...
+---
+name: same_mask_undef_truepassthru
+body: |
+  bb.0:
+    liveins: $v8, $v0
+    ; CHECK-LABEL: name: same_mask_undef_truepassthru
+    ; CHECK: liveins: $v8, $v0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %false:vrnov0 = COPY $v8
+    ; CHECK-NEXT: %mask:vr = COPY $v0
+    ; CHECK-NEXT: $v0 = COPY %mask
+    ; CHECK-NEXT: %true:vrnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, $v0, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: $v0 = COPY %mask
+    %false:vrnov0 = COPY $v8
+    %mask:vr = COPY $v0
+    $v0 = COPY %mask
+    %true:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, $v0, 4, 5 /* e32 */, 0 /* tu, mu */
+    $v0 = COPY %mask
+    %x:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, %false, %true, $v0, 4, 5 /* e32 */
+...
