[llvm] 713c2ad - [GlobalISel] Extend not_cmp_fold to work on conditional expressions

Jay Foad via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 7 01:31:25 PDT 2020


Author: Jay Foad
Date: 2020-09-07T09:31:08+01:00
New Revision: 713c2ad60c137a88c0a64cc98f2db4be702a25e9

URL: https://github.com/llvm/llvm-project/commit/713c2ad60c137a88c0a64cc98f2db4be702a25e9
DIFF: https://github.com/llvm/llvm-project/commit/713c2ad60c137a88c0a64cc98f2db4be702a25e9.diff

LOG: [GlobalISel] Extend not_cmp_fold to work on conditional expressions

Differential Revision: https://reviews.llvm.org/D86709
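
This extends the not_cmp_fold combine from handling a single G_ICMP or
G_FCMP under the xor-with-true to handling a whole tree of comparisons
combined with G_AND and G_OR, which is rewritten in place using De
Morgan's laws. A minimal runnable check of the identities the rewrite
relies on (an illustrative sketch, not code from this patch):

    #include <cassert>

    int main() {
      // De Morgan's laws, exhaustively checked over booleans:
      //   ~(x & y) == ~x | ~y     and     ~(x | y) == ~x & ~y
      for (int X = 0; X < 2; ++X)
        for (int Y = 0; Y < 2; ++Y) {
          assert(!(X && Y) == (!X || !Y));
          assert(!(X || Y) == (!X && !Y));
        }
      return 0;
    }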

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index e1f0535affcd..8607ad02d506 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -356,8 +356,8 @@ class CombinerHelper {
   bool matchRedundantSExtInReg(MachineInstr &MI);
 
   /// Combine inverting a result of a compare into the opposite cond code.
-  bool matchNotCmp(MachineInstr &MI, Register &CmpReg);
-  bool applyNotCmp(MachineInstr &MI, Register &CmpReg);
+  bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+  bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
 
   /// Try to transform \p MI by using all of the above
   /// combine functions. Returns true if changed.

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 4b0fe43c1868..6a6f97ae78b0 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -373,7 +373,7 @@ def ext_ext_fold: GICombineRule <
   (apply [{ return Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
 >;
 
-def not_cmp_fold_matchinfo : GIDefMatchData<"Register">;
+def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
 def not_cmp_fold : GICombineRule<
   (defs root:$d, not_cmp_fold_matchinfo:$info),
   (match (wip_match_opcode G_XOR): $d,
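
The matchinfo type change above is what lets the match step hand the
apply step every register that needs negating, rather than a single
compare register. The helpers take SmallVectorImpl<Register> & so they
stay independent of the inline capacity the rule declares. A minimal
sketch of that plumbing pattern (hypothetical matchStep/applyStep names;
Register simplified to unsigned; assumes only llvm/ADT/SmallVector.h):

    #include "llvm/ADT/SmallVector.h"

    using Register = unsigned; // stand-in for llvm::Register in this sketch

    static bool matchStep(llvm::SmallVectorImpl<Register> &RegsToNegate) {
      RegsToNegate.push_back(1); // pretend one compare was matched
      return true;
    }

    static bool applyStep(llvm::SmallVectorImpl<Register> &RegsToNegate) {
      return !RegsToNegate.empty();
    }

    int main() {
      // What GIDefMatchData<"SmallVector<Register, 4>"> amounts to:
      llvm::SmallVector<Register, 4> Info;
      if (matchStep(Info))
        applyStep(Info);
      return 0;
    }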

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 6a89060805e0..10cd58f17e9a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2243,13 +2243,13 @@ static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
          isConstTrueVal(TLI, Cst, IsVector, IsFP);
 }
 
-bool CombinerHelper::matchNotCmp(MachineInstr &MI, Register &CmpReg) {
+bool CombinerHelper::matchNotCmp(MachineInstr &MI,
+                                 SmallVectorImpl<Register> &RegsToNegate) {
   assert(MI.getOpcode() == TargetOpcode::G_XOR);
   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
   const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
   Register XorSrc;
   Register CstReg;
-  int64_t Cst;
   // We match xor(src, true) here.
   if (!mi_match(MI.getOperand(0).getReg(), MRI,
                 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
@@ -2258,15 +2258,51 @@ bool CombinerHelper::matchNotCmp(MachineInstr &MI, Register &CmpReg) {
   if (!MRI.hasOneNonDBGUse(XorSrc))
     return false;
 
-  // Now try match src to either icmp or fcmp.
+  // Check that XorSrc is the root of a tree of comparisons combined with ANDs
+  // and ORs. The suffix of RegsToNegate starting from index I is used as a
+  // work list of tree nodes to visit.
+  RegsToNegate.push_back(XorSrc);
+  // Remember whether the comparisons are all integer or all floating point.
+  bool IsInt = false;
   bool IsFP = false;
-  if (!mi_match(XorSrc, MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()))) {
-    // Try fcmp.
-    if (!mi_match(XorSrc, MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg())))
+  for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
+    Register Reg = RegsToNegate[I];
+    if (!MRI.hasOneNonDBGUse(Reg))
+      return false;
+    MachineInstr *Def = MRI.getVRegDef(Reg);
+    switch (Def->getOpcode()) {
+    default:
+      // Don't match if the tree contains anything other than ANDs, ORs and
+      // comparisons.
       return false;
-    IsFP = true;
+    case TargetOpcode::G_ICMP:
+      if (IsFP)
+        return false;
+      IsInt = true;
+      // When we apply the combine we will invert the predicate.
+      break;
+    case TargetOpcode::G_FCMP:
+      if (IsInt)
+        return false;
+      IsFP = true;
+      // When we apply the combine we will invert the predicate.
+      break;
+    case TargetOpcode::G_AND:
+    case TargetOpcode::G_OR:
+      // Implement De Morgan's laws:
+      // ~(x & y) -> ~x | ~y
+      // ~(x | y) -> ~x & ~y
+      // When we apply the combine we will change the opcode and recursively
+      // negate the operands.
+      RegsToNegate.push_back(Def->getOperand(1).getReg());
+      RegsToNegate.push_back(Def->getOperand(2).getReg());
+      break;
+    }
   }
 
+  // Now that we know whether the comparisons are integer or floating point,
+  // check the constant in the xor.
+  int64_t Cst;
   if (Ty.isVector()) {
     MachineInstr *CstDef = MRI.getVRegDef(CstReg);
     auto MaybeCst = getBuildVectorConstantSplat(*CstDef, MRI);
@@ -2281,25 +2317,38 @@ bool CombinerHelper::matchNotCmp(MachineInstr &MI, Register &CmpReg) {
       return false;
   }
 
-  CmpReg = XorSrc;
   return true;
 }
 
-bool CombinerHelper::applyNotCmp(MachineInstr &MI, Register &CmpReg) {
-  MachineInstr *CmpDef = MRI.getVRegDef(CmpReg);
-  assert(CmpDef && "Should have been given an MI reg");
-  assert(CmpDef->getOpcode() == TargetOpcode::G_ICMP ||
-         CmpDef->getOpcode() == TargetOpcode::G_FCMP);
-
-  Observer.changingInstr(*CmpDef);
-  MachineOperand &PredOp = CmpDef->getOperand(1);
-  CmpInst::Predicate NewP = CmpInst::getInversePredicate(
-      (CmpInst::Predicate)PredOp.getPredicate());
-  PredOp.setPredicate(NewP);
-  Observer.changedInstr(*CmpDef);
+bool CombinerHelper::applyNotCmp(MachineInstr &MI,
+                                 SmallVectorImpl<Register> &RegsToNegate) {
+  for (Register Reg : RegsToNegate) {
+    MachineInstr *Def = MRI.getVRegDef(Reg);
+    Observer.changingInstr(*Def);
+    // For each comparison, invert the predicate. For each AND and OR, swap
+    // the opcode.
+    switch (Def->getOpcode()) {
+    default:
+      llvm_unreachable("Unexpected opcode");
+    case TargetOpcode::G_ICMP:
+    case TargetOpcode::G_FCMP: {
+      MachineOperand &PredOp = Def->getOperand(1);
+      CmpInst::Predicate NewP = CmpInst::getInversePredicate(
+          (CmpInst::Predicate)PredOp.getPredicate());
+      PredOp.setPredicate(NewP);
+      break;
+    }
+    case TargetOpcode::G_AND:
+      Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
+      break;
+    case TargetOpcode::G_OR:
+      Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
+      break;
+    }
+    Observer.changedInstr(*Def);
+  }
 
-  replaceRegWith(MRI, MI.getOperand(0).getReg(),
-                 CmpDef->getOperand(0).getReg());
+  replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
   MI.eraseFromParent();
   return true;
 }
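
The shape of the new match/apply pair can be condensed into a standalone
toy (hypothetical types, not the LLVM API): the suffix of the vector
starting at index I serves as a work list, so the tree is visited
without recursion, and nothing is mutated unless the whole tree matches,
because the apply step only runs after the match step succeeds.

    #include <vector>

    enum class Op { Cmp, And, Or, Other };
    struct Node { Op Opc; int LHS = -1, RHS = -1; bool Negated = false; };

    // Match step: collect every node to negate, or fail if the tree
    // contains anything other than compares, ANDs and ORs.
    bool collect(const std::vector<Node> &G, int Root, std::vector<int> &Work) {
      Work.push_back(Root);
      for (unsigned I = 0; I < Work.size(); ++I) {
        const Node &N = G[Work[I]];
        switch (N.Opc) {
        case Op::Cmp:
          break; // the apply step will invert its predicate
        case Op::And:
        case Op::Or:
          // De Morgan: negating the node means negating both operands.
          Work.push_back(N.LHS);
          Work.push_back(N.RHS);
          break;
        default:
          return false; // e.g. a G_TRUNC leaf defeats the whole match
        }
      }
      return true;
    }

    // Apply step: invert compares, swap AND <-> OR.
    void negate(std::vector<Node> &G, const std::vector<int> &Work) {
      for (int Idx : Work) {
        Node &N = G[Idx];
        if (N.Opc == Op::Cmp)
          N.Negated = !N.Negated; // stands in for the inverse predicate
        else
          N.Opc = (N.Opc == Op::And) ? Op::Or : Op::And;
      }
    }

    int main() {
      // ~((a < b) & (c < d)): node 0 is the AND, nodes 1 and 2 compares.
      std::vector<Node> G = {{Op::And, 1, 2}, {Op::Cmp}, {Op::Cmp}};
      std::vector<int> Work;
      if (collect(G, 0, Work))
        negate(G, Work); // G[0] is now Or; both compares are negated
      return 0;
    }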

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
index 3356206c4cfc..93f8e4284cd4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
@@ -164,3 +164,121 @@ body:             |
     $q0 = COPY %5(<4 x s32>)
     RET_ReallyLR implicit $q0
 ...
+---
+name: icmp_and_icmp
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: icmp_and_icmp
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
+    ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s1)
+    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = G_CONSTANT i64 1
+    %2:_(s1) = G_CONSTANT i1 1
+    %3:_(s1) = G_ICMP intpred(sgt), %0(s64), %1
+    %4:_(s1) = G_ICMP intpred(ugt), %0(s64), %1
+    %5:_(s1) = G_AND %3, %4
+    %6:_(s1) = G_XOR %5, %2
+    %7:_(s32) = G_ANYEXT %6
+    $w0 = COPY %7(s32)
+    RET_ReallyLR implicit $w0
+...
+---
+name: icmp_or_icmp
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: icmp_or_icmp
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
+    ; CHECK: [[AND:%[0-9]+]]:_(s1) = G_AND [[ICMP]], [[ICMP1]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s1)
+    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = G_CONSTANT i64 1
+    %2:_(s1) = G_CONSTANT i1 1
+    %3:_(s1) = G_ICMP intpred(sgt), %0(s64), %1
+    %4:_(s1) = G_ICMP intpred(ugt), %0(s64), %1
+    %5:_(s1) = G_OR %3, %4
+    %6:_(s1) = G_XOR %5, %2
+    %7:_(s32) = G_ANYEXT %6
+    $w0 = COPY %7(s32)
+    RET_ReallyLR implicit $w0
+...
+---
+name: icmp_and_icmp_or_icmp
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: icmp_and_icmp_or_icmp
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
+    ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
+    ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[C]]
+    ; CHECK: [[AND:%[0-9]+]]:_(s1) = G_AND [[OR]], [[ICMP2]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s1)
+    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = G_CONSTANT i64 1
+    %2:_(s1) = G_CONSTANT i1 1
+    %3:_(s1) = G_ICMP intpred(sgt), %0(s64), %1
+    %4:_(s1) = G_ICMP intpred(ugt), %0(s64), %1
+    %5:_(s1) = G_AND %3, %4
+    %6:_(s1) = G_ICMP intpred(ne), %0(s64), %1
+    %7:_(s1) = G_OR %5, %6
+    %8:_(s1) = G_XOR %7, %2
+    %9:_(s32) = G_ANYEXT %8
+    $w0 = COPY %9(s32)
+    RET_ReallyLR implicit $w0
+...
+---
+name: icmp_and_trunc
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: icmp_and_trunc
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+    ; CHECK: [[AND:%[0-9]+]]:_(s1) = G_AND [[ICMP]], [[TRUNC]]
+    ; CHECK: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[AND]], [[C1]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
+    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = G_CONSTANT i64 1
+    %2:_(s1) = G_CONSTANT i1 1
+    %3:_(s1) = G_ICMP intpred(sgt), %0(s64), %1
+    %4:_(s1) = G_TRUNC %0(s64)
+    %5:_(s1) = G_AND %3, %4
+    %6:_(s1) = G_XOR %5, %2
+    %7:_(s32) = G_ANYEXT %6
+    $w0 = COPY %7(s32)
+    RET_ReallyLR implicit $w0
+...


        

