[llvm] [GlobalISel] port rewrite from SelectionDAG to GlobalISel (PR #181486)
Luisa Cicolini via llvm-commits
llvm-commits at lists.llvm.org
Sat Feb 14 09:41:54 PST 2026
https://github.com/luisacicolini updated https://github.com/llvm/llvm-project/pull/181486
>From f0ba0f871ca98fe80eecf950ca2bc9231ed2b86b Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 16:16:05 +0000
Subject: [PATCH 01/16] chore: test
---
.../GlobalISel/GIMatchTableExecutorImpl.h | 22 ++++++-
.../include/llvm/Target/GlobalISel/Combine.td | 45 +++++++++----
.../AArch64/GlobalISel/combine-integer.mir | 66 ++++++++++++++++---
3 files changed, 108 insertions(+), 25 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 8f6586e79d78a..05a312a0c0812 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -35,6 +35,7 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
+#include <cstdio>
namespace llvm {
@@ -49,6 +50,8 @@ bool GIMatchTableExecutor::executeMatchTable(
const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI,
const PredicateBitset &AvailableFeatures,
CodeGenCoverage *CoverageInfo) const {
+
+ printf("\nHALO\n");
uint64_t CurrentIdx = 0;
SmallVector<uint64_t, 4> OnFailResumeAt;
@@ -215,7 +218,7 @@ bool GIMatchTableExecutor::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
unsigned Opcode = State.MIs[InsnID]->getOpcode();
-
+
DEBUG_WITH_TYPE(TgtExecutor::getName(), {
dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
<< "], ExpectedOpcode=" << Expected0;
@@ -230,6 +233,7 @@ bool GIMatchTableExecutor::executeMatchTable(
}
break;
}
+
case GIM_SwitchOpcode: {
uint64_t InsnID = readULEB();
uint16_t LowerBound = readU16();
@@ -460,6 +464,7 @@ bool GIMatchTableExecutor::executeMatchTable(
// Note: we don't check for invalid here because this is purely a hook to
// allow some executors (such as the combiner) to check arbitrary,
// contextless predicates, such as whether a rule is enabled or not.
+
uint16_t Predicate = readU16();
DEBUG_WITH_TYPE(TgtExecutor::getName(),
dbgs() << CurrentIdx
@@ -1151,6 +1156,9 @@ bool GIMatchTableExecutor::executeMatchTable(
}
case GIR_CopySubReg: {
+
+ printf("\nGIR_CopySubReg\n");
+
uint64_t NewInsnID = readULEB();
uint64_t OldInsnID = readULEB();
uint64_t OpIdx = readULEB();
@@ -1259,6 +1267,9 @@ bool GIMatchTableExecutor::executeMatchTable(
case GIR_AddSimpleTempRegister:
case GIR_AddTempRegister:
case GIR_AddTempSubRegister: {
+
+ printf("\nGIR_AddTempSubRegister\n");
+
uint64_t InsnID = readULEB();
uint64_t TempRegID = readULEB();
RegState TempRegFlags = {};
@@ -1323,6 +1334,9 @@ bool GIMatchTableExecutor::executeMatchTable(
break;
}
case GIR_ComplexSubOperandRenderer: {
+
+ printf("\nGIR_ComplexSubOperandRenderer\n");
+
uint64_t InsnID = readULEB();
uint16_t RendererID = readU16();
uint64_t RenderOpID = readULEB();
@@ -1336,6 +1350,9 @@ bool GIMatchTableExecutor::executeMatchTable(
break;
}
case GIR_ComplexSubOperandSubRegRenderer: {
+
+ printf("\nGIR_ComplexSubOperandSubRegRenderer\n");
+
uint64_t InsnID = readULEB();
uint16_t RendererID = readU16();
uint64_t RenderOpID = readULEB();
@@ -1455,6 +1472,9 @@ bool GIMatchTableExecutor::executeMatchTable(
case GIR_RootConstrainSelectedInstOperands:
case GIR_ConstrainSelectedInstOperands: {
+
+ printf("GIR_ConstrainSelectedInstOperands at index %lu\n", CurrentIdx);
+
uint64_t InsnID = (MatcherOpcode == GIR_RootConstrainSelectedInstOperands)
? 0
: readULEB();
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index f5c940bffc8fb..697d70cfda7d1 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1879,6 +1879,22 @@ def APlusBMinusCMinusB : GICombineRule<
(G_SUB $root, $add1, $B)),
(apply (G_SUB $root, $A, $C))>;
+// fold ((A+(B+C))-B) -> A+C
+def APlusBPlusCMinusB_frags : GICombinePatFrag<
+ (outs root:$root), (ins $x, $y, $n),
+ [
+ (pattern (G_ADD $add1, $y, $n),
+ (G_ADD $add2, $x, $add1),
+ (G_SUB $root, $add2, $y),
+ [{ return MRI.hasOneNonDBGUse(${add2}.getReg()) &&
+ MRI.hasOneNonDBGUse(${add1}.getReg()); }]),
+ ]>;
+
+def APlusBPlusCMinusB : GICombineRule<
+ (defs root:$root),
+ (match (APlusBPlusCMinusB_frags $root, $x, $y, $n)),
+ (apply (G_ADD $root, $x, $n))>;
+
// fold ((A-(B-C))-C) -> A-B
def AMinusBMinusCMinusC : GICombineRule<
(defs root:$root),
@@ -1999,20 +2015,21 @@ def AMinusC1PlusC2: GICombineRule<
def integer_reassoc_combines: GICombineGroup<[
APlusBMinusCMinusB,
- AMinusBMinusCMinusC,
- ZeroMinusAPlusB,
- APlusZeroMinusB,
- APlusBMinusB,
- BMinusAPlusA,
- AMinusBPlusCMinusA,
- AMinusBPlusBMinusC,
- APlusBMinusAplusC,
- APlusBMinusCPlusA,
- APlusC1MinusC2,
- C2MinusAPlusC1,
- AMinusC1MinusC2,
- C1Minus2MinusC2,
- AMinusC1PlusC2
+ APlusBPlusCMinusB,
+ // AMinusBMinusCMinusC,
+ // ZeroMinusAPlusB,
+ // APlusZeroMinusB,
+ // APlusBMinusB,
+ // BMinusAPlusA,
+ // AMinusBPlusCMinusA,
+ // AMinusBPlusBMinusC,
+ // APlusBMinusAplusC,
+ // APlusBMinusCPlusA,
+ // APlusC1MinusC2,
+ // C2MinusAPlusC1,
+ // AMinusC1MinusC2,
+ // C1Minus2MinusC2,
+ // AMinusC1PlusC2
]>;
// fold (A+(shl (0-B), C)) -> (A-(shl B, C))
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
index c9b24ad75ce27..2693caad43d21 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
@@ -35,7 +35,10 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %b:_(s64) = COPY $x1
- ; CHECK-NEXT: %sub:_(s64) = G_SUB %a, %b
+ ; CHECK-NEXT: %c:_(s64) = COPY $x2
+ ; CHECK-NEXT: %sub1:_(s64) = G_SUB %b, %c
+ ; CHECK-NEXT: %sub2:_(s64) = G_SUB %a, %sub1
+ ; CHECK-NEXT: %sub:_(s64) = G_SUB %sub2, %c
; CHECK-NEXT: $x0 = COPY %sub(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -175,9 +178,12 @@ body: |
; CHECK-LABEL: name: AMinusBPlusCMinusA
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %b:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %add:_(s64) = G_SUB %c, %b
+ ; CHECK-NEXT: %sub2:_(s64) = G_SUB %c, %a
+ ; CHECK-NEXT: %sub1:_(s64) = G_SUB %a, %b
+ ; CHECK-NEXT: %add:_(s64) = G_ADD %sub1, %sub2
; CHECK-NEXT: $x0 = COPY %add(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -201,8 +207,11 @@ body: |
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
+ ; CHECK-NEXT: %b:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %add:_(s64) = G_SUB %a, %c
+ ; CHECK-NEXT: %sub2:_(s64) = G_SUB %b, %c
+ ; CHECK-NEXT: %sub1:_(s64) = G_SUB %a, %b
+ ; CHECK-NEXT: %add:_(s64) = G_ADD %sub1, %sub2
; CHECK-NEXT: $x0 = COPY %add(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -226,9 +235,12 @@ body: |
; CHECK-LABEL: name: APlusBMinusAplusC
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %b:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %add:_(s64) = G_SUB %b, %c
+ ; CHECK-NEXT: %add1:_(s64) = G_ADD %a, %c
+ ; CHECK-NEXT: %sub1:_(s64) = G_SUB %b, %add1
+ ; CHECK-NEXT: %add:_(s64) = G_ADD %a, %sub1
; CHECK-NEXT: $x0 = COPY %add(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -251,9 +263,12 @@ body: |
; CHECK-LABEL: name: APlusBMinusCPlusA
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %b:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %add:_(s64) = G_SUB %b, %c
+ ; CHECK-NEXT: %add1:_(s64) = G_ADD %c, %a
+ ; CHECK-NEXT: %sub1:_(s64) = G_SUB %b, %add1
+ ; CHECK-NEXT: %add:_(s64) = G_ADD %a, %sub1
; CHECK-NEXT: $x0 = COPY %add(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -279,9 +294,12 @@ body: |
; CHECK-NEXT: %a1:_(s64) = COPY $x0
; CHECK-NEXT: %b1:_(s64) = COPY $x1
; CHECK-NEXT: %c1:_(s64) = COPY $x2
+ ; CHECK-NEXT: %a:_(<2 x s64>) = G_BUILD_VECTOR %a1(s64), %b1(s64)
; CHECK-NEXT: %b:_(<2 x s64>) = G_BUILD_VECTOR %b1(s64), %ba:_(s64)
; CHECK-NEXT: %c:_(<2 x s64>) = G_BUILD_VECTOR %a1(s64), %c1(s64)
- ; CHECK-NEXT: %add:_(<2 x s64>) = G_SUB %b, %c
+ ; CHECK-NEXT: %add1:_(<2 x s64>) = G_ADD %c, %a
+ ; CHECK-NEXT: %sub1:_(<2 x s64>) = G_SUB %b, %add1
+ ; CHECK-NEXT: %add:_(<2 x s64>) = G_ADD %a, %sub1
; CHECK-NEXT: $q0 = COPY %add(<2 x s64>)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a1:_(s64) = COPY $x0
@@ -331,8 +349,10 @@ body: |
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
- ; CHECK-NEXT: %sub:_(s64) = G_SUB [[C]], %a
+ ; CHECK-NEXT: %c1:_(s64) = G_CONSTANT i64 4
+ ; CHECK-NEXT: %c2:_(s64) = G_CONSTANT i64 9
+ ; CHECK-NEXT: %add:_(s64) = G_ADD %a, %c1
+ ; CHECK-NEXT: %sub:_(s64) = G_SUB %c2, %add
; CHECK-NEXT: $x0 = COPY %sub(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -377,8 +397,10 @@ body: |
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -49
- ; CHECK-NEXT: %sub:_(s64) = G_SUB [[C]], %a
+ ; CHECK-NEXT: %c1:_(s64) = G_CONSTANT i64 11
+ ; CHECK-NEXT: %sub1:_(s64) = G_SUB %c1, %a
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -60
+ ; CHECK-NEXT: %sub:_(s64) = G_ADD %sub1, [[C]]
; CHECK-NEXT: $x0 = COPY %sub(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -413,3 +435,27 @@ body: |
$x0 = COPY %add
RET_ReallyLR implicit $x0
+...
+---
+name: APlusBPlusCMinusB
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: APlusBPlusCMinusB
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a:_(s64) = COPY $x0
+ ; CHECK-NEXT: %c:_(s64) = COPY $x2
+ ; CHECK-NEXT: %sub1:_(s64) = G_ADD %a, %c
+ ; CHECK-NEXT: $x0 = COPY %sub1(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %a:_(s64) = COPY $x0
+ %b:_(s64) = COPY $x1
+ %c:_(s64) = COPY $x2
+ %add1:_(s64) = G_ADD %b, %c
+ %add2:_(s64) = G_ADD %a, %add1
+ %sub1:_(s64) = G_SUB %add2, %b
+ $x0 = COPY %sub1
+ RET_ReallyLR implicit $x0
+
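
Patch 01 above introduces the APlusBPlusCMinusB rule. As a quick sanity
check of the identity it encodes (fold ((A+(B+C))-B) -> A+C), here is a
minimal standalone C++ sketch; the function name is invented for
illustration and is not part of the patch:

    #include <cassert>
    #include <cstdint>

    // (a + (b + c)) - b == a + c, including unsigned wraparound, so the
    // fold is sound for any G_ADD/G_SUB bit width.
    uint64_t foldAPlusBPlusCMinusB(uint64_t a, uint64_t b, uint64_t c) {
      uint64_t add1 = b + c;    // G_ADD $add1, $y, $n
      uint64_t add2 = a + add1; // G_ADD $add2, $x, $add1
      uint64_t sub = add2 - b;  // G_SUB $root, $add2, $y
      assert(sub == a + c && "identity holds modulo 2^64");
      return sub;
    }

The MRI.hasOneNonDBGUse guards in the pattern fragment ensure the
intermediate adds die once the root is rewritten, so the fold never
increases the instruction count.
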
>From 5dba0ef850db425e870b4fed6639360f45afafcd Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 16:18:46 +0000
Subject: [PATCH 02/16] chore: clean
---
.../GlobalISel/GIMatchTableExecutorImpl.h | 18 ------------------
1 file changed, 18 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 05a312a0c0812..87c531aaed725 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -35,7 +35,6 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
-#include <cstdio>
namespace llvm {
@@ -51,8 +50,6 @@ bool GIMatchTableExecutor::executeMatchTable(
const PredicateBitset &AvailableFeatures,
CodeGenCoverage *CoverageInfo) const {
- printf("\nHALO\n");
-
uint64_t CurrentIdx = 0;
SmallVector<uint64_t, 4> OnFailResumeAt;
NewMIVector OutMIs;
@@ -233,7 +230,6 @@ bool GIMatchTableExecutor::executeMatchTable(
}
break;
}
-
case GIM_SwitchOpcode: {
uint64_t InsnID = readULEB();
uint16_t LowerBound = readU16();
@@ -242,7 +238,6 @@ bool GIMatchTableExecutor::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
const int64_t Opcode = State.MIs[InsnID]->getOpcode();
-
DEBUG_WITH_TYPE(TgtExecutor::getName(), {
dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
<< LowerBound << ", " << UpperBound << "), Default=" << Default
@@ -464,7 +459,6 @@ bool GIMatchTableExecutor::executeMatchTable(
// Note: we don't check for invalid here because this is purely a hook to
// allow some executors (such as the combiner) to check arbitrary,
// contextless predicates, such as whether a rule is enabled or not.
-
uint16_t Predicate = readU16();
DEBUG_WITH_TYPE(TgtExecutor::getName(),
dbgs() << CurrentIdx
@@ -1156,9 +1150,6 @@ bool GIMatchTableExecutor::executeMatchTable(
}
case GIR_CopySubReg: {
-
- printf("\nGIR_CopySubReg\n");
-
uint64_t NewInsnID = readULEB();
uint64_t OldInsnID = readULEB();
uint64_t OpIdx = readULEB();
@@ -1267,9 +1258,6 @@ bool GIMatchTableExecutor::executeMatchTable(
case GIR_AddSimpleTempRegister:
case GIR_AddTempRegister:
case GIR_AddTempSubRegister: {
-
- printf("\nGIR_AddTempSubRegister\n");
-
uint64_t InsnID = readULEB();
uint64_t TempRegID = readULEB();
RegState TempRegFlags = {};
@@ -1334,9 +1322,6 @@ bool GIMatchTableExecutor::executeMatchTable(
break;
}
case GIR_ComplexSubOperandRenderer: {
-
- printf("\nGIR_ComplexSubOperandRenderer\n");
-
uint64_t InsnID = readULEB();
uint16_t RendererID = readU16();
uint64_t RenderOpID = readULEB();
@@ -1350,9 +1335,6 @@ bool GIMatchTableExecutor::executeMatchTable(
break;
}
case GIR_ComplexSubOperandSubRegRenderer: {
-
- printf("\nGIR_ComplexSubOperandSubRegRenderer\n");
-
uint64_t InsnID = readULEB();
uint16_t RendererID = readU16();
uint64_t RenderOpID = readULEB();
>From eb80ece3e7704b5da4c6038e07b7b1f5736aee42 Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 16:20:24 +0000
Subject: [PATCH 03/16] chore: fix
---
.../include/llvm/Target/GlobalISel/Combine.td | 28 ++++++-------
.../AArch64/GlobalISel/combine-integer.mir | 42 +++++--------------
2 files changed, 24 insertions(+), 46 deletions(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 697d70cfda7d1..e70d90283d3be 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -2016,20 +2016,20 @@ def AMinusC1PlusC2: GICombineRule<
def integer_reassoc_combines: GICombineGroup<[
APlusBMinusCMinusB,
APlusBPlusCMinusB,
- // AMinusBMinusCMinusC,
- // ZeroMinusAPlusB,
- // APlusZeroMinusB,
- // APlusBMinusB,
- // BMinusAPlusA,
- // AMinusBPlusCMinusA,
- // AMinusBPlusBMinusC,
- // APlusBMinusAplusC,
- // APlusBMinusCPlusA,
- // APlusC1MinusC2,
- // C2MinusAPlusC1,
- // AMinusC1MinusC2,
- // C1Minus2MinusC2,
- // AMinusC1PlusC2
+ AMinusBMinusCMinusC,
+ ZeroMinusAPlusB,
+ APlusZeroMinusB,
+ APlusBMinusB,
+ BMinusAPlusA,
+ AMinusBPlusCMinusA,
+ AMinusBPlusBMinusC,
+ APlusBMinusAplusC,
+ APlusBMinusCPlusA,
+ APlusC1MinusC2,
+ C2MinusAPlusC1,
+ AMinusC1MinusC2,
+ C1Minus2MinusC2,
+ AMinusC1PlusC2
]>;
// fold (A+(shl (0-B), C)) -> (A-(shl B, C))
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
index 2693caad43d21..d972f3aad591f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
@@ -35,10 +35,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %b:_(s64) = COPY $x1
- ; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %sub1:_(s64) = G_SUB %b, %c
- ; CHECK-NEXT: %sub2:_(s64) = G_SUB %a, %sub1
- ; CHECK-NEXT: %sub:_(s64) = G_SUB %sub2, %c
+ ; CHECK-NEXT: %sub:_(s64) = G_SUB %a, %b
; CHECK-NEXT: $x0 = COPY %sub(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -178,12 +175,9 @@ body: |
; CHECK-LABEL: name: AMinusBPlusCMinusA
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %b:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %sub2:_(s64) = G_SUB %c, %a
- ; CHECK-NEXT: %sub1:_(s64) = G_SUB %a, %b
- ; CHECK-NEXT: %add:_(s64) = G_ADD %sub1, %sub2
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %c, %b
; CHECK-NEXT: $x0 = COPY %add(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -207,11 +201,8 @@ body: |
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
- ; CHECK-NEXT: %b:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %sub2:_(s64) = G_SUB %b, %c
- ; CHECK-NEXT: %sub1:_(s64) = G_SUB %a, %b
- ; CHECK-NEXT: %add:_(s64) = G_ADD %sub1, %sub2
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %a, %c
; CHECK-NEXT: $x0 = COPY %add(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -235,12 +226,9 @@ body: |
; CHECK-LABEL: name: APlusBMinusAplusC
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %b:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %add1:_(s64) = G_ADD %a, %c
- ; CHECK-NEXT: %sub1:_(s64) = G_SUB %b, %add1
- ; CHECK-NEXT: %add:_(s64) = G_ADD %a, %sub1
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %b, %c
; CHECK-NEXT: $x0 = COPY %add(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -263,12 +251,9 @@ body: |
; CHECK-LABEL: name: APlusBMinusCPlusA
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %b:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s64) = COPY $x2
- ; CHECK-NEXT: %add1:_(s64) = G_ADD %c, %a
- ; CHECK-NEXT: %sub1:_(s64) = G_SUB %b, %add1
- ; CHECK-NEXT: %add:_(s64) = G_ADD %a, %sub1
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %b, %c
; CHECK-NEXT: $x0 = COPY %add(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -294,12 +279,9 @@ body: |
; CHECK-NEXT: %a1:_(s64) = COPY $x0
; CHECK-NEXT: %b1:_(s64) = COPY $x1
; CHECK-NEXT: %c1:_(s64) = COPY $x2
- ; CHECK-NEXT: %a:_(<2 x s64>) = G_BUILD_VECTOR %a1(s64), %b1(s64)
; CHECK-NEXT: %b:_(<2 x s64>) = G_BUILD_VECTOR %b1(s64), %ba:_(s64)
; CHECK-NEXT: %c:_(<2 x s64>) = G_BUILD_VECTOR %a1(s64), %c1(s64)
- ; CHECK-NEXT: %add1:_(<2 x s64>) = G_ADD %c, %a
- ; CHECK-NEXT: %sub1:_(<2 x s64>) = G_SUB %b, %add1
- ; CHECK-NEXT: %add:_(<2 x s64>) = G_ADD %a, %sub1
+ ; CHECK-NEXT: %add:_(<2 x s64>) = G_SUB %b, %c
; CHECK-NEXT: $q0 = COPY %add(<2 x s64>)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a1:_(s64) = COPY $x0
@@ -349,10 +331,8 @@ body: |
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
- ; CHECK-NEXT: %c1:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: %c2:_(s64) = G_CONSTANT i64 9
- ; CHECK-NEXT: %add:_(s64) = G_ADD %a, %c1
- ; CHECK-NEXT: %sub:_(s64) = G_SUB %c2, %add
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
+ ; CHECK-NEXT: %sub:_(s64) = G_SUB [[C]], %a
; CHECK-NEXT: $x0 = COPY %sub(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
@@ -397,10 +377,8 @@ body: |
; CHECK: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
- ; CHECK-NEXT: %c1:_(s64) = G_CONSTANT i64 11
- ; CHECK-NEXT: %sub1:_(s64) = G_SUB %c1, %a
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -60
- ; CHECK-NEXT: %sub:_(s64) = G_ADD %sub1, [[C]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -49
+ ; CHECK-NEXT: %sub:_(s64) = G_SUB [[C]], %a
; CHECK-NEXT: $x0 = COPY %sub(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s64) = COPY $x0
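
Patch 03 re-enables the full integer_reassoc_combines group and restores
the folded CHECK lines. The restored constants can be verified by hand;
a quick standalone check of the two constant-folding cases above, using
the values from the test:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int64_t a : {-7, 0, 42}) {
        // Restored CHECK: G_CONSTANT i64 5; G_SUB [[C]], %a
        // from the unfolded form 9 - (a + 4).
        assert(9 - (a + 4) == 5 - a);
        // Restored CHECK: G_CONSTANT i64 -49; G_SUB [[C]], %a
        // from the unfolded form (11 - a) + (-60).
        assert((11 - a) + (-60) == -49 - a);
      }
      return 0;
    }
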
>From c1d5472e8db19abebdec5cc2cd79ce7724e97430 Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 16:21:59 +0000
Subject: [PATCH 04/16] chore: things
---
.../llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 87c531aaed725..8f720bb282206 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -49,7 +49,6 @@ bool GIMatchTableExecutor::executeMatchTable(
const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI,
const PredicateBitset &AvailableFeatures,
CodeGenCoverage *CoverageInfo) const {
-
uint64_t CurrentIdx = 0;
SmallVector<uint64_t, 4> OnFailResumeAt;
NewMIVector OutMIs;
@@ -215,7 +214,6 @@ bool GIMatchTableExecutor::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
unsigned Opcode = State.MIs[InsnID]->getOpcode();
-
DEBUG_WITH_TYPE(TgtExecutor::getName(), {
dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
<< "], ExpectedOpcode=" << Expected0;
@@ -238,6 +236,7 @@ bool GIMatchTableExecutor::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
const int64_t Opcode = State.MIs[InsnID]->getOpcode();
+
DEBUG_WITH_TYPE(TgtExecutor::getName(), {
dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
<< LowerBound << ", " << UpperBound << "), Default=" << Default
@@ -1454,9 +1453,6 @@ bool GIMatchTableExecutor::executeMatchTable(
case GIR_RootConstrainSelectedInstOperands:
case GIR_ConstrainSelectedInstOperands: {
-
- printf("GIR_ConstrainSelectedInstOperands at index %lu\n", CurrentIdx);
-
uint64_t InsnID = (MatcherOpcode == GIR_RootConstrainSelectedInstOperands)
? 0
: readULEB();
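
Patch 04 removes the last printf; the call sites it touches read their
operands with readULEB(). For readers unfamiliar with that encoding, a
generic ULEB128 decoder sketch (an illustration of the format, not
LLVM's fastDecodeULEB128):

    #include <cstdint>

    // Each byte carries 7 payload bits; a set high bit means more bytes
    // follow. Small operands therefore take a single byte.
    uint64_t decodeULEB128(const uint8_t *Buf, uint64_t &Idx) {
      uint64_t Value = 0;
      unsigned Shift = 0;
      uint8_t Byte;
      do {
        Byte = Buf[Idx++];
        Value |= uint64_t(Byte & 0x7f) << Shift;
        Shift += 7;
      } while (Byte & 0x80);
      return Value;
    }
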
>From 64b26b0ca4af2b8c0f4330104e0e807aac724fd5 Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 16:33:56 +0000
Subject: [PATCH 05/16] chore: reset?
---
.../include/llvm/Target/GlobalISel/Combine.td | 3831 +++++++----------
1 file changed, 1544 insertions(+), 2287 deletions(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index e70d90283d3be..e7b1ba29805ee 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1,4 +1,4 @@
-//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
+//===- llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,2318 +6,1575 @@
//
//===----------------------------------------------------------------------===//
//
-// Declare GlobalISel combine rules and provide mechanisms to opt-out.
+/// \file This file implements GIMatchTableExecutor's `executeMatchTable`
+/// function. This is implemented in a separate file because the function is
+/// quite large.
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H
+#define LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterBankInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/CodeGenCoverage.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+template <class TgtExecutor, class PredicateBitset, class ComplexMatcherMemFn,
+ class CustomRendererFn>
+bool GIMatchTableExecutor::executeMatchTable(
+ TgtExecutor &Exec, MatcherState &State,
+ const ExecInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+ &ExecInfo,
+ MachineIRBuilder &Builder, const uint8_t *MatchTable,
+ const TargetInstrInfo &TII, MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI,
+ const PredicateBitset &AvailableFeatures,
+ CodeGenCoverage *CoverageInfo) const {
+
+ uint64_t CurrentIdx = 0;
+ SmallVector<uint64_t, 4> OnFailResumeAt;
+ NewMIVector OutMIs;
+
+ GISelChangeObserver *Observer = Builder.getObserver();
+ // Bypass the flag check on the instruction, and only look at the MCInstrDesc.
+ bool NoFPException = !State.MIs[0]->getDesc().mayRaiseFPException();
+
+ const uint32_t Flags = State.MIs[0]->getFlags();
+
+ enum RejectAction { RejectAndGiveUp, RejectAndResume };
+ auto handleReject = [&]() -> RejectAction {
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": Rejected\n");
+ if (OnFailResumeAt.empty())
+ return RejectAndGiveUp;
+ CurrentIdx = OnFailResumeAt.pop_back_val();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
+ << OnFailResumeAt.size() << " try-blocks remain)\n");
+ return RejectAndResume;
+ };
+
+ const auto propagateFlags = [&]() {
+ for (auto MIB : OutMIs) {
+ // Set the NoFPExcept flag when no original matched instruction could
+ // raise an FP exception, but the new instruction potentially might.
+ uint32_t MIBFlags = Flags | MIB.getInstr()->getFlags();
+ if (NoFPException && MIB->mayRaiseFPException())
+ MIBFlags |= MachineInstr::NoFPExcept;
+ if (Observer)
+ Observer->changingInstr(*MIB);
+ MIB.setMIFlags(MIBFlags);
+ if (Observer)
+ Observer->changedInstr(*MIB);
+ }
+ };
+
+ // If the index is >= 0, it's an index in the type objects generated by
+ // TableGen. If the index is <0, it's an index in the recorded types object.
+ const auto getTypeFromIdx = [&](int64_t Idx) -> LLT {
+ if (Idx >= 0)
+ return ExecInfo.TypeObjects[Idx];
+ return State.RecordedTypes[1 - Idx];
+ };
+
+ const auto readULEB = [&]() {
+ return fastDecodeULEB128(MatchTable, CurrentIdx);
+ };
+
+ // Convenience function to return a signed value. This avoids
+ // us forgetting to first cast to int8_t before casting to a
+ // wider signed int type.
+  // If we cast uint8 directly to a wider type we'd lose
+  // negative values.
+ const auto readS8 = [&]() { return (int8_t)MatchTable[CurrentIdx++]; };
+
+ const auto readU16 = [&]() {
+ auto V = readBytesAs<uint16_t>(MatchTable + CurrentIdx);
+ CurrentIdx += 2;
+ return V;
+ };
+
+ const auto readU32 = [&]() {
+ auto V = readBytesAs<uint32_t>(MatchTable + CurrentIdx);
+ CurrentIdx += 4;
+ return V;
+ };
+
+ const auto readU64 = [&]() {
+ auto V = readBytesAs<uint64_t>(MatchTable + CurrentIdx);
+ CurrentIdx += 8;
+ return V;
+ };
+
+ const auto eraseImpl = [&](MachineInstr *MI) {
+ // If we're erasing the insertion point, ensure we don't leave a dangling
+ // pointer in the builder.
+ if (Builder.getInsertPt() == MI)
+ Builder.setInsertPt(*MI->getParent(), ++MI->getIterator());
+ if (Observer)
+ Observer->erasingInstr(*MI);
+ MI->eraseFromParent();
+ };
+
+ while (true) {
+ assert(CurrentIdx != ~0u && "Invalid MatchTable index");
+ uint8_t MatcherOpcode = MatchTable[CurrentIdx++];
+ switch (MatcherOpcode) {
+ case GIM_Try: {
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": Begin try-block\n");
+ OnFailResumeAt.push_back(readU32());
+ break;
+ }
-//===----------------------------------------------------------------------===//
-// Base Classes
-//
-// These are the core classes that the combiner backend relies on.
-//===----------------------------------------------------------------------===//
+ case GIM_RecordInsn:
+ case GIM_RecordInsnIgnoreCopies: {
+ uint64_t NewInsnID = readULEB();
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+
+ // As an optimisation we require that MIs[0] is always the root. Refuse
+ // any attempt to modify it.
+ assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
+
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg()) {
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": Not a register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ if (MO.getReg().isPhysical()) {
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": Is a physical register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineInstr *NewMI;
+ if (MatcherOpcode == GIM_RecordInsnIgnoreCopies)
+ NewMI = getDefIgnoringCopies(MO.getReg(), MRI);
+ else
+ NewMI = MRI.getVRegDef(MO.getReg());
+
+ if ((size_t)NewInsnID < State.MIs.size())
+ State.MIs[NewInsnID] = NewMI;
+ else {
+ assert((size_t)NewInsnID == State.MIs.size() &&
+ "Expected to store MIs in order");
+ State.MIs.push_back(NewMI);
+ }
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": MIs[" << NewInsnID
+ << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
+ << ")\n");
+ break;
+ }
-/// All arguments of the defs operator must be subclasses of GIDefKind or
-/// sub-dags whose operator is GIDefKindWithArgs.
-class GIDefKind;
-class GIDefKindWithArgs;
+ case GIM_CheckFeatures: {
+ uint16_t ExpectedBitsetID = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckFeatures(ExpectedBitsetID="
+ << ExpectedBitsetID << ")\n");
+ if ((AvailableFeatures & ExecInfo.FeatureBitsets[ExpectedBitsetID]) !=
+ ExecInfo.FeatureBitsets[ExpectedBitsetID]) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckOpcode:
+ case GIM_CheckOpcodeIsEither: {
+ uint64_t InsnID = readULEB();
+ uint16_t Expected0 = readU16();
+ uint16_t Expected1 = -1;
+ if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+ Expected1 = readU16();
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ unsigned Opcode = State.MIs[InsnID]->getOpcode();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(), {
+ dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+ << "], ExpectedOpcode=" << Expected0;
+ if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+ dbgs() << " || " << Expected1;
+ dbgs() << ") // Got=" << Opcode << "\n";
+ });
+
+ if (Opcode != Expected0 && Opcode != Expected1) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_SwitchOpcode: {
+ uint64_t InsnID = readULEB();
+ uint16_t LowerBound = readU16();
+ uint16_t UpperBound = readU16();
+ uint32_t Default = readU32();
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ const int64_t Opcode = State.MIs[InsnID]->getOpcode();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(), {
+ dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
+ << LowerBound << ", " << UpperBound << "), Default=" << Default
+ << ", JumpTable...) // Got=" << Opcode << "\n";
+ });
+ if (Opcode < LowerBound || UpperBound <= Opcode) {
+ CurrentIdx = Default;
+ break;
+ }
+ const auto EntryIdx = (Opcode - LowerBound);
+ // Each entry is 4 bytes
+ CurrentIdx =
+ readBytesAs<uint32_t>(MatchTable + CurrentIdx + (EntryIdx * 4));
+ if (!CurrentIdx) {
+ CurrentIdx = Default;
+ break;
+ }
+ OnFailResumeAt.push_back(Default);
+ break;
+ }
-/// Declare a root node. There must be at least one of these in every combine
-/// rule.
-def root : GIDefKind;
+ case GIM_SwitchType: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t LowerBound = readU16();
+ uint16_t UpperBound = readU16();
+ int64_t Default = readU32();
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(), {
+ dbgs() << CurrentIdx << ": GIM_SwitchType(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "), [" << LowerBound << ", "
+ << UpperBound << "), Default=" << Default
+ << ", JumpTable...) // Got=";
+ if (!MO.isReg())
+ dbgs() << "Not a VReg\n";
+ else
+ dbgs() << MRI.getType(MO.getReg()) << "\n";
+ });
+ if (!MO.isReg()) {
+ CurrentIdx = Default;
+ break;
+ }
+ const LLT Ty = MRI.getType(MO.getReg());
+ const auto TyI = ExecInfo.TypeIDMap.find(Ty);
+ if (TyI == ExecInfo.TypeIDMap.end()) {
+ CurrentIdx = Default;
+ break;
+ }
+ const int64_t TypeID = TyI->second;
+ if (TypeID < LowerBound || UpperBound <= TypeID) {
+ CurrentIdx = Default;
+ break;
+ }
+ const auto NumEntry = (TypeID - LowerBound);
+ // Each entry is 4 bytes
+ CurrentIdx =
+ readBytesAs<uint32_t>(MatchTable + CurrentIdx + (NumEntry * 4));
+ if (!CurrentIdx) {
+ CurrentIdx = Default;
+ break;
+ }
+ OnFailResumeAt.push_back(Default);
+ break;
+ }
-def defs;
+ case GIM_CheckNumOperandsGE:
+ case GIM_CheckNumOperandsLE: {
+ uint64_t InsnID = readULEB();
+ uint64_t Expected = readULEB();
+ const bool IsLE = (MatcherOpcode == GIM_CheckNumOperandsLE);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckNumOperands"
+ << (IsLE ? "LE" : "GE") << "(MIs[" << InsnID
+ << "], Expected=" << Expected << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ const unsigned NumOps = State.MIs[InsnID]->getNumOperands();
+ if (IsLE ? (NumOps > Expected) : (NumOps < Expected)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckNumOperands: {
+ uint64_t InsnID = readULEB();
+ uint64_t Expected = readULEB();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
+ << InsnID << "], Expected=" << Expected << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (State.MIs[InsnID]->getNumOperands() != Expected) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckI64ImmPredicate:
+ case GIM_CheckImmOperandPredicate: {
+ uint64_t InsnID = readULEB();
+ unsigned OpIdx =
+ MatcherOpcode == GIM_CheckImmOperandPredicate ? readULEB() : 1;
+ uint16_t Predicate = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckImmPredicate(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert((State.MIs[InsnID]->getOperand(OpIdx).isImm() ||
+ State.MIs[InsnID]->getOperand(OpIdx).isCImm()) &&
+ "Expected immediate operand");
+ assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
+ int64_t Value = 0;
+ if (State.MIs[InsnID]->getOperand(OpIdx).isCImm())
+ Value = State.MIs[InsnID]->getOperand(OpIdx).getCImm()->getSExtValue();
+ else if (State.MIs[InsnID]->getOperand(OpIdx).isImm())
+ Value = State.MIs[InsnID]->getOperand(OpIdx).getImm();
+ else
+ llvm_unreachable("Expected Imm or CImm operand");
+
+ if (!testImmPredicate_I64(Predicate, Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAPIntImmPredicate: {
+ uint64_t InsnID = readULEB();
+ uint16_t Predicate = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+ "Expected G_CONSTANT");
+ assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
+ if (!State.MIs[InsnID]->getOperand(1).isCImm())
+ llvm_unreachable("Expected Imm or CImm operand");
+
+ const APInt &Value =
+ State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
+ if (!testImmPredicate_APInt(Predicate, Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAPFloatImmPredicate: {
+ uint64_t InsnID = readULEB();
+ uint16_t Predicate = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
+ "Expected G_FCONSTANT");
+ assert(State.MIs[InsnID]->getOperand(1).isFPImm() &&
+ "Expected FPImm operand");
+ assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
+ const APFloat &Value =
+ State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();
+
+ if (!testImmPredicate_APFloat(Predicate, Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckLeafOperandPredicate: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t Predicate = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckLeafOperandPredicate(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOperand(OpIdx).isReg() &&
+ "Expected register operand");
+ assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+
+ if (!testMOPredicate_MO(Predicate, MO, State))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckIsBuildVectorAllOnes:
+ case GIM_CheckIsBuildVectorAllZeros: {
+ uint64_t InsnID = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
+ << InsnID << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
+ MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
+ "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");
+
+ if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
+ if (!isBuildVectorAllOnes(*MI, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ } else {
+ if (!isBuildVectorAllZeros(*MI, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ }
+
+ break;
+ }
+ case GIM_CheckSimplePredicate: {
+ // Note: we don't check for invalid here because this is purely a hook to
+ // allow some executors (such as the combiner) to check arbitrary,
+ // contextless predicates, such as whether a rule is enabled or not.
+ uint16_t Predicate = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckSimplePredicate(Predicate="
+ << Predicate << ")\n");
+ assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
+ if (!testSimplePredicate(Predicate)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckCxxInsnPredicate: {
+ uint64_t InsnID = readULEB();
+ uint16_t Predicate = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckCxxPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
+
+ if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID], State))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckHasNoUse: {
+ uint64_t InsnID = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckHasNoUse(MIs["
+ << InsnID << "]\n");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert(MI && "Used insn before defined");
+ assert(MI->getNumDefs() > 0 && "No defs");
+ const Register Res = MI->getOperand(0).getReg();
+
+ if (!MRI.use_nodbg_empty(Res)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckHasOneUse: {
+ uint64_t InsnID = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckHasOneUse(MIs["
+ << InsnID << "]\n");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert(MI && "Used insn before defined");
+ assert(MI->getNumDefs() > 0 && "No defs");
+ const Register Res = MI->getOperand(0).getReg();
+
+ if (!MRI.hasOneNonDBGUse(Res)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckAtomicOrdering: {
+ uint64_t InsnID = readULEB();
+ auto Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (MMO->getMergedOrdering() != Ordering)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAtomicOrderingOrStrongerThan: {
+ uint64_t InsnID = readULEB();
+ auto Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (!isAtLeastOrStrongerThan(MMO->getMergedOrdering(), Ordering))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAtomicOrderingWeakerThan: {
+ uint64_t InsnID = readULEB();
+ auto Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (!isStrongerThan(Ordering, MMO->getMergedOrdering()))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckMemoryAddressSpace: {
+ uint64_t InsnID = readULEB();
+ uint64_t MMOIdx = readULEB();
+ // This accepts a list of possible address spaces.
+ const uint64_t NumAddrSpace = MatchTable[CurrentIdx++];
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ // Need to still jump to the end of the list of address spaces if we find
+ // a match earlier.
+ const uint64_t LastIdx = CurrentIdx + NumAddrSpace;
+
+ const MachineMemOperand *MMO =
+ *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+ const unsigned MMOAddrSpace = MMO->getAddrSpace();
+
+ bool Success = false;
+ for (unsigned I = 0; I != NumAddrSpace; ++I) {
+ uint64_t AddrSpace = readULEB();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << "addrspace(" << MMOAddrSpace << ") vs "
+ << AddrSpace << '\n');
+
+ if (AddrSpace == MMOAddrSpace) {
+ Success = true;
+ break;
+ }
+ }
+
+ CurrentIdx = LastIdx;
+ if (!Success && handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckMemoryAlignment: {
+ uint64_t InsnID = readULEB();
+ uint64_t MMOIdx = readULEB();
+ uint64_t MinAlign = MatchTable[CurrentIdx++];
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO =
+ *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
+ << "(MIs[" << InsnID << "]->memoperands() + "
+ << MMOIdx << ")->getAlignment() >= " << MinAlign
+ << ")\n");
+ if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_CheckMemorySizeEqualTo: {
+ uint64_t InsnID = readULEB();
+ uint64_t MMOIdx = readULEB();
+ uint32_t Size = readU32();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckMemorySizeEqual(MIs["
+ << InsnID << "]->memoperands() + " << MMOIdx
+ << ", Size=" << Size << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO =
+ *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << MMO->getSize()
+ << " bytes vs " << Size
+ << " bytes\n");
+ if (MMO->getSize() != Size)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_CheckMemorySizeEqualToLLT:
+ case GIM_CheckMemorySizeLessThanLLT:
+ case GIM_CheckMemorySizeGreaterThanLLT: {
+ uint64_t InsnID = readULEB();
+ uint64_t MMOIdx = readULEB();
+ uint64_t OpIdx = readULEB();
+
+ DEBUG_WITH_TYPE(
+ TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckMemorySize"
+ << (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT ? "EqualTo"
+ : MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT
+ ? "GreaterThan"
+ : "LessThan")
+ << "LLT(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
+ << ", OpIdx=" << OpIdx << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg()) {
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": Not a register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO =
+ *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+
+ const TypeSize Size = MRI.getType(MO.getReg()).getSizeInBits();
+ if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
+ MMO->getSizeInBits() != Size) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
+ TypeSize::isKnownGE(MMO->getSizeInBits().getValue(), Size)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
+ TypeSize::isKnownLE(MMO->getSizeInBits().getValue(), Size))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_RootCheckType:
+ case GIM_CheckType: {
+ uint64_t InsnID = (MatcherOpcode == GIM_RootCheckType) ? 0 : readULEB();
+ uint64_t OpIdx = readULEB();
+ int TypeID = readS8();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), TypeID=" << TypeID << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg() || MRI.getType(MO.getReg()) != getTypeFromIdx(TypeID)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckPointerToAny: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint64_t SizeInBits = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), SizeInBits=" << SizeInBits << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ const LLT Ty = MRI.getType(MO.getReg());
+
+ // iPTR must be looked up in the target.
+ if (SizeInBits == 0) {
+ MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
+ const unsigned AddrSpace = Ty.getAddressSpace();
+ SizeInBits = MF->getDataLayout().getPointerSizeInBits(AddrSpace);
+ }
+
+ assert(SizeInBits != 0 && "Pointer size must be known");
+
+ if (MO.isReg()) {
+ if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ } else if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_RecordNamedOperand: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint64_t StoreIdx = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), StoreIdx=" << StoreIdx << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
+ State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
+ break;
+ }
+ case GIM_RecordRegType: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ int TypeIdx = readS8();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_RecordRegType(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), TypeIdx=" << TypeIdx << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(TypeIdx < 0 && "Temp types always have negative indexes!");
+ // Indexes start at -1.
+ TypeIdx = 1 - TypeIdx;
+ const auto &Op = State.MIs[InsnID]->getOperand(OpIdx);
+ if (State.RecordedTypes.size() <= (uint64_t)TypeIdx)
+ State.RecordedTypes.resize(TypeIdx + 1, LLT());
+ State.RecordedTypes[TypeIdx] = MRI.getType(Op.getReg());
+ break;
+ }
-def pattern;
-def match;
-def apply;
-def combine;
-def empty_action;
+ case GIM_RootCheckRegBankForClass:
+ case GIM_CheckRegBankForClass: {
+ uint64_t InsnID =
+ (MatcherOpcode == GIM_RootCheckRegBankForClass) ? 0 : readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t RCEnum = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), RCEnum=" << RCEnum << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg() ||
+ &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum),
+ MRI.getType(MO.getReg())) !=
+ RBI.getRegBank(MO.getReg(), MRI, TRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
-def wip_match_opcode;
+ case GIM_CheckComplexPattern: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t RendererID = readU16();
+ uint16_t ComplexPredicateID = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
+ << "] = GIM_CheckComplexPattern(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), ComplexPredicateID=" << ComplexPredicateID
+ << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ // FIXME: Use std::invoke() when it's available.
+ ComplexRendererFns Renderer =
+ (Exec.*ExecInfo.ComplexPredicates[ComplexPredicateID])(
+ State.MIs[InsnID]->getOperand(OpIdx));
+ if (Renderer)
+ State.Renderers[RendererID] = *Renderer;
+ else if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
-// Common base class for GICombineRule and GICombineGroup.
-class GICombine {
- // See GICombineGroup. We only declare it here to make the tablegen pass
- // simpler.
- list<GICombine> Rules = ?;
-}
+ case GIM_CheckConstantInt:
+ case GIM_CheckConstantInt8: {
+ const bool IsInt8 = (MatcherOpcode == GIM_CheckConstantInt8);
+
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint64_t Value = IsInt8 ? (int64_t)readS8() : readU64();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (MO.isReg()) {
+ // isOperandImmEqual() will sign-extend to 64-bits, so should we.
+ LLT Ty = MRI.getType(MO.getReg());
+ // If the type is > 64 bits, it can't be a constant int, so we bail
+ // early because SignExtend64 will assert otherwise.
+ if (Ty.getScalarSizeInBits() > 64) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ Value = SignExtend64(Value, Ty.getScalarSizeInBits());
+ if (!isOperandImmEqual(MO, Value, MRI, /*Splat=*/true)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ } else if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
-// A group of combine rules that can be added to a GICombiner or another group.
-class GICombineGroup<list<GICombine> rules> : GICombine {
- // The rules contained in this group. The rules in a group are flattened into
- // a single list and sorted into whatever order is most efficient. However,
- // they will never be re-ordered such that behaviour differs from the
- // specified order. It is therefore possible to use the order of rules in this
- // list to describe priorities.
- let Rules = rules;
-}
+ case GIM_CheckLiteralInt: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ int64_t Value = readU64();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (MO.isImm() && MO.getImm() == Value)
+ break;
+
+ if (MO.isCImm() && MO.getCImm()->equalsInt(Value))
+ break;
+
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
-// Declares a combiner implementation class
-class GICombiner<string classname, list<GICombine> rules>
- : GICombineGroup<rules> {
- // The class name to use in the generated output.
- string Classname = classname;
- // Combiners can use this so they're free to define tryCombineAll themselves
- // and do extra work before/after calling the TableGen-erated code.
- string CombineAllMethodName = "tryCombineAll";
-}
+ case GIM_CheckIntrinsicID: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t Value = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckCmpPredicate: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t Value = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckCmpPredicate(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isPredicate() || MO.getPredicate() != Value)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckIsMBB: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "))\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckIsImm: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsImm(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "))\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isImm()) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckIsSafeToFold: {
+ uint64_t NumInsn = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(N = "
+ << NumInsn << ")\n");
+ MachineInstr &Root = *State.MIs[0];
+ for (unsigned K = 1, E = NumInsn + 1; K < E; ++K) {
+ if (!isObviouslySafeToFold(*State.MIs[K], Root)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ }
+ break;
+ }
+ case GIM_CheckIsSameOperand:
+ case GIM_CheckIsSameOperandIgnoreCopies: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint64_t OtherInsnID = readULEB();
+ uint64_t OtherOpIdx = readULEB();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
+ << InsnID << "][" << OpIdx << "], MIs["
+ << OtherInsnID << "][" << OtherOpIdx << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");
+
+ MachineOperand &Op = State.MIs[InsnID]->getOperand(OpIdx);
+ MachineOperand &OtherOp = State.MIs[OtherInsnID]->getOperand(OtherOpIdx);
+
+ if (MatcherOpcode == GIM_CheckIsSameOperandIgnoreCopies) {
+ if (Op.isReg() && OtherOp.isReg()) {
+ if (getSrcRegIgnoringCopies(Op.getReg(), MRI) ==
+ getSrcRegIgnoringCopies(OtherOp.getReg(), MRI))
+ break;
+ }
+ }
+
+ if (!Op.isIdenticalTo(OtherOp)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckCanReplaceReg: {
+ uint64_t OldInsnID = readULEB();
+ uint64_t OldOpIdx = readULEB();
+ uint64_t NewInsnID = readULEB();
+ uint64_t NewOpIdx = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckCanReplaceReg(MIs["
+ << OldInsnID << "][" << OldOpIdx << "] = MIs["
+ << NewInsnID << "][" << NewOpIdx << "])\n");
+
+ Register Old = State.MIs[OldInsnID]->getOperand(OldOpIdx).getReg();
+ Register New = State.MIs[NewInsnID]->getOperand(NewOpIdx).getReg();
+ if (!canReplaceReg(Old, New, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_MIFlags: {
+ uint64_t InsnID = readULEB();
+ uint32_t Flags = readU32();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_MIFlags(MIs[" << InsnID
+ << "], " << Flags << ")\n");
+ if ((State.MIs[InsnID]->getFlags() & Flags) != Flags) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_MIFlagsNot: {
+ uint64_t InsnID = readULEB();
+ uint32_t Flags = readU32();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_MIFlagsNot(MIs[" << InsnID
+ << "], " << Flags << ")\n");
+ if ((State.MIs[InsnID]->getFlags() & Flags)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_Reject:
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_Reject\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ case GIR_MutateOpcode: {
+ uint64_t OldInsnID = readULEB();
+ uint64_t NewInsnID = readULEB();
+ uint32_t NewOpcode = readU16();
+ if (NewInsnID >= OutMIs.size())
+ OutMIs.resize(NewInsnID + 1);
+
+ MachineInstr *OldMI = State.MIs[OldInsnID];
+ if (Observer)
+ Observer->changingInstr(*OldMI);
+ OutMIs[NewInsnID] = MachineInstrBuilder(*OldMI->getMF(), OldMI);
+ OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
+ if (Observer)
+ Observer->changedInstr(*OldMI);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << NewOpcode << ")\n");
+ break;
+ }
-/// Declares data that is passed from the match stage to the apply stage.
-class GIDefMatchData<string type> {
- /// A C++ type name indicating the storage type.
- string Type = type;
-}
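-// For illustration only (not part of the original file): a hypothetical
-// matchdata declaration that stores a Register found during the match
-// stage so the apply stage can reuse it:
-//   def example_reg_matchdata : GIDefMatchData<"Register">;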
+ case GIR_BuildRootMI:
+ case GIR_BuildMI: {
+ uint64_t NewInsnID = (MatcherOpcode == GIR_BuildRootMI) ? 0 : readULEB();
+ uint32_t Opcode = readU16();
+ if (NewInsnID >= OutMIs.size())
+ OutMIs.resize(NewInsnID + 1);
+
+ OutMIs[NewInsnID] = Builder.buildInstr(Opcode);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
+ << NewInsnID << "], " << Opcode << ")\n");
+ break;
+ }
-class GICombineRule<dag defs, dag a0, dag a1 = (empty_action)> : GICombine {
- /// Defines the external interface of the match rule. This includes:
- /// * The names of the root nodes (requires at least one)
- /// See GIDefKind for details.
- dag Defs = defs;
-
- // The patterns that will be used. Two types of list can exist:
- // match (Action0) + apply (Action1).
- // combine (Action0) + empty_action (Action1).
- dag Action0 = a0;
- dag Action1 = a1;
-
- /// Defines the predicates that are checked before the match function
- /// is called. Targets can use this to, for instance, check Subtarget
- /// features.
- list<Predicate> Predicates = [];
-
- // Maximum number of permutations of this rule that can be emitted.
- // Set to -1 to disable the limit.
- int MaxPermutations = 16;
-}
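-// A minimal sketch of a rule built from this class (illustrative; it
-// mirrors the fneg_fneg_fold rule defined further below):
-//   def example_fneg_fneg : GICombineRule<
-//     (defs root:$dst),
-//     (match (G_FNEG $t, $src), (G_FNEG $dst, $t)),
-//     (apply (GIReplaceReg $dst, $src))>;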
+ case GIR_BuildConstant: {
+ uint64_t TempRegID = readULEB();
+ uint64_t Imm = readU64();
+ Builder.buildConstant(State.TempRegisters[TempRegID], Imm);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_BuildConstant(TempReg["
+ << TempRegID << "], Imm=" << Imm << ")\n");
+ break;
+ }
-def gi_mo;
-def gi_imm;
+ case GIR_RootToRootCopy:
+ case GIR_Copy: {
+ uint64_t NewInsnID =
+ (MatcherOpcode == GIR_RootToRootCopy) ? 0 : readULEB();
+ uint64_t OldInsnID =
+ (MatcherOpcode == GIR_RootToRootCopy) ? 0 : readULEB();
+ uint64_t OpIdx = readULEB();
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs()
+ << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
+ << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
+ break;
+ }
-// This is an equivalent of PatFrags but for MIR Patterns.
-//
-// GICombinePatFrags can be used in place of instructions for 'match' patterns.
-// Much like normal instructions, the defs (outs) come first, and the ins second
-//
-// Out operands can only be of type "root" or "gi_mo", and they must be defined
-// by an instruction pattern in all alternatives.
-//
-// In operands can be gi_imm or gi_mo. They cannot be redefined in any alternative
-// pattern and may only appear in the C++ code, or in the output operand of an
-// instruction pattern.
-class GICombinePatFrag<dag outs, dag ins, list<dag> alts> {
- dag InOperands = ins;
- dag OutOperands = outs;
- list<dag> Alternatives = alts;
-}
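-// A minimal sketch (illustrative only): a frag whose two alternatives
-// match a G_AND or a G_OR of the same operands, comparable to the
-// binop_same_val_frags definition further below:
-//   def example_and_or_frag : GICombinePatFrag<
-//     (outs root:$dst), (ins $a, $b),
-//     [(pattern (G_AND $dst, $a, $b)),
-//      (pattern (G_OR $dst, $a, $b))]>;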
+ case GIR_CopyRemaining: {
+ uint64_t NewInsnID = readULEB();
+ uint64_t OldInsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ MachineInstr &OldMI = *State.MIs[OldInsnID];
+ MachineInstrBuilder &NewMI = OutMIs[NewInsnID];
+ for (const auto &Op : drop_begin(OldMI.operands(), OpIdx))
+ NewMI.add(Op);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyRemaining(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID
+ << "], /*start=*/" << OpIdx << ")\n");
+ break;
+ }
-//===----------------------------------------------------------------------===//
-// Pattern Special Types
-//===----------------------------------------------------------------------===//
+ case GIR_CopyOrAddZeroReg: {
+ uint64_t NewInsnID = readULEB();
+ uint64_t OldInsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t ZeroReg = readU16();
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
+ if (isOperandImmEqual(MO, 0, MRI))
+ OutMIs[NewInsnID].addReg(ZeroReg);
+ else
+ OutMIs[NewInsnID].add(MO);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << OpIdx << ", " << ZeroReg << ")\n");
+ break;
+ }
-class GISpecialType;
+ case GIR_CopySubReg: {
+ uint64_t NewInsnID = readULEB();
+ uint64_t OldInsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t SubRegIdx = readU16();
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
+ {}, SubRegIdx);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << OpIdx << ", " << SubRegIdx << ")\n");
+ break;
+ }
-// In an apply pattern, GITypeOf can be used to set the type of a new temporary
-// register to match the type of a matched register.
-//
-// This can only be used on temporary registers defined by the apply pattern.
-//
-// TODO: Make this work in matchers as well?
-//
-// FIXME: Syntax is very ugly.
-class GITypeOf<string opName> : GISpecialType {
- string OpName = opName;
-}
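-// A minimal sketch (illustrative, not from the original file): a typed
-// zero immediate whose type follows the matched $x, as in a variant of
-// the mul_by_neg_one rule further below:
-//   (apply (G_SUB $dst, (GITypeOf<"$x"> 0), $x))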
+ case GIR_AddImplicitDef: {
+ uint64_t InsnID = readULEB();
+ uint16_t RegNum = readU16();
+ RegState Flags = static_cast<RegState>(readU16());
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ Flags |= RegState::Implicit;
+ OutMIs[InsnID].addDef(RegNum, Flags);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
+ << InsnID << "], " << RegNum << ", "
+ << static_cast<uint16_t>(Flags) << ")\n");
+ break;
+ }
-// The type of an operand that can match a variable amount of operands.
-// This type contains a minimum and maximum number of operands to match.
-// The minimum must be 1 or more, as we cannot have an operand representing
-// zero operands, and the max can be zero (which means "unlimited") or a value
-// greater than the minimum.
-class GIVariadic<int min = 1, int max = 0> : GISpecialType {
- int MinArgs = min;
- int MaxArgs = max;
-}
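-// A minimal sketch (illustrative): match a G_BUILD_VECTOR with between
-// two and four sources; the bound name exposes the whole operand range,
-// as merge_unmerge does with GIVariadic<1> further below:
-//   (match (G_BUILD_VECTOR $dst, GIVariadic<2, 4>:$srcs))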
+ case GIR_AddImplicitUse: {
+ uint64_t InsnID = readULEB();
+ uint16_t RegNum = readU16();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
+ << InsnID << "], " << RegNum << ")\n");
+ break;
+ }
-//===----------------------------------------------------------------------===//
-// Pattern Builtins
-//===----------------------------------------------------------------------===//
+ case GIR_AddRegister: {
+ uint64_t InsnID = readULEB();
+ uint16_t RegNum = readU16();
+ RegState RegFlags = static_cast<RegState>(readU16());
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addReg(RegNum, RegFlags);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
+ << InsnID << "], " << RegNum << ", "
+ << static_cast<uint16_t>(RegFlags) << ")\n");
+ break;
+ }
+ case GIR_AddIntrinsicID: {
+ uint64_t InsnID = readULEB();
+ uint16_t Value = readU16();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addIntrinsicID((Intrinsic::ID)Value);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddIntrinsicID(OutMIs["
+ << InsnID << "], " << Value << ")\n");
+ break;
+ }
+ case GIR_SetImplicitDefDead: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_SetImplicitDefDead(OutMIs["
+ << InsnID << "], OpIdx=" << OpIdx << ")\n");
+ MachineInstr *MI = OutMIs[InsnID];
+ assert(MI && "Modifying undefined instruction");
+ MI->getOperand(MI->getNumExplicitOperands() + OpIdx).setIsDead();
+ break;
+ }
+ case GIR_SetMIFlags: {
+ uint64_t InsnID = readULEB();
+ uint32_t Flags = readU32();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_SetMIFlags(OutMIs["
+ << InsnID << "], " << Flags << ")\n");
+ MachineInstr *MI = OutMIs[InsnID];
+ MI->setFlags(MI->getFlags() | Flags);
+ break;
+ }
+ case GIR_UnsetMIFlags: {
+ uint64_t InsnID = readULEB();
+ uint32_t Flags = readU32();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_UnsetMIFlags(OutMIs["
+ << InsnID << "], " << Flags << ")\n");
+ MachineInstr *MI = OutMIs[InsnID];
+ MI->setFlags(MI->getFlags() & ~Flags);
+ break;
+ }
+ case GIR_CopyMIFlags: {
+ uint64_t InsnID = readULEB();
+ uint64_t OldInsnID = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyMIFlags(OutMIs["
+ << InsnID << "], MIs[" << OldInsnID << "])\n");
+ MachineInstr *MI = OutMIs[InsnID];
+ MI->setFlags(MI->getFlags() | State.MIs[OldInsnID]->getFlags());
+ break;
+ }
+ case GIR_AddSimpleTempRegister:
+ case GIR_AddTempRegister:
+ case GIR_AddTempSubRegister: {
+ uint64_t InsnID = readULEB();
+ uint64_t TempRegID = readULEB();
+ RegState TempRegFlags = {};
+ if (MatcherOpcode != GIR_AddSimpleTempRegister)
+ TempRegFlags = static_cast<RegState>(readU16());
+ uint16_t SubReg = 0;
+ if (MatcherOpcode == GIR_AddTempSubRegister)
+ SubReg = readU16();
+
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+ OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags,
+ SubReg);
+ DEBUG_WITH_TYPE(
+ TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs[" << InsnID
+ << "], TempRegisters[" << TempRegID << "]";
+ if (SubReg) dbgs() << '.' << TRI.getSubRegIndexName(SubReg);
+ dbgs() << ", " << static_cast<uint16_t>(TempRegFlags) << ")\n");
+ break;
+ }
-// "Magic" Builtin instructions for MIR patterns.
-// The definitions that implement them (GIReplaceReg, GIEraseRoot) follow below.
-class GIBuiltinInst;
+ case GIR_AddImm8:
+ case GIR_AddImm: {
+ const bool IsAdd8 = (MatcherOpcode == GIR_AddImm8);
+ uint64_t InsnID = readULEB();
+ uint64_t Imm = IsAdd8 ? (int64_t)readS8() : readU64();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addImm(Imm);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
+ << "], " << Imm << ")\n");
+ break;
+ }
-// Replace all references to a register with another one.
-//
-// Usage:
-// (apply (GIReplaceReg $old, $new))
-//
-// Operands:
-// - $old (out) register defined by a matched instruction
-// - $new (in) register
-//
-// Semantics:
-// - Can only appear in an 'apply' pattern.
-// - If both old/new are operands of matched instructions,
-// "canReplaceReg" is checked before applying the rule.
-def GIReplaceReg : GIBuiltinInst;
+ case GIR_AddCImm: {
+ uint64_t InsnID = readULEB();
+ int TypeID = readS8();
+ uint64_t Imm = readU64();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+ unsigned Width = ExecInfo.TypeObjects[TypeID].getScalarSizeInBits();
+ LLVMContext &Ctx = MF->getFunction().getContext();
+ OutMIs[InsnID].addCImm(
+ ConstantInt::get(IntegerType::get(Ctx, Width), Imm, /*signed*/ true));
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddCImm(OutMIs[" << InsnID
+ << "], TypeID=" << TypeID << ", Imm=" << Imm
+ << ")\n");
+ break;
+ }
-// Apply action that erases the match root.
-//
-// Usage:
-// (apply (GIEraseRoot))
-//
-// Semantics:
-// - Can only appear as the only pattern of an 'apply' pattern list.
-// - The root cannot have any output operands.
-// - The root must be a CodeGenInstruction
-//
-// TODO: Allow using this directly, like (apply GIEraseRoot)
-def GIEraseRoot : GIBuiltinInst;
+ case GIR_ComplexRenderer: {
+ uint64_t InsnID = readULEB();
+ uint16_t RendererID = readU16();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ for (const auto &RenderOpFn : State.Renderers[RendererID])
+ RenderOpFn(OutMIs[InsnID]);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
+ << InsnID << "], " << RendererID << ")\n");
+ break;
+ }
+ case GIR_ComplexSubOperandRenderer: {
+ uint64_t InsnID = readULEB();
+ uint16_t RendererID = readU16();
+ uint64_t RenderOpID = readULEB();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIR_ComplexSubOperandRenderer(OutMIs["
+ << InsnID << "], " << RendererID << ", "
+ << RenderOpID << ")\n");
+ break;
+ }
+ case GIR_ComplexSubOperandSubRegRenderer: {
+ uint64_t InsnID = readULEB();
+ uint16_t RendererID = readU16();
+ uint64_t RenderOpID = readULEB();
+ uint16_t SubRegIdx = readU16();
+ MachineInstrBuilder &MI = OutMIs[InsnID];
+ assert(MI && "Attempted to add to undefined instruction");
+ State.Renderers[RendererID][RenderOpID](MI);
+ MI->getOperand(MI->getNumOperands() - 1).setSubReg(SubRegIdx);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIR_ComplexSubOperandSubRegRenderer(OutMIs["
+ << InsnID << "], " << RendererID << ", "
+ << RenderOpID << ", " << SubRegIdx << ")\n");
+ break;
+ }
-//===----------------------------------------------------------------------===//
-// Pattern MIFlags
-//===----------------------------------------------------------------------===//
+ case GIR_CopyConstantAsSImm: {
+ uint64_t NewInsnID = readULEB();
+ uint64_t OldInsnID = readULEB();
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+ "Expected G_CONSTANT");
+ if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
+ OutMIs[NewInsnID].addImm(
+ State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
+ } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
+ OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
+ else
+ llvm_unreachable("Expected Imm or CImm operand");
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+ break;
+ }
-class MIFlagEnum<string enumName> {
- string EnumName = "MachineInstr::" # enumName;
-}
+ // TODO: Needs a test case once we have a pattern that uses this.
+ case GIR_CopyFConstantAsFPImm: {
+ uint64_t NewInsnID = readULEB();
+ uint64_t OldInsnID = readULEB();
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
+ "Expected G_FCONSTANT");
+ if (State.MIs[OldInsnID]->getOperand(1).isFPImm())
+ OutMIs[NewInsnID].addFPImm(
+ State.MIs[OldInsnID]->getOperand(1).getFPImm());
+ else
+ llvm_unreachable("Expected FPImm operand");
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs()
+                          << CurrentIdx << ": GIR_CopyFConstantAsFPImm(OutMIs[
+ << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+ break;
+ }
+
+ case GIR_CustomRenderer: {
+ uint64_t InsnID = readULEB();
+ uint64_t OldInsnID = readULEB();
+ uint16_t RendererFnID = readU16();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
+ << InsnID << "], MIs[" << OldInsnID << "], "
+ << RendererFnID << ")\n");
+ (Exec.*ExecInfo.CustomRenderers[RendererFnID])(
+ OutMIs[InsnID], *State.MIs[OldInsnID],
+ -1); // Not a source operand of the old instruction.
+ break;
+ }
+ case GIR_DoneWithCustomAction: {
+ uint16_t FnID = readU16();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_DoneWithCustomAction(FnID="
+ << FnID << ")\n");
+ assert(FnID > GICXXCustomAction_Invalid && "Expected a valid FnID");
+ if (runCustomAction(FnID, State, OutMIs)) {
+ propagateFlags();
+ return true;
+ }
+
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIR_CustomOperandRenderer: {
+ uint64_t InsnID = readULEB();
+ uint64_t OldInsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t RendererFnID = readU16();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIR_CustomOperandRenderer(OutMIs[" << InsnID
+ << "], MIs[" << OldInsnID << "]->getOperand("
+ << OpIdx << "), " << RendererFnID << ")\n");
+ (Exec.*ExecInfo.CustomRenderers[RendererFnID])(
+ OutMIs[InsnID], *State.MIs[OldInsnID], OpIdx);
+ break;
+ }
+ case GIR_ConstrainOperandRC: {
+ uint64_t InsnID = readULEB();
+ uint64_t OpIdx = readULEB();
+ uint16_t RCEnum = readU16();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ MachineInstr &I = *OutMIs[InsnID].getInstr();
+ MachineFunction &MF = *I.getParent()->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
+ MachineOperand &MO = I.getOperand(OpIdx);
+ constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
+ << InsnID << "], " << OpIdx << ", " << RCEnum
+ << ")\n");
+ break;
+ }
+
+ case GIR_RootConstrainSelectedInstOperands:
+ case GIR_ConstrainSelectedInstOperands: {
+ uint64_t InsnID = (MatcherOpcode == GIR_RootConstrainSelectedInstOperands)
+ ? 0
+ : readULEB();
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
+ RBI);
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx
+ << ": GIR_ConstrainSelectedInstOperands(OutMIs["
+ << InsnID << "])\n");
+ break;
+ }
+ case GIR_MergeMemOperands: {
+ uint64_t InsnID = readULEB();
+ uint64_t NumInsn = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
+ << InsnID << "]");
+ for (unsigned K = 0; K < NumInsn; ++K) {
+ uint64_t NextID = readULEB();
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << ", MIs[" << NextID << "]");
+ for (const auto &MMO : State.MIs[NextID]->memoperands())
+ OutMIs[InsnID].addMemOperand(MMO);
+ }
+ DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << ")\n");
+ break;
+ }
+ case GIR_EraseFromParent: {
+ uint64_t InsnID = readULEB();
+ MachineInstr *MI = State.MIs[InsnID];
+ assert(MI && "Attempted to erase an undefined instruction");
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
+ << InsnID << "])\n");
+ eraseImpl(MI);
+ break;
+ }
+ case GIR_EraseRootFromParent_Done: {
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs()
+ << CurrentIdx << ": GIR_EraseRootFromParent_Done\n");
+ eraseImpl(State.MIs[0]);
+ propagateFlags();
+ return true;
+ }
+ case GIR_MakeTempReg: {
+ uint64_t TempRegID = readULEB();
+ int TypeID = readS8();
+
+ State.TempRegisters[TempRegID] =
+ MRI.createGenericVirtualRegister(getTypeFromIdx(TypeID));
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
+ << "] = GIR_MakeTempReg(" << TypeID << ")\n");
+ break;
+ }
+ case GIR_ReplaceReg: {
+ uint64_t OldInsnID = readULEB();
+ uint64_t OldOpIdx = readULEB();
+ uint64_t NewInsnID = readULEB();
+ uint64_t NewOpIdx = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_ReplaceReg(MIs["
+ << OldInsnID << "][" << OldOpIdx << "] = MIs["
+ << NewInsnID << "][" << NewOpIdx << "])\n");
+
+ Register Old = State.MIs[OldInsnID]->getOperand(OldOpIdx).getReg();
+ Register New = State.MIs[NewInsnID]->getOperand(NewOpIdx).getReg();
+ if (Observer)
+ Observer->changingAllUsesOfReg(MRI, Old);
+ MRI.replaceRegWith(Old, New);
+ if (Observer)
+ Observer->finishedChangingAllUsesOfReg();
+ break;
+ }
+ case GIR_ReplaceRegWithTempReg: {
+ uint64_t OldInsnID = readULEB();
+ uint64_t OldOpIdx = readULEB();
+ uint64_t TempRegID = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_ReplaceRegWithTempReg(MIs["
+ << OldInsnID << "][" << OldOpIdx << "] = TempRegs["
+ << TempRegID << "])\n");
+
+ Register Old = State.MIs[OldInsnID]->getOperand(OldOpIdx).getReg();
+ Register New = State.TempRegisters[TempRegID];
+ if (Observer)
+ Observer->changingAllUsesOfReg(MRI, Old);
+ MRI.replaceRegWith(Old, New);
+ if (Observer)
+ Observer->finishedChangingAllUsesOfReg();
+ break;
+ }
+ case GIR_Coverage: {
+ uint32_t RuleID = readU32();
+ assert(CoverageInfo);
+ CoverageInfo->setCovered(RuleID);
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << CurrentIdx
+ << ": GIR_Coverage("
+ << RuleID << ")");
+ break;
+ }
-def FmNoNans : MIFlagEnum<"FmNoNans">;
-def FmNoInfs : MIFlagEnum<"FmNoInfs">;
-def FmNsz : MIFlagEnum<"FmNsz">;
-def FmArcp : MIFlagEnum<"FmArcp">;
-def FmContract : MIFlagEnum<"FmContract">;
-def FmAfn : MIFlagEnum<"FmAfn">;
-def FmReassoc : MIFlagEnum<"FmReassoc">;
-def IsExact : MIFlagEnum<"IsExact">;
-def NoSWrap : MIFlagEnum<"NoSWrap">;
-def NoUWrap : MIFlagEnum<"NoUWrap">;
-def NonNeg : MIFlagEnum<"NonNeg">;
-def InBounds : MIFlagEnum<"InBounds">;
-
-def MIFlags;
-// def not; -> Already defined as a SDNode
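-// A minimal sketch (illustrative): flags are tested through the MIFlags
-// operand of a pattern, as right_identity_neg_zero_fp_nsz does below:
-//   (match (G_FADD $dst, $x, $y, (MIFlags FmNsz)))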
+ case GIR_Done:
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIR_Done\n");
+ propagateFlags();
+ return true;
+ default:
+ llvm_unreachable("Unexpected command");
+ }
+ }
+}
-//===----------------------------------------------------------------------===//
+} // end namespace llvm
-def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
-def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
-def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;
-
-def register_matchinfo: GIDefMatchData<"Register">;
-def int64_matchinfo: GIDefMatchData<"int64_t">;
-def apint_matchinfo : GIDefMatchData<"APInt">;
-def constantfp_matchinfo : GIDefMatchData<"ConstantFP*">;
-def build_fn_matchinfo :
-GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
-def unsigned_matchinfo: GIDefMatchData<"unsigned">;
-def register_vector_matchinfo : GIDefMatchData<"SmallVector<Register>">;
-def mi_vector_matchinfo : GIDefMatchData<"SmallVector<MachineInstr *>">;
-
-def copy_prop : GICombineRule<
- (defs root:$d),
- (match (COPY $d, $s):$mi,
- [{ return Helper.matchCombineCopy(*${mi}); }]),
- (apply [{ Helper.applyCombineCopy(*${mi}); }])>;
-
-// idempotent operations
-// Fold (freeze (freeze x)) -> (freeze x).
-// Fold (fabs (fabs x)) -> (fabs x).
-// Fold (fcanonicalize (fcanonicalize x)) -> (fcanonicalize x).
-def idempotent_prop_frags : GICombinePatFrag<
- (outs root:$dst, $src), (ins),
- !foreach(op, [G_FREEZE, G_FABS, G_FCANONICALIZE],
- (pattern (op $dst, $src), (op $src, $x)))>;
-
-def idempotent_prop : GICombineRule<
- (defs root:$dst),
- (match (idempotent_prop_frags $dst, $src)),
- (apply (GIReplaceReg $dst, $src))>;
-
-// Convert freeze(Op(Op0, NonPoisonOps...)) to Op(freeze(Op0), NonPoisonOps...)
-// when Op0 is not guaranteed non-poison
-def push_freeze_to_prevent_poison_from_propagating : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_FREEZE $dst, $src):$root,
- [{ return !isGuaranteedNotToBePoison(${src}.getReg(), MRI) && Helper.matchFreezeOfSingleMaybePoisonOperand(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def extending_loads : GICombineRule<
- (defs root:$root, extending_load_matchdata:$matchinfo),
- (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
- [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;
-
-def load_and_mask : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_AND):$root,
- [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;
-
-def sext_trunc_sextload : GICombineRule<
- (defs root:$d),
- (match (wip_match_opcode G_SEXT_INREG):$d,
- [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
- (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;
-
-def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
-def sext_inreg_of_load : GICombineRule<
- (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
- (match (wip_match_opcode G_SEXT_INREG):$root,
- [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;
-
-def sext_inreg_to_zext_inreg : GICombineRule<
- (defs root:$dst),
- (match
- (G_SEXT_INREG $dst, $src, $imm):$root,
- [{
- unsigned BitWidth = MRI.getType(${src}.getReg()).getScalarSizeInBits();
- return Helper.getValueTracking()->maskedValueIsZero(${src}.getReg(),
- APInt::getOneBitSet(BitWidth, ${imm}.getImm() - 1)); }]),
- (apply [{
- Helper.getBuilder().setInstrAndDebugLoc(*${root});
- Helper.getBuilder().buildZExtInReg(${dst}, ${src}, ${imm}.getImm());
- ${root}->eraseFromParent();
- }])
->;
-
-def combine_extracted_vector_load : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
- [{ return Helper.matchCombineExtractedVectorLoad(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def combine_indexed_load_store : GICombineRule<
- (defs root:$root, indexed_load_store_matchdata:$matchinfo),
- (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
- [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;
-
-def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
-def opt_brcond_by_inverting_cond : GICombineRule<
- (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
- (match (wip_match_opcode G_BR):$root,
- [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;
-
-def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
-def ptr_add_immed_chain : GICombineRule<
- (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
- (match (wip_match_opcode G_PTR_ADD):$d,
- [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;
-
-def shift_const_op : GICombinePatFrag<
- (outs root:$dst), (ins),
- !foreach(op,
- [G_SHL, G_ASHR, G_LSHR],
- (pattern (op $dst, $shifted, $amt)))>;
-def shift_result_matchdata : GIDefMatchData<"std::optional<int64_t>">;
-def shifts_too_big : GICombineRule<
- (defs root:$root, shift_result_matchdata:$matchinfo),
- (match (shift_const_op $root):$mi,
- [{ return Helper.matchShiftsTooBig(*${mi}, ${matchinfo}); }]),
- (apply [{
- if (${matchinfo}) {
- Helper.replaceInstWithConstant(*${mi}, *${matchinfo});
- } else {
- Helper.replaceInstWithUndef(*${mi});
- }
- }])>;
-
-// Fold shift (shift base, x), y -> shift base, (x+y), if both shifts are the same kind
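-// e.g. (shl (shl base, 2), 3) -> (shl base, 5).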
-def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
-def shift_immed_chain : GICombineRule<
- (defs root:$d, shift_immed_matchdata:$matchinfo),
- (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
- [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;
-
-// Transform shift (logic (shift X, C0), Y), C1
-// -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
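-// e.g. (shl (and (shl X, 2), Y), 3) -> (and (shl X, 5), (shl Y, 3)).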
-def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
-def shift_of_shifted_logic_chain : GICombineRule<
- (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
- (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
- [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;
-
-def mul_to_shl : GICombineRule<
- (defs root:$d, unsigned_matchinfo:$matchinfo),
- (match (G_MUL $d, $op1, $op2):$mi,
- [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
- (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;
-
-// (sub x, C) -> (add x, -C)
-def sub_to_add : GICombineRule<
- (defs root:$d, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $c, $imm),
- (G_SUB $d, $op1, $c):$mi,
- [{ return Helper.matchCombineSubToAdd(*${mi}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnNoErase(*${mi}, ${matchinfo}); }])>;
-
-// shl ([asz]ext x), y => zext (shl x, y), if the shift does not overflow the source type
-def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
-def reduce_shl_of_extend : GICombineRule<
- (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
- (match (G_SHL $dst, $src0, $src1):$mi,
- [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
- (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;
-
-// Combine (bitreverse (shl (bitreverse x), y)) -> (lshr x, y)
-def bitreverse_shl : GICombineRule<
- (defs root:$d),
- (match (G_BITREVERSE $rev, $val),
- (G_SHL $src, $rev, $amt):$mi,
- (G_BITREVERSE $d, $src),
- [{ return Helper.isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR,
- {MRI.getType(${val}.getReg()),
- MRI.getType(${amt}.getReg())}}); }]),
- (apply (G_LSHR $d, $val, $amt))>;
-
-// Combine (bitreverse (lshr (bitreverse x), y)) -> (shl x, y)
-def bitreverse_lshr : GICombineRule<
- (defs root:$d),
- (match (G_BITREVERSE $rev, $val),
- (G_LSHR $src, $rev, $amt):$mi,
- (G_BITREVERSE $d, $src),
- [{ return Helper.isLegalOrBeforeLegalizer({TargetOpcode::G_SHL,
- {MRI.getType(${val}.getReg()),
- MRI.getType(${amt}.getReg())}}); }]),
- (apply (G_SHL $d, $val, $amt))>;
-
-// Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
-// Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
-def commute_shift : GICombineRule<
- (defs root:$d, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SHL):$d,
- [{ return Helper.matchCommuteShift(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${d}, ${matchinfo}); }])>;
-
-// Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (lshr x, (C1 + C2))
-def lshr_of_trunc_of_lshr_matchdata : GIDefMatchData<"LshrOfTruncOfLshr">;
-def lshr_of_trunc_of_lshr : GICombineRule<
- (defs root:$root, lshr_of_trunc_of_lshr_matchdata:$matchinfo),
- (match (G_LSHR $d1, $x, $y):$Shift,
- (G_TRUNC $d2, $d1),
- (G_LSHR $dst, $d2, $z):$root,
- [{ return Helper.matchLshrOfTruncOfLshr(*${root}, ${matchinfo}, *${Shift}); }]),
- (apply [{ Helper.applyLshrOfTruncOfLshr(*${root}, ${matchinfo}); }])>;
-
-def narrow_binop_feeding_and : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_AND):$root,
- [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
-
-// [us]itofp(undef) = 0, because the result value is bounded.
-def undef_to_fp_zero : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
- [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
- (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;
-
-def undef_to_int_zero: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_AND, G_MUL):$root,
- [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
- (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
-
-def undef_to_negative_one: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_OR):$root,
- [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
- (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;
-
-def binop_left_undef_to_zero: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_SHL, G_UDIV, G_UREM):$root,
- [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
- (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
-
-def binop_right_undef_to_undef: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
- [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
- (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
-
-def unary_undef_to_zero: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_ABS):$root,
- [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
- (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
-
-def unary_undef_to_undef_frags : GICombinePatFrag<
- (outs root:$dst), (ins),
- !foreach(op,
- [G_TRUNC, G_BITCAST, G_ANYEXT, G_PTRTOINT, G_INTTOPTR, G_FPTOSI,
- G_FPTOUI],
- (pattern (op $dst, $x), (G_IMPLICIT_DEF $x)))>;
-def unary_undef_to_undef : GICombineRule<
- (defs root:$dst),
- (match (unary_undef_to_undef_frags $dst)),
- (apply [{ Helper.replaceInstWithUndef(*${dst}.getParent()); }])>;
-
-// Instructions where if any source operand is undef, the instruction can be
-// replaced with undef.
-def propagate_undef_any_op: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_ADD, G_SUB, G_XOR):$root,
- [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
- (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
-
-// Instructions where if all source operands are undef, the instruction can be
-// replaced with undef.
-def propagate_undef_all_ops: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_SHUFFLE_VECTOR, G_BUILD_VECTOR):$root,
- [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
- (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
-
-// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
-def propagate_undef_shuffle_mask: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
- [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
- (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
-
-// Replace an insert/extract element whose index is out of bounds with undef.
-def insert_extract_vec_elt_out_of_bounds : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT):$root,
- [{ return Helper.matchInsertExtractVecEltOutOfBounds(*${root}); }]),
- (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
-
-// Fold (cond ? x : x) -> x
-def select_same_val: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_SELECT):$root,
- [{ return Helper.matchSelectSameVal(*${root}); }]),
- (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
->;
-
-// Fold (undef ? x : y) -> y
-def select_undef_cmp: GICombineRule<
- (defs root:$dst),
- (match (G_IMPLICIT_DEF $undef),
- (G_SELECT $dst, $undef, $x, $y)),
- (apply (GIReplaceReg $dst, $y))
->;
-
-// Fold (true ? x : y) -> x
-// Fold (false ? x : y) -> y
-def select_constant_cmp: GICombineRule<
- (defs root:$root, unsigned_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SELECT):$root,
- [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
->;
-
-// Fold (C op x) -> (x op C)
-// TODO: handle more isCommutable opcodes
-// TODO: handle compares (currently not marked as isCommutable)
-def commute_int_constant_to_rhs : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_ADD, G_MUL, G_AND, G_OR, G_XOR,
- G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_UADDO, G_SADDO,
- G_UMULO, G_SMULO, G_UMULH, G_SMULH,
- G_UADDSAT, G_SADDSAT, G_SMULFIX, G_UMULFIX,
- G_SMULFIXSAT, G_UMULFIXSAT):$root,
- [{ return Helper.matchCommuteConstantToRHS(*${root}); }]),
- (apply [{ Helper.applyCommuteBinOpOperands(*${root}); }])
->;
-
-def commute_fp_constant_to_rhs : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_FADD, G_FMUL, G_FMINNUM, G_FMAXNUM,
- G_FMINNUM_IEEE, G_FMAXNUM_IEEE,
- G_FMINIMUM, G_FMAXIMUM):$root,
- [{ return Helper.matchCommuteFPConstantToRHS(*${root}); }]),
- (apply [{ Helper.applyCommuteBinOpOperands(*${root}); }])
->;
-
-def commute_constant_to_rhs : GICombineGroup<[
- commute_int_constant_to_rhs,
- commute_fp_constant_to_rhs
-]>;
-
-// Fold x op 0 -> x
-def right_identity_zero_frags : GICombinePatFrag<
- (outs root:$dst), (ins $x),
- !foreach(op,
- [G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR,
- G_LSHR, G_PTR_ADD, G_ROTL, G_ROTR],
- (pattern (op $dst, $x, 0)))>;
-def right_identity_zero: GICombineRule<
- (defs root:$dst),
- (match (right_identity_zero_frags $dst, $lhs)),
- (apply (GIReplaceReg $dst, $lhs))
->;
-
-def right_identity_neg_zero_fp: GICombineRule<
- (defs root:$dst),
- (match (G_FADD $dst, $x, $y):$root,
- [{ return Helper.matchConstantFPOp(${y}, -0.0); }]),
- (apply (GIReplaceReg $dst, $x))
->;
-
-def right_identity_neg_zero_fp_nsz: GICombineRule<
- (defs root:$dst),
- (match (G_FADD $dst, $x, $y, (MIFlags FmNsz)):$root,
- [{ return Helper.matchConstantFPOp(${y}, 0.0); }]),
- (apply (GIReplaceReg $dst, $x))
->;
-
-// Fold x op 1 -> x
-def right_identity_one_int: GICombineRule<
- (defs root:$dst),
- (match (G_MUL $dst, $x, 1)),
- (apply (GIReplaceReg $dst, $x))
->;
-
-def right_identity_one_fp: GICombineRule<
- (defs root:$dst),
- (match (G_FMUL $dst, $x, $y):$root,
- [{ return Helper.matchConstantFPOp(${y}, 1.0); }]),
- (apply (GIReplaceReg $dst, $x))
->;
-
-def right_identity_neg_one_fp: GICombineRule<
- (defs root:$dst),
- (match (G_FMUL $dst, $x, $y):$root,
- [{ return Helper.matchConstantFPOp(${y}, -1.0); }]),
- (apply (G_FNEG $dst, $x))
->;
-
-def right_identity_one : GICombineGroup<[right_identity_one_int, right_identity_one_fp]>;
-
-// Fold (x op x) -> x
-def binop_same_val_frags : GICombinePatFrag<
- (outs root:$dst), (ins $x),
- [
- (pattern (G_AND $dst, $x, $x)),
- (pattern (G_OR $dst, $x, $x)),
- ]
->;
-def binop_same_val: GICombineRule<
- (defs root:$dst),
- (match (binop_same_val_frags $dst, $src)),
- (apply (GIReplaceReg $dst, $src))
->;
-
-// Fold (0 op x) -> 0
-def binop_left_to_zero_frags : GICombinePatFrag<
- (outs root:$dst, $zero), (ins $rhs),
- !foreach(op,
- [G_SHL, G_LSHR, G_ASHR, G_SDIV, G_UDIV, G_SREM, G_UREM, G_MUL],
- (pattern (G_CONSTANT $zero, 0), (op $dst, $zero, $rhs)))>;
-
-def binop_left_to_zero: GICombineRule<
- (defs root:$dst),
- (match (binop_left_to_zero_frags $dst, $zero, $rhs)),
- (apply (GIReplaceReg $dst, $zero))
->;
-
-def urem_pow2_to_mask : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_UREM):$root,
- [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
- (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
->;
-
-// Push a binary operator through a select on constants.
-//
-// binop (select cond, K0, K1), K2 ->
-// select cond, (binop K0, K2), (binop K1, K2)
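-// For example: (add (select cond, 1, 2), 4) -> (select cond, 5, 6).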
-
-// This list covers every binary operator that has constant folding. We currently do
-// not have constant folding for G_FPOW, G_FMAXNUM_IEEE or
-// G_FMINNUM_IEEE.
-def fold_binop_into_select : GICombineRule<
- (defs root:$root, unsigned_matchinfo:$select_op_no),
- (match (wip_match_opcode
- G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
- G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
- G_SMIN, G_SMAX, G_UMIN, G_UMAX,
- G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
- G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
- [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
- (apply [{ Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
->;
-
-// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
-def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
-def div_rem_to_divrem : GICombineRule<
- (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
- (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
- [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
->;
-
-// Fold (x op 0) -> 0
-def binop_right_to_zero: GICombineRule<
- (defs root:$dst),
- (match (G_MUL $dst, $lhs, 0:$zero)),
- (apply (GIReplaceReg $dst, $zero))
->;
-
-// Erase stores of undef values.
-def erase_undef_store : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_STORE):$root,
- [{ return Helper.matchUndefStore(*${root}); }]),
- (apply [{ Helper.eraseInst(*${root}); }])
->;
-
-def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
-def simplify_add_to_sub: GICombineRule <
- (defs root:$root, simplify_add_to_sub_matchinfo:$info),
- (match (wip_match_opcode G_ADD):$root,
- [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
- (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
->;
-
-// Fold fp_op(cst) to the constant result of the floating point operation.
-class constant_fold_unary_fp_op_rule<Instruction opcode> : GICombineRule <
- (defs root:$dst),
- (match (opcode $dst, $src0):$root, (G_FCONSTANT $src0, $cst)),
- (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${cst}.getFPImm()); }])
->;
-
-def constant_fold_fneg : constant_fold_unary_fp_op_rule<G_FNEG>;
-def constant_fold_fabs : constant_fold_unary_fp_op_rule<G_FABS>;
-def constant_fold_fsqrt : constant_fold_unary_fp_op_rule<G_FSQRT>;
-def constant_fold_flog2 : constant_fold_unary_fp_op_rule<G_FLOG2>;
-def constant_fold_fptrunc : constant_fold_unary_fp_op_rule<G_FPTRUNC>;
-def constant_fold_fpext : constant_fold_unary_fp_op_rule<G_FPEXT>;
-def constant_fold_fceil : constant_fold_unary_fp_op_rule<G_FCEIL>;
-def constant_fold_ffloor : constant_fold_unary_fp_op_rule<G_FFLOOR>;
-def constant_fold_intrinsic_trunc : constant_fold_unary_fp_op_rule<G_INTRINSIC_TRUNC>;
-def constant_fold_intrinsic_round : constant_fold_unary_fp_op_rule<G_INTRINSIC_ROUND>;
-def constant_fold_intrinsic_roundeven : constant_fold_unary_fp_op_rule<G_INTRINSIC_ROUNDEVEN>;
-def constant_fold_frint : constant_fold_unary_fp_op_rule<G_FRINT>;
-def constant_fold_fnearbyint : constant_fold_unary_fp_op_rule<G_FNEARBYINT>;
-
-// Fold constant zero int to fp conversions.
-class itof_const_zero_fold_rule<Instruction opcode> : GICombineRule <
- (defs root:$dst),
- (match (opcode $dst, 0)),
- // Can't use COPY $dst, 0 here because the 0 operand may be a smaller type
- // than the destination for itofp.
- (apply [{ Helper.replaceInstWithFConstant(*${dst}.getParent(), 0.0); }])
->;
-def itof_const_zero_fold_si : itof_const_zero_fold_rule<G_SITOFP>;
-def itof_const_zero_fold_ui : itof_const_zero_fold_rule<G_UITOFP>;
-
-def constant_fold_fp_ops : GICombineGroup<[
- constant_fold_fneg,
- constant_fold_fabs,
- constant_fold_fsqrt,
- constant_fold_flog2,
- constant_fold_fptrunc,
- constant_fold_fpext,
- constant_fold_fceil,
- constant_fold_ffloor,
- constant_fold_intrinsic_trunc,
- constant_fold_intrinsic_round,
- constant_fold_intrinsic_roundeven,
- constant_fold_frint,
- constant_fold_fnearbyint,
- itof_const_zero_fold_si,
- itof_const_zero_fold_ui
-]>;
-
-// Fold int2ptr(ptr2int(x)) -> x
-def p2i_to_i2p: GICombineRule<
- (defs root:$root, register_matchinfo:$info),
- (match (wip_match_opcode G_INTTOPTR):$root,
- [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
- (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
->;
-
-// Fold ptr2int(int2ptr(x)) -> x
-def i2p_to_p2i: GICombineRule<
- (defs root:$dst, register_matchinfo:$info),
- (match (G_INTTOPTR $t, $ptr),
- (G_PTRTOINT $dst, $t):$mi,
- [{ ${info} = ${ptr}.getReg(); return true; }]),
- (apply [{ Helper.applyCombineP2IToI2P(*${mi}, ${info}); }])
->;
-
-// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y
-def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
-def add_p2i_to_ptradd : GICombineRule<
- (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
- (match (wip_match_opcode G_ADD):$root,
- [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
- (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
->;
-
-// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
-def const_ptradd_to_i2p: GICombineRule<
- (defs root:$root, apint_matchinfo:$info),
- (match (wip_match_opcode G_PTR_ADD):$root,
- [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
- (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
->;
-
-// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
-def hoist_logic_op_with_same_opcode_hands: GICombineRule <
- (defs root:$root, instruction_steps_matchdata:$info),
- (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
- [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
->;
-
-// Fold (ashr (shl x, C), C) -> (sext_inreg x, width - C)
-def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
-def shl_ashr_to_sext_inreg : GICombineRule<
- (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
- (match (wip_match_opcode G_ASHR): $root,
- [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
- (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
->;
-
-// Fold sub 0, (and x, 1) -> sext_inreg x, 1
-def neg_and_one_to_sext_inreg : GICombineRule<
- (defs root:$dst),
- (match (G_AND $and, $x, 1),
- (G_SUB $dst, 0, $and),
- [{ return MRI.hasOneNonDBGUse(${and}.getReg()) &&
- Helper.isLegalOrBeforeLegalizer(
- {TargetOpcode::G_SEXT_INREG, {MRI.getType(${x}.getReg())}}); }]),
- (apply (G_SEXT_INREG $dst, $x, 1))
->;
-
-// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
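-// e.g. and(and(x, 0x0F), 0xF0) -> 0; and(and(x, 0x0F), 0x1C) -> and(x, 0x0C).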
-def overlapping_and: GICombineRule <
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_AND):$root,
- [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
->;
-
-// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
-def redundant_and: GICombineRule <
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_AND):$root,
- [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
->;
-
-// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
-def redundant_or: GICombineRule <
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_OR):$root,
- [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
->;
-
-// If the input is already sign extended, just drop the extension.
-// sext_inreg x, K -> x
-//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
-def redundant_sext_inreg: GICombineRule <
- (defs root:$root),
- (match (wip_match_opcode G_SEXT_INREG):$root,
- [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
- (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
->;
-
-// Fold (anyext (trunc x)) -> x if the source type is same as
-// the destination type.
-def anyext_trunc_fold: GICombineRule <
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_ANYEXT):$root,
- [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
->;
-
-// Fold (zext (trunc x)) -> x if the source type is same as the destination type
-// and truncated bits are known to be zero.
-def zext_trunc_fold: GICombineRule <
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_ZEXT):$root,
- [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
->;
-
-def not_cmp_fold : GICombineRule<
- (defs root:$d, register_vector_matchinfo:$info),
- (match (wip_match_opcode G_XOR): $d,
- [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
- (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
->;
-
-// Fold (fneg (fneg x)) -> x.
-def fneg_fneg_fold: GICombineRule <
- (defs root:$dst),
- (match (G_FNEG $t, $src),
- (G_FNEG $dst, $t)),
- (apply (GIReplaceReg $dst, $src))
->;
-
-// Fold (unmerge (merge x, y, z)) -> x, y, z.
-def unmerge_merge : GICombineRule<
- (defs root:$d, register_vector_matchinfo:$info),
- (match (wip_match_opcode G_UNMERGE_VALUES): $d,
- [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
- (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
->;
-
-// Fold merge(unmerge).
-def merge_unmerge : GICombineRule<
- (defs root:$dst, register_matchinfo:$src),
- (match (G_MERGE_VALUES $dst, GIVariadic<1>:$merge_srcs):$merge,
- [{
- // Check if first source comes from G_UNMERGE_VALUES.
- Register FirstMergeSrc = ${merge_srcs}[0].getReg();
- MachineInstr *UnmergeMI = MRI.getVRegDef(FirstMergeSrc);
- if (!UnmergeMI || UnmergeMI->getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
- return false;
-
- // Check counts match.
- unsigned NumMergeSrcs = ${merge_srcs}.size();
- unsigned NumUnmergeDefs = UnmergeMI->getNumDefs();
- if (NumMergeSrcs != NumUnmergeDefs)
- return false;
-
- // Verify all merge sources match unmerge defs in order.
- for (unsigned I = 0; I < NumMergeSrcs; ++I) {
- Register MergeSrc = ${merge_srcs}[I].getReg();
- Register UnmergeDef = UnmergeMI->getOperand(I).getReg();
-
- if (MergeSrc != UnmergeDef)
- return false;
-
- if (!MRI.hasOneNonDBGUse(MergeSrc))
- return false;
- }
-
- // Check size compatibility.
- ${src} = UnmergeMI->getOperand(NumUnmergeDefs).getReg();
- LLT SrcTy = MRI.getType(${src});
- LLT DstTy = MRI.getType(${dst}.getReg());
- if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
- return false;
-
- // Check bitcast legality.
- if (SrcTy != DstTy) {
- std::array<LLT, 2> Types = {{DstTy, SrcTy}};
- LegalityQuery Query(TargetOpcode::G_BITCAST, Types);
- if (!Helper.isLegalOrBeforeLegalizer(Query))
- return false;
- }
-
- return true;
- }]),
- (apply [{
- LLT SrcTy = MRI.getType(${src});
- LLT DstTy = MRI.getType(${dst}.getReg());
-
- Helper.getBuilder().setInstrAndDebugLoc(*${merge});
-
- if (SrcTy == DstTy) {
- Helper.replaceRegWith(MRI, ${dst}.getReg(), ${src});
- } else {
- Helper.getBuilder().buildBitcast(${dst}.getReg(), ${src});
- }
-
- ${merge}->eraseFromParent();
- }])
->;
-
-// Fold (fabs (fneg x)) -> (fabs x).
-def fabs_fneg_fold: GICombineRule <
- (defs root:$dst),
- (match (G_FNEG $tmp, $x),
- (G_FABS $dst, $tmp)),
- (apply (G_FABS $dst, $x))>;
-
-// Fold (unmerge cst) -> cst1, cst2, ...
-def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
-def unmerge_cst : GICombineRule<
- (defs root:$d, unmerge_cst_matchinfo:$info),
- (match (wip_match_opcode G_UNMERGE_VALUES): $d,
- [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
- (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
->;
-
-// Fold (unmerge undef) -> undef, undef, ...
-def unmerge_undef : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_UNMERGE_VALUES): $root,
- [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
->;
-
-// Transform x,y<dead> = unmerge z -> x = trunc z.
-def unmerge_dead_to_trunc : GICombineRule<
- (defs root:$d),
- (match (wip_match_opcode G_UNMERGE_VALUES): $d,
- [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
- (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
->;
-
-// Transform (unmerge (build_vector ...)) -> (build_vector (anyext ...))
-def unmerge_anyext_build_vector : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_UNMERGE_VALUES): $root,
- [{ return Helper.matchUnmergeValuesAnyExtBuildVector(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
->;
-
-// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
-def unmerge_zext_to_zext : GICombineRule<
- (defs root:$d),
- (match (wip_match_opcode G_UNMERGE_VALUES): $d,
- [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
- (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
->;
-
-/// Transform merge_x_undef -> anyext.
-def merge_of_x_and_undef : GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_IMPLICIT_DEF $undef),
- (G_MERGE_VALUES $root, $x, $undef):$MI,
- [{ return Helper.matchMergeXAndUndef(*${MI}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${MI}, ${matchinfo}); }])>;
-
-/// Transform merge_x_zero -> zext.
-def merge_of_x_and_zero : GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $zero, 0),
- (G_MERGE_VALUES $root, $x, $zero):$MI,
- [{ return Helper.matchMergeXAndZero(*${MI}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${MI}, ${matchinfo}); }])>;
-
-// Transform build_vector(unmerge(src, 0), ... unmerge(src, n), undef, ..., undef)
-// => concat_vectors(src, undef)
-def combine_build_unmerge : GICombineRule<
- (defs root:$root, register_matchinfo:$unmergeSrc),
- (match (G_BUILD_VECTOR $dst, GIVariadic<>:$unused):$root,
- [{ return Helper.matchCombineBuildUnmerge(*${root}, MRI, ${unmergeSrc}); }]),
- (apply [{ Helper.applyCombineBuildUnmerge(*${root}, MRI, B, ${unmergeSrc}); }])
->;
-
-def merge_combines: GICombineGroup<[
- unmerge_anyext_build_vector,
- unmerge_merge,
- merge_unmerge,
- unmerge_cst,
- unmerge_undef,
- unmerge_dead_to_trunc,
- unmerge_zext_to_zext,
- merge_of_x_and_undef,
- merge_of_x_and_zero,
- combine_build_unmerge
-]>;
-
-// Under certain conditions, transform:
-// trunc (shl x, K) -> shl (trunc x), K
-// trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K))
-def trunc_shift_matchinfo : GIDefMatchData<"std::pair<MachineInstr*, LLT>">;
-def trunc_shift: GICombineRule <
- (defs root:$root, trunc_shift_matchinfo:$matchinfo),
- (match (wip_match_opcode G_TRUNC):$root,
- [{ return Helper.matchCombineTruncOfShift(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyCombineTruncOfShift(*${root}, ${matchinfo}); }])
->;
-
-// Transform (mul x, -1) -> (sub 0, x)
-def mul_by_neg_one: GICombineRule <
- (defs root:$dst),
- (match (G_MUL $dst, $x, -1)),
- (apply (G_SUB $dst, 0, $x))
->;
-
-// Fold (xor (and x, y), y) -> (and (not x), y)
-def xor_of_and_with_same_reg_matchinfo :
- GIDefMatchData<"std::pair<Register, Register>">;
-def xor_of_and_with_same_reg: GICombineRule <
- (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
- (match (wip_match_opcode G_XOR):$root,
- [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
->;
-
-// Transform (ptr_add 0, x) -> (int_to_ptr x)
-def ptr_add_with_zero: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_PTR_ADD):$root,
- [{ return Helper.matchPtrAddZero(*${root}); }]),
- (apply [{ Helper.applyPtrAddZero(*${root}); }])>;
-
-def combine_insert_vec_elts_build_vector : GICombineRule<
- (defs root:$root, register_vector_matchinfo:$info),
- (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
- [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
- (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;
-
-def load_or_combine : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_OR):$root,
- [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
-def extend_through_phis : GICombineRule<
- (defs root:$root, extend_through_phis_matchdata:$matchinfo),
- (match (wip_match_opcode G_PHI):$root,
- [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;
-
-// Currently only the single insert_vec_elts combine defined above.
-def insert_vec_elt_combines : GICombineGroup<
- [combine_insert_vec_elts_build_vector]>;
-
-def extract_vec_elt_build_vec : GICombineRule<
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
- [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;
-
-// Fold away full elt extracts from a build_vector.
-def extract_all_elts_from_build_vector_matchinfo :
- GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
-def extract_all_elts_from_build_vector : GICombineRule<
- (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
- (match (wip_match_opcode G_BUILD_VECTOR):$root,
- [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;
-
-def extract_vec_elt_combines : GICombineGroup<[
- extract_vec_elt_build_vec,
- extract_all_elts_from_build_vector]>;
-
-def funnel_shift_from_or_shift : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_OR):$root,
- [{ return Helper.matchOrShiftToFunnelShift(*${root}, false, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
->;
-
-def funnel_shift_from_or_shift_constants_are_legal : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_OR):$root,
- [{ return Helper.matchOrShiftToFunnelShift(*${root}, true, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
->;
-
-
-def funnel_shift_to_rotate : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_FSHL, G_FSHR):$root,
- [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
- (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
->;
-
-// Fold fshr x, y, 0 -> y
-def funnel_shift_right_zero: GICombineRule<
- (defs root:$root),
- (match (G_FSHR $x, $y, $z, 0):$root),
- (apply (COPY $x, $z))
->;
-
-// Fold fshl x, y, 0 -> x
-def funnel_shift_left_zero: GICombineRule<
- (defs root:$root),
- (match (G_FSHL $x, $y, $z, 0):$root),
- (apply (COPY $x, $y))
->;
-
-// Fold fsh(l/r) x, y, C -> fsh(l/r) x, y, C % bw
-def funnel_shift_overshift: GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_FSHL, G_FSHR):$root,
- [{ return Helper.matchConstantLargerBitWidth(*${root}, 3); }]),
- (apply [{ Helper.applyFunnelShiftConstantModulo(*${root}); }])
->;
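
A reference model of the G_FSHL semantics these funnel-shift rules rely on
(plain C++, not the LLVM implementation; fshl32 is a made-up helper name):
the shift amount acts modulo the bit width, amount zero returns the first
operand, and equal operands degenerate to a rotate.

#include <cassert>
#include <cstdint>

static uint32_t fshl32(uint32_t x, uint32_t y, uint32_t amt) {
  amt %= 32;                 // funnel_shift_overshift: C -> C % bw
  if (amt == 0)
    return x;                // funnel_shift_left_zero: fshl x, y, 0 -> x
  return (x << amt) | (y >> (32 - amt));
}

int main() {
  uint32_t x = 0x12345678, y = 0x9ABCDEF0;
  assert(fshl32(x, y, 0) == x);
  assert(fshl32(x, y, 37) == fshl32(x, y, 37 % 32));
  assert(fshl32(x, x, 5) == ((x << 5) | (x >> 27))); // funnel_shift_to_rotate
  return 0;
}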
-
-// Transform: fshl x, z, y | shl x, y -> fshl x, z, y
-// Transform: shl x, y | fshl x, z, y -> fshl x, z, y
-// FIXME: TableGen doesn't handle G_OR commutativity on its own, so we
-// enumerate both operand orders manually with !foreach.
-def funnel_shift_or_shift_to_funnel_shift_left_frags : GICombinePatFrag<
- (outs root: $dst, $out1, $out2), (ins),
- !foreach(inst, [(G_OR $dst, $out1, $out2), (G_OR $dst, $out2, $out1)],
- (pattern (G_FSHL $out1, $x, $z, $y), (G_SHL $out2, $x, $y), inst))>;
-def funnel_shift_or_shift_to_funnel_shift_left: GICombineRule<
- (defs root:$root),
- (match (funnel_shift_or_shift_to_funnel_shift_left_frags $root, $out1, $out2)),
- (apply (GIReplaceReg $root, $out1))
->;
-
-// Transform: fshr z, x, y | srl x, y -> fshr z, x, y
-// Transform: srl x, y | fshr z, x, y -> fshr z, x, y
-// FIXME: TableGen doesn't handle G_OR commutativity on its own, so we
-// enumerate both operand orders manually with !foreach.
-def funnel_shift_or_shift_to_funnel_shift_right_frags : GICombinePatFrag<
- (outs root: $dst, $out1, $out2), (ins),
- !foreach(inst, [(G_OR $dst, $out1, $out2), (G_OR $dst, $out2, $out1)],
- (pattern (G_FSHR $out1, $z, $x, $y), (G_LSHR $out2, $x, $y), inst))>;
-def funnel_shift_or_shift_to_funnel_shift_right: GICombineRule<
- (defs root:$root),
- (match (funnel_shift_or_shift_to_funnel_shift_right_frags $root, $out1, $out2)),
- (apply (GIReplaceReg $root, $out1))
->;
-
-def rotate_out_of_range : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_ROTR, G_ROTL):$root,
- [{ return Helper.matchRotateOutOfRange(*${root}); }]),
- (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
->;
-
-def icmp_to_true_false_known_bits : GICombineRule<
- (defs root:$d, int64_matchinfo:$matchinfo),
- (match (wip_match_opcode G_ICMP):$d,
- [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
-
-def icmp_to_lhs_known_bits : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_ICMP):$root,
- [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-def redundant_binop_in_equality : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_ICMP):$root,
- [{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-// Transform: (X == 0 & Y == 0) -> (X | Y) == 0
-def double_icmp_zero_and_combine: GICombineRule<
- (defs root:$root),
- (match (G_ICMP $d1, $p, $s1, 0),
- (G_ICMP $d2, $p, $s2, 0),
- (G_AND $root, $d1, $d2),
- [{ return ${p}.getPredicate() == CmpInst::ICMP_EQ &&
- !MRI.getType(${s1}.getReg()).getScalarType().isPointer() &&
- (MRI.getType(${s1}.getReg()) ==
- MRI.getType(${s2}.getReg())); }]),
- (apply (G_OR $ordst, $s1, $s2),
- (G_ICMP $root, $p, $ordst, 0))
->;
-
-// Transform: (X != 0 | Y != 0) -> (X | Y) != 0
-def double_icmp_zero_or_combine: GICombineRule<
- (defs root:$root),
- (match (G_ICMP $d1, $p, $s1, 0),
- (G_ICMP $d2, $p, $s2, 0),
- (G_OR $root, $d1, $d2),
- [{ return ${p}.getPredicate() == CmpInst::ICMP_NE &&
- !MRI.getType(${s1}.getReg()).getScalarType().isPointer() &&
- (MRI.getType(${s1}.getReg()) ==
- MRI.getType(${s2}.getReg())); }]),
- (apply (G_OR $ordst, $s1, $s2),
- (G_ICMP $root, $p, $ordst, 0))
->;
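
Both double_icmp_zero rules are instances of one boolean identity; it can
be verified exhaustively at 8 bits (plain C++, not LLVM code):

#include <cassert>

int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned y = 0; y < 256; ++y) {
      assert(((x == 0) && (y == 0)) == ((x | y) == 0)); // the AND form
      assert(((x != 0) || (y != 0)) == ((x | y) != 0)); // the OR form
    }
  return 0;
}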
-
-def and_or_disjoint_mask : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_AND):$root,
- [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;
-
-def bitfield_extract_from_and : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (G_CONSTANT $mask, $imm2),
- (G_CONSTANT $lsb, $imm1),
- (G_LSHR $shift, $x, $lsb),
- (G_AND $root, $shift, $mask):$root,
- [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
- funnel_shift_to_rotate,
- funnel_shift_right_zero,
- funnel_shift_left_zero,
- funnel_shift_overshift,
- funnel_shift_or_shift_to_funnel_shift_left,
- funnel_shift_or_shift_to_funnel_shift_right]>;
-
-def bitfield_extract_from_sext_inreg : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_SEXT_INREG):$root,
- [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-def bitfield_extract_from_shr : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_ASHR, G_LSHR):$root,
- [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-def bitfield_extract_from_shr_and : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_ASHR, G_LSHR):$root,
- [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
- bitfield_extract_from_and,
- bitfield_extract_from_shr,
- bitfield_extract_from_shr_and]>;
-
-def udiv_by_const : GICombineRule<
- (defs root:$root),
- (match (G_UDIV $dst, $x, $y):$root,
- [{ return Helper.matchUDivOrURemByConst(*${root}); }]),
- (apply [{ Helper.applyUDivOrURemByConst(*${root}); }])>;
-
-def sdiv_by_const : GICombineRule<
- (defs root:$root),
- (match (G_SDIV $dst, $x, $y):$root,
- [{ return Helper.matchSDivOrSRemByConst(*${root}); }]),
- (apply [{ Helper.applySDivOrSRemByConst(*${root}); }])>;
-
-def sdiv_by_pow2 : GICombineRule<
- (defs root:$root),
- (match (G_SDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
- [{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/true); }]),
- (apply [{ Helper.applySDivByPow2(*${root}); }])>;
-
-def udiv_by_pow2 : GICombineRule<
- (defs root:$root),
- (match (G_UDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
- [{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/false); }]),
- (apply [{ Helper.applyUDivByPow2(*${root}); }])>;
-
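For the power-of-two cases above: unsigned division by 2^k is a logical
shift, while signed division needs a bias so the arithmetic shift rounds
toward zero like G_SDIV. A standalone sketch (plain C++, not the GISel
helpers; assumes arithmetic right shift of negative values, guaranteed
since C++20):

#include <cassert>
#include <cstdint>

static int32_t sdiv_pow2(int32_t x, unsigned k) {
  uint32_t sign = (uint32_t)(x >> 31); // all-ones if x is negative
  uint32_t bias = sign >> (32 - k);    // 2^k - 1 for negative x, else 0
  return (int32_t)((uint32_t)x + bias) >> k;
}

int main() {
  const unsigned ks[] = {1, 3, 8};
  for (int32_t x = -1000; x <= 1000; ++x) {
    assert((uint32_t)x / 8u == ((uint32_t)x >> 3)); // udiv by 2^3
    for (unsigned k : ks)
      assert(sdiv_pow2(x, k) == x / (int32_t)(1u << k)); // trunc toward zero
  }
  return 0;
}
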
-def intdiv_combines : GICombineGroup<[udiv_by_pow2, sdiv_by_pow2,
- udiv_by_const, sdiv_by_const,]>;
-
-def urem_by_const : GICombineRule<
- (defs root:$root),
- (match (G_UREM $dst, $x, $y):$root,
- [{ return Helper.matchUDivOrURemByConst(*${root}); }]),
- (apply [{ Helper.applyUDivOrURemByConst(*${root}); }])>;
-
-def srem_by_const : GICombineRule<
- (defs root:$root),
- (match (G_SREM $dst, $x, $y):$root,
- [{ return Helper.matchSDivOrSRemByConst(*${root}); }]),
- (apply [{ Helper.applySDivOrSRemByConst(*${root}); }])>;
-
-def intrem_combines : GICombineGroup<[urem_by_const, srem_by_const]>;
-
-def reassoc_ptradd : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_PTR_ADD):$root,
- [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
-
-def reassoc_comm_binops : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_ADD $root, $src1, $src2):$root,
- [{ return Helper.matchReassocCommBinOp(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def reassocs : GICombineGroup<[reassoc_ptradd, reassoc_comm_binops]>;
-
-// Constant fold operations.
-def constant_fold_binop : GICombineRule<
- (defs root:$d, apint_matchinfo:$matchinfo),
- (match (wip_match_opcode G_ADD, G_PTR_ADD, G_AND, G_ASHR, G_LSHR, G_MUL, G_OR,
- G_SHL, G_SUB, G_XOR, G_UDIV, G_SDIV, G_UREM, G_SREM,
- G_SMIN, G_SMAX, G_UMIN, G_UMAX):$d,
- [{ return Helper.matchConstantFoldBinOp(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
-
-def constant_fold_fp_binop : GICombineRule<
- (defs root:$d, constantfp_matchinfo:$matchinfo),
- (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV):$d,
- [{ return Helper.matchConstantFoldFPBinOp(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.replaceInstWithFConstant(*${d}, ${matchinfo}); }])>;
-
-
-def constant_fold_fma : GICombineRule<
- (defs root:$d, constantfp_matchinfo:$matchinfo),
- (match (wip_match_opcode G_FMAD, G_FMA):$d,
- [{ return Helper.matchConstantFoldFMA(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.replaceInstWithFConstant(*${d}, ${matchinfo}); }])>;
-
-def constant_fold_cast_op : GICombineRule<
- (defs root:$d, apint_matchinfo:$matchinfo),
- (match (wip_match_opcode G_ZEXT, G_SEXT, G_ANYEXT):$d,
- [{ return Helper.matchConstantFoldCastOp(*${d}, ${matchinfo}); }]),
- (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
-
-def mulo_by_2: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_UMULO, G_SMULO):$root,
- [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
-
-def mulo_by_0: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_UMULO, G_SMULO):$root,
- [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-// Transform (uadde x, y, 0) -> (uaddo x, y)
-// (sadde x, y, 0) -> (saddo x, y)
-// (usube x, y, 0) -> (usubo x, y)
-// (ssube x, y, 0) -> (ssubo x, y)
-def adde_to_addo: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_UADDE, G_SADDE, G_USUBE, G_SSUBE):$root,
- [{ return Helper.matchAddEToAddO(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
-
-def mulh_to_lshr : GICombineRule<
- (defs root:$root),
- (match (wip_match_opcode G_UMULH):$root,
- [{ return Helper.matchUMulHToLShr(*${root}); }]),
- (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;
-
-def mulh_combines : GICombineGroup<[mulh_to_lshr]>;
-
-def trunc_ssats : GICombineRule<
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (G_TRUNC $dst, $src):$root,
- [{ return Helper.matchTruncSSatS(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyTruncSSatS(*${root}, ${matchinfo}); }])>;
-
-def trunc_ssatu : GICombineRule<
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (G_TRUNC $dst, $src):$root,
- [{ return Helper.matchTruncSSatU(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyTruncSSatU(*${root}, ${matchinfo}); }])>;
-
-def trunc_usatu : GICombineRule<
- (defs root:$root),
- (match (G_UMIN $min, $x, $y):$Min,
- (G_TRUNC $dst, $min):$root,
- [{ return Helper.matchTruncUSatU(*${root}, *${Min}); }]),
- (apply (G_TRUNC_USAT_U $dst, $x))>;
-
-def truncusatu_to_fptouisat : GICombineRule<
- (defs root:$root),
- (match (G_FPTOUI $src, $x):$Src,
- (G_TRUNC_USAT_U $dst, $src):$root,
- [{ return Helper.matchTruncUSatUToFPTOUISat(*${root}, *${Src}); }]),
- (apply (G_FPTOUI_SAT $dst, $x))
->;
-
-def truncsat_combines : GICombineGroup<[trunc_ssats, trunc_ssatu, trunc_usatu, truncusatu_to_fptouisat]>;
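
The trunc_usatu pattern above is worth spelling out: a G_UMIN against the
narrow type's maximum followed by G_TRUNC is precisely an unsigned
saturating truncate. A minimal model (plain C++, not LLVM code):

#include <algorithm>
#include <cassert>
#include <cstdint>

static uint8_t trunc_usat_u8(uint32_t x) {
  return (uint8_t)std::min<uint32_t>(x, 255u); // umin + trunc
}

int main() {
  assert(trunc_usat_u8(42) == 42);            // in range: preserved
  assert(trunc_usat_u8(300) == 255);          // out of range: clamps
  assert(trunc_usat_u8(0xFFFFFFFFu) == 255);  // saturates at 2^8 - 1
  return 0;
}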
-
-def redundant_neg_operands: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
- [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
-
-// Transform (fsub +-0.0, X) -> (fneg X)
-def fsub_to_fneg: GICombineRule<
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_FSUB):$root,
- [{ return Helper.matchFsubToFneg(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyFsubToFneg(*${root}, ${matchinfo}); }])>;
-
-// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
-// (fadd x, (fmul y, z)) -> (fmad y, z, x)
-// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
-// (fadd (fmul x, y), z) -> (fmad x, y, z)
-def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_FADD):$root,
- [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
- ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
-// -> (fmad (fpext x), (fpext y), z)
-// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
-// -> (fmad (fpext y), (fpext z), x)
-def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_FADD):$root,
- [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
- ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-// Transform (fadd (fma x, y, (fmul z, u)), v) -> (fma x, y, (fma z, u, v))
-// (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
-// Transform (fadd v, (fma x, y, (fmul z, u))) -> (fma x, y, (fma z, u, v))
-// (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
-def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_FADD):$root,
- [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
- ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
-// (fma x, y, (fma (fpext u), (fpext v), z))
-def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_FADD):$root,
- [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
- *${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
-// -> (fmad x, y, -z)
-def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_FSUB):$root,
- [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
- ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
-// (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
-def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_FSUB):$root,
- [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
- ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-// Transform (fsub (fpext (fmul x, y)), z) ->
-// (fma (fpext x), (fpext y), (fneg z))
-def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_FSUB):$root,
- [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
- ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
-// (fneg (fma (fpext x), (fpext y), z))
-def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_FSUB):$root,
- [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
- *${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-def combine_minmax_nan: GICombineRule<
- (defs root:$root, unsigned_matchinfo:$info),
- (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
- [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]),
- (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>;
-
-// Combine multiple FDIVs with the same divisor into multiple FMULs by the
-// reciprocal.
-def fdiv_repeated_divison: GICombineRule<
- (defs root:$root, mi_vector_matchinfo:$matchinfo),
- (match (G_FDIV $dst, $src1, $src2):$root,
- [{ return Helper.matchRepeatedFPDivisor(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyRepeatedFPDivisor(${matchinfo}); }])>;
-
-// Transform (add x, (sub y, x)) -> y
-// Transform (add (sub y, x), x) -> y
-def add_sub_reg_frags : GICombinePatFrag<
- (outs root:$dst), (ins $src),
- [
- (pattern (G_ADD $dst, $x, $tmp), (G_SUB $tmp, $src, $x)),
- (pattern (G_ADD $dst, $tmp, $x), (G_SUB $tmp, $src, $x))
- ]>;
-def add_sub_reg: GICombineRule <
- (defs root:$dst),
- (match (add_sub_reg_frags $dst, $src)),
- (apply (GIReplaceReg $dst, $src))>;
-
-def buildvector_identity_fold : GICombineRule<
- (defs root:$build_vector, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_BUILD_VECTOR_TRUNC, G_BUILD_VECTOR):$build_vector,
- [{ return Helper.matchBuildVectorIdentityFold(*${build_vector}, ${matchinfo}); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${build_vector}, ${matchinfo}); }])>;
-
-def trunc_buildvector_fold : GICombineRule<
- (defs root:$op, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_TRUNC):$op,
- [{ return Helper.matchTruncBuildVectorFold(*${op}, ${matchinfo}); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;
-
-def trunc_lshr_buildvector_fold : GICombineRule<
- (defs root:$op, register_matchinfo:$matchinfo),
- (match (wip_match_opcode G_TRUNC):$op,
- [{ return Helper.matchTruncLshrBuildVectorFold(*${op}, ${matchinfo}); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;
-
-// Transform:
-// (x + y) - y -> x
-// (x + y) - x -> y
-// x - (y + x) -> 0 - y
-// x - (x + z) -> 0 - z
-def sub_add_reg: GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SUB):$root,
- [{ return Helper.matchSubAddSameReg(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def bitcast_bitcast_fold : GICombineRule<
- (defs root:$dst),
- (match (G_BITCAST $dst, $src1):$op, (G_BITCAST $src1, $src0),
- [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;
-
-
-def fptrunc_fpext_fold : GICombineRule<
- (defs root:$dst),
- (match (G_FPTRUNC $dst, $src1):$op, (G_FPEXT $src1, $src0),
- [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
- (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;
-
-
-def select_to_minmax: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (wip_match_opcode G_SELECT):$root,
- [{ return Helper.matchSimplifySelectToMinMax(*${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
-
-def select_to_iminmax: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$info),
- (match (G_ICMP $tst, $tst1, $a, $b),
- (G_SELECT $root, $tst, $x, $y),
- [{ return Helper.matchSelectIMinMax(${root}, ${info}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${info}); }])>;
-
-def simplify_neg_minmax : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SUB):$root,
- [{ return Helper.matchSimplifyNegMinMax(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def match_selects : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SELECT):$root,
- [{ return Helper.matchSelect(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def match_ands : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_AND):$root,
- [{ return Helper.matchAnd(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def match_ors : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_OR):$root,
- [{ return Helper.matchOr(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def match_addos : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SADDO, G_UADDO):$root,
- [{ return Helper.matchAddOverflow(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def match_subo_no_overflow : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SSUBO, G_USUBO):$root,
- [{ return Helper.matchSuboCarryOut(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def match_extract_of_element_undef_vector: GICombineRule <
- (defs root:$root),
- (match (G_IMPLICIT_DEF $vector),
- (G_EXTRACT_VECTOR_ELT $root, $vector, $idx)),
- (apply (G_IMPLICIT_DEF $root))
->;
-
-def match_extract_of_element_undef_index: GICombineRule <
- (defs root:$root),
- (match (G_IMPLICIT_DEF $idx),
- (G_EXTRACT_VECTOR_ELT $root, $vector, $idx)),
- (apply (G_IMPLICIT_DEF $root))
->;
-
-def match_extract_of_element : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
- [{ return Helper.matchExtractVectorElement(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def extract_vector_element_not_const : GICombineRule<
- (defs root:$root),
- (match (G_INSERT_VECTOR_ELT $src, $x, $value, $idx),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx)),
- (apply (GIReplaceReg $root, $value))>;
-
-def extract_vector_element_different_indices : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_INSERT_VECTOR_ELT $src, $x, $value, $idx2),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx1),
- [{ return Helper.matchExtractVectorElementWithDifferentIndices(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def extract_vector_element_build_vector : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $idx, $imm),
- (G_BUILD_VECTOR $src, GIVariadic<>:$unused):$Build,
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx):$Extract,
- [{ return Helper.matchExtractVectorElementWithBuildVector(*${Extract}, *${Build},
- ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${Extract}, ${matchinfo}); }])>;
-
-def extract_vector_element_shuffle_vector : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $idx, $imm),
- (G_SHUFFLE_VECTOR $src, $src1, $src2, $mask):$Shuffle,
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx):$Extract,
- [{ return Helper.matchExtractVectorElementWithShuffleVector(*${Extract}, *${Shuffle},
- ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${Extract}, ${matchinfo}); }])>;
-
-def extract_vector_element_build_vector_trunc2 : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR_TRUNC $src, $x, $y),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
- [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def extract_vector_element_build_vector_trunc3 : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
- [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def extract_vector_element_build_vector_trunc4 : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
- [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def extract_vector_element_build_vector_trunc5 : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a, $b),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
- [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def extract_vector_element_build_vector_trunc6 : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a, $b, $c),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
- [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def extract_vector_element_build_vector_trunc7 : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a, $b, $c, $d),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
- [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def extract_vector_element_build_vector_trunc8 : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a, $b, $c, $d, $e),
- (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
- [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def sext_trunc : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_TRUNC $src, $x),
- (G_SEXT $root, $src),
- [{ return Helper.matchSextOfTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def zext_trunc : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_TRUNC $src, $x, (MIFlags NoUWrap)),
- (G_ZEXT $root, $src),
- [{ return Helper.matchZextOfTrunc(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def nneg_zext : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_ZEXT $root, $x, (MIFlags NonNeg)),
- [{ return Helper.matchNonNegZext(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-// Combines concat operations
-def combine_concat_vector : GICombineRule<
- (defs root:$root, register_vector_matchinfo:$matchinfo),
- (match (wip_match_opcode G_CONCAT_VECTORS):$root,
- [{ return Helper.matchCombineConcatVectors(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyCombineConcatVectors(*${root}, ${matchinfo}); }])>;
-
-// Combines Shuffles of Concats
-// a = G_CONCAT_VECTORS x, y, undef, undef
-// b = G_CONCAT_VECTORS z, undef, undef, undef
-// c = G_SHUFFLE_VECTORS a, b, <0, 1, 4, undef>
-// ===>
-// c = G_CONCAT_VECTORS x, y, z, undef
-def combine_shuffle_concat : GICombineRule<
- (defs root:$root, register_vector_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
- [{ return Helper.matchCombineShuffleConcat(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyCombineShuffleConcat(*${root}, ${matchinfo}); }])>;
-
-// Combine a shuffle of vectors into a build_vector.
-def combine_shuffle_vector_to_build_vector : GICombineRule<
- (defs root:$root),
- (match (G_SHUFFLE_VECTOR $dst, $src1, $src2, $mask):$root),
- (apply [{ Helper.applyCombineShuffleToBuildVector(*${root}); }])>;
-
-def insert_vector_element_idx_undef : GICombineRule<
- (defs root:$root),
- (match (G_IMPLICIT_DEF $idx),
- (G_INSERT_VECTOR_ELT $root, $src, $elt, $idx)),
- (apply (G_IMPLICIT_DEF $root))>;
-
-def insert_vector_element_elt_undef : GICombineRule<
- (defs root:$root),
- (match (G_IMPLICIT_DEF $elt),
- (G_INSERT_VECTOR_ELT $root, $src, $elt, $idx),
- [{ return isGuaranteedNotToBePoison(${src}.getReg(), MRI); }]),
- (apply (GIReplaceReg $root, $src))>;
-
-def insert_vector_element_extract_vector_element : GICombineRule<
- (defs root:$root),
- (match (G_EXTRACT_VECTOR_ELT $elt, $src, $idx),
- (G_INSERT_VECTOR_ELT $root, $src, $elt, $idx)),
- (apply (GIReplaceReg $root, $src))>;
-
-def insert_vector_elt_oob : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
- [{ return Helper.matchInsertVectorElementOOB(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-// Combine v8i8 (buildvector i8 (trunc(unmerge)), i8 (trunc), i8 (trunc), i8 (trunc), undef, undef, undef, undef)
-def combine_use_vector_truncate : GICombineRule<
- (defs root:$root, register_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR $dst, GIVariadic<>:$unused):$root,
- [{ return Helper.matchUseVectorTruncate(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyUseVectorTruncate(*${root}, ${matchinfo}); }])>;
-
-def add_of_vscale : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_VSCALE $left, $imm1),
- (G_VSCALE $right, $imm2),
- (G_ADD $root, $left, $right, (MIFlags NoSWrap)),
- [{ return Helper.matchAddOfVScale(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def mul_of_vscale : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_VSCALE $left, $scale),
- (G_CONSTANT $x, $imm1),
- (G_MUL $root, $left, $x, (MIFlags NoSWrap)),
- [{ return Helper.matchMulOfVScale(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def shl_of_vscale : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_VSCALE $left, $imm),
- (G_CONSTANT $x, $imm1),
- (G_SHL $root, $left, $x, (MIFlags NoSWrap)),
- [{ return Helper.matchShlOfVScale(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def sub_of_vscale : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_VSCALE $right, $imm),
- (G_SUB $root, $x, $right, (MIFlags NoSWrap)),
- [{ return Helper.matchSubOfVScale(${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
-
-def expand_const_fpowi : GICombineRule<
- (defs root:$root),
- (match (G_CONSTANT $int, $imm),
- (G_FPOWI $dst, $float, $int):$root,
- [{ return Helper.matchFPowIExpansion(*${root}, ${imm}.getCImm()->getSExtValue()); }]),
- (apply [{ Helper.applyExpandFPowI(*${root}, ${imm}.getCImm()->getSExtValue()); }])>;
-
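One plausible shape for the constant-exponent expansion above is a multiply
chain via exponentiation by squaring; the sketch below is an illustration
in plain C++ (not the actual applyExpandFPowI lowering), with negative
exponents handled by a final reciprocal:

#include <cassert>
#include <cstdint>

static double powi_expand(double x, int64_t n) {
  uint64_t e = n < 0 ? 0 - (uint64_t)n : (uint64_t)n;
  double result = 1.0, base = x;
  while (e) {          // one multiply per set bit of the exponent
    if (e & 1)
      result *= base;
    base *= base;
    e >>= 1;
  }
  return n < 0 ? 1.0 / result : result;
}

int main() {
  assert(powi_expand(2.0, 10) == 1024.0);
  assert(powi_expand(2.0, -3) == 0.125);
  assert(powi_expand(1.5, 0) == 1.0);
  return 0;
}
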
-def combine_shuffle_undef_rhs : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_IMPLICIT_DEF $undef),
- (G_SHUFFLE_VECTOR $root, $src1, $undef, $mask):$root,
- [{ return Helper.matchShuffleUndefRHS(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
->;
-
-def combine_shuffle_disjoint_mask : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
- [{ return Helper.matchShuffleDisjointMask(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
->;
-
-// match_extract_of_element and insert_vector_elt_oob must come first!
-def vector_ops_combines: GICombineGroup<[
-match_extract_of_element_undef_vector,
-match_extract_of_element_undef_index,
-insert_vector_element_idx_undef,
-insert_vector_element_elt_undef,
-match_extract_of_element,
-insert_vector_elt_oob,
-extract_vector_element_not_const,
-extract_vector_element_different_indices,
-extract_vector_element_build_vector,
-extract_vector_element_build_vector_trunc2,
-extract_vector_element_build_vector_trunc3,
-extract_vector_element_build_vector_trunc4,
-extract_vector_element_build_vector_trunc5,
-extract_vector_element_build_vector_trunc6,
-extract_vector_element_build_vector_trunc7,
-extract_vector_element_build_vector_trunc8,
-extract_vector_element_shuffle_vector,
-insert_vector_element_extract_vector_element,
-add_of_vscale,
-mul_of_vscale,
-shl_of_vscale,
-sub_of_vscale,
-]>;
-
-// fold ((A+(B-C))-B) -> A-C
-def APlusBMinusCMinusB : GICombineRule<
- (defs root:$root),
- (match (G_SUB $sub1, $B, $C),
- (G_ADD $add1, $A, $sub1),
- (G_SUB $root, $add1, $B)),
- (apply (G_SUB $root, $A, $C))>;
-
-// fold ((A+(B+C))-B) -> A+C
-def APlusBPlusCMinusB_frags : GICombinePatFrag<
- (outs root:$root), (ins $x, $y, $n),
- [
- (pattern (G_ADD $add1, $y, $n),
- (G_ADD $add2, $x, $add1),
- (G_SUB $root, $add2, $y),
- [{ return MRI.hasOneNonDBGUse(${add2}.getReg()) &&
- MRI.hasOneNonDBGUse(${add1}.getReg()); }]),
- ]>;
-
-def APlusBPlusCMinusB : GICombineRule<
- (defs root:$root),
- (match (APlusBPlusCMinusB_frags $root, $x, $y, $n)),
- (apply (G_ADD $root, $x, $n))>;
-
-// fold ((A-(B-C))-C) -> A-B
-def AMinusBMinusCMinusC : GICombineRule<
- (defs root:$root),
- (match (G_SUB $sub1, $B, $C),
- (G_SUB $sub2, $A, $sub1),
- (G_SUB $root, $sub2, $C)),
- (apply (G_SUB $root, $A, $B))>;
-
-// fold ((0-A) + B) -> B-A
-def ZeroMinusAPlusB : GICombineRule<
- (defs root:$root),
- (match (G_SUB $sub, 0, $A),
- (G_ADD $root, $sub, $B)),
- (apply (G_SUB $root, $B, $A))>;
-
-// fold (A + (0-B)) -> A-B
-def APlusZeroMinusB : GICombineRule<
- (defs root:$root),
- (match (G_SUB $sub, 0, $B),
- (G_ADD $root, $A, $sub)),
- (apply (G_SUB $root, $A, $B))>;
-
-// fold (A+(B-A)) -> B
-def APlusBMinusB : GICombineRule<
-  (defs root:$root),
-  (match (G_SUB $sub, $B, $A),
-         (G_ADD $root, $A, $sub)),
-  (apply (GIReplaceReg $root, $B))>;
-
-// fold ((B-A)+A) -> B
-def BMinusAPlusA : GICombineRule<
-  (defs root:$root),
-  (match (G_SUB $sub, $B, $A),
-         (G_ADD $root, $sub, $A)),
-  (apply (GIReplaceReg $root, $B))>;
-
-// fold ((A-B)+(C-A)) -> (C-B)
-def AMinusBPlusCMinusA : GICombineRule<
- (defs root:$root),
- (match (G_SUB $sub1, $A, $B),
- (G_SUB $sub2, $C, $A),
- (G_ADD $root, $sub1, $sub2)),
- (apply (G_SUB $root, $C, $B))>;
-
-// fold ((A-B)+(B-C)) -> (A-C)
-def AMinusBPlusBMinusC : GICombineRule<
- (defs root:$root),
- (match (G_SUB $sub1, $A, $B),
- (G_SUB $sub2, $B, $C),
- (G_ADD $root, $sub1, $sub2)),
- (apply (G_SUB $root, $A, $C))>;
-
-// fold (A+(B-(A+C))) to (B-C)
-def APlusBMinusAplusC : GICombineRule<
- (defs root:$root),
- (match (G_ADD $add1, $A, $C),
- (G_SUB $sub1, $B, $add1),
- (G_ADD $root, $A, $sub1)),
- (apply (G_SUB $root, $B, $C))>;
-
-// fold (A+(B-(C+A))) to (B-C)
-def APlusBMinusCPlusA : GICombineRule<
- (defs root:$root),
- (match (G_ADD $add1, $C, $A),
- (G_SUB $sub1, $B, $add1),
- (G_ADD $root, $A, $sub1)),
- (apply (G_SUB $root, $B, $C))>;
-
-// fold (A+C1)-C2 -> A+(C1-C2)
-def APlusC1MinusC2: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $c2, $imm2),
- (G_CONSTANT $c1, $imm1),
- (G_ADD $add, $A, $c1),
- (G_SUB $root, $add, $c2):$root,
- [{ return Helper.matchFoldAPlusC1MinusC2(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-// fold C2-(A+C1) -> (C2-C1)-A
-def C2MinusAPlusC1: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $c2, $imm2),
- (G_CONSTANT $c1, $imm1),
- (G_ADD $add, $A, $c1),
- (G_SUB $root, $c2, $add):$root,
- [{ return Helper.matchFoldC2MinusAPlusC1(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-// fold (A-C1)-C2 -> A-(C1+C2)
-def AMinusC1MinusC2: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $c2, $imm2),
- (G_CONSTANT $c1, $imm1),
- (G_SUB $sub1, $A, $c1),
- (G_SUB $root, $sub1, $c2):$root,
- [{ return Helper.matchFoldAMinusC1MinusC2(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-// fold (C1-A)-C2 -> (C1-C2)-A
-def C1Minus2MinusC2: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $c2, $imm2),
- (G_CONSTANT $c1, $imm1),
- (G_SUB $sub1, $c1, $A),
- (G_SUB $root, $sub1, $c2):$root,
- [{ return Helper.matchFoldC1Minus2MinusC2(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-// fold ((A-C1)+C2) -> (A+(C2-C1))
-def AMinusC1PlusC2: GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $c2, $imm2),
- (G_CONSTANT $c1, $imm1),
- (G_SUB $sub, $A, $c1),
- (G_ADD $root, $sub, $c2):$root,
- [{ return Helper.matchFoldAMinusC1PlusC2(*${root}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def integer_reassoc_combines: GICombineGroup<[
- APlusBMinusCMinusB,
- APlusBPlusCMinusB,
- AMinusBMinusCMinusC,
- ZeroMinusAPlusB,
- APlusZeroMinusB,
- APlusBMinusB,
- BMinusAPlusA,
- AMinusBPlusCMinusA,
- AMinusBPlusBMinusC,
- APlusBMinusAplusC,
- APlusBMinusCPlusA,
- APlusC1MinusC2,
- C2MinusAPlusC1,
- AMinusC1MinusC2,
- C1Minus2MinusC2,
- AMinusC1PlusC2
-]>;
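
A few of the reassociation identities above can be checked exhaustively at
8 bits, where all arithmetic wraps mod 2^8 just like G_ADD/G_SUB at that
width (plain C++, not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned a = 0; a < 256; ++a)
    for (unsigned b = 0; b < 256; ++b)
      for (unsigned c = 0; c < 256; ++c) {
        uint8_t A = a, B = b, C = c;
        // APlusBMinusCMinusB: ((A+(B-C))-B) == A-C
        assert((uint8_t)((A + (uint8_t)(B - C)) - B) == (uint8_t)(A - C));
        // AMinusBPlusCMinusA: ((A-B)+(C-A)) == C-B
        assert((uint8_t)((uint8_t)(A - B) + (uint8_t)(C - A)) ==
               (uint8_t)(C - B));
        // APlusBMinusAplusC: (A+(B-(A+C))) == B-C
        assert((uint8_t)(A + (uint8_t)(B - (uint8_t)(A + C))) ==
               (uint8_t)(B - C));
      }
  return 0;
}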
-
-// fold (A+(shl (0-B), C)) -> (A-(shl B, C))
-// fold ((shl (0-B), C)+A) -> (A-(shl B, C))
-def add_shl_neg_frags : GICombinePatFrag<
- (outs root:$dst), (ins $x, $y, $n),
- [
- (pattern (G_CONSTANT $zero, 0),
- (G_SUB $neg_y, $zero, $y),
- (G_SHL $shl_neg, $neg_y, $n),
- (G_ADD $dst, $x, $shl_neg),
- [{ return MRI.hasOneNonDBGUse(${shl_neg}.getReg()) &&
- MRI.hasOneNonDBGUse(${neg_y}.getReg()); }]),
- (pattern (G_CONSTANT $zero, 0),
- (G_SUB $neg_y, $zero, $y),
- (G_SHL $shl_neg, $neg_y, $n),
- (G_ADD $dst, $shl_neg, $x),
- [{ return MRI.hasOneNonDBGUse(${shl_neg}.getReg()) &&
- MRI.hasOneNonDBGUse(${neg_y}.getReg()); }])
- ]>;
-
-def add_shift : GICombineRule<
- (defs root:$dst),
- (match (add_shl_neg_frags $dst, $x, $y, $n)),
- (apply (G_SHL $new_shl, $y, $n),
- (G_SUB $dst, $x, $new_shl))>;
-
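The add_shift rewrite above is sound because a left shift distributes over
negation mod 2^n: (0 - B) << C == 0 - (B << C). A quick check (plain C++,
not LLVM code; the one-use constraints in the pattern fragment are a
profitability condition, not part of the identity):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t vals[] = {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xDEADBEEFu};
  for (uint32_t A : vals)
    for (uint32_t B : vals)
      for (uint32_t n = 0; n < 32; ++n)
        assert(A + ((0u - B) << n) == A - (B << n));
  return 0;
}
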
-def freeze_of_non_undef_non_poison : GICombineRule<
- (defs root:$root),
- (match (G_FREEZE $root, $src),
- [{ return isGuaranteedNotToBeUndefOrPoison(${src}.getReg(), MRI); }]),
- (apply (GIReplaceReg $root, $src))>;
-
-def freeze_combines: GICombineGroup<[
- freeze_of_non_undef_non_poison,
- push_freeze_to_prevent_poison_from_propagating
-]>;
-
-/// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
-class truncate_of_opcode<Instruction extOpcode> : GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (extOpcode $ext, $src):$ExtMI,
- (G_TRUNC $root, $ext):$root,
- [{ return Helper.matchTruncateOfExt(*${root}, *${ExtMI}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-def truncate_of_zext : truncate_of_opcode<G_ZEXT>;
-def truncate_of_sext : truncate_of_opcode<G_SEXT>;
-def truncate_of_anyext : truncate_of_opcode<G_ANYEXT>;
-
-// Push cast through select.
-class select_of_opcode<Instruction castOpcode> : GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_SELECT $select, $cond, $true, $false):$Select,
- (castOpcode $root, $select):$Cast,
- [{ return Helper.matchCastOfSelect(*${Cast}, *${Select}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${Cast}, ${matchinfo}); }])>;
-
-def select_of_zext : select_of_opcode<G_ZEXT>;
-def select_of_anyext : select_of_opcode<G_ANYEXT>;
-def select_of_truncate : select_of_opcode<G_TRUNC>;
-
-// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
-class ext_of_ext_opcodes<Instruction ext1Opcode, Instruction ext2Opcode> : GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (ext2Opcode $second, $src):$Second,
- (ext1Opcode $root, $second):$First,
- [{ return Helper.matchExtOfExt(*${First}, *${Second}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${First}, ${matchinfo}); }])>;
-
-def zext_of_zext : ext_of_ext_opcodes<G_ZEXT, G_ZEXT>;
-def zext_of_anyext : ext_of_ext_opcodes<G_ZEXT, G_ANYEXT>;
-def sext_of_sext : ext_of_ext_opcodes<G_SEXT, G_SEXT>;
-def sext_of_anyext : ext_of_ext_opcodes<G_SEXT, G_ANYEXT>;
-def anyext_of_anyext : ext_of_ext_opcodes<G_ANYEXT, G_ANYEXT>;
-def anyext_of_zext : ext_of_ext_opcodes<G_ANYEXT, G_ZEXT>;
-def anyext_of_sext : ext_of_ext_opcodes<G_ANYEXT, G_SEXT>;
-
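The ext_of_ext instantiations above encode that extensions compose: going
through an intermediate width is the same as extending once. A small
demonstration (plain C++, not LLVM code), with 8/16/32-bit integers
standing in for the GISel types:

#include <cassert>
#include <cstdint>

int main() {
  for (int v = -128; v < 128; ++v) {
    int8_t x = (int8_t)v;
    // zext_of_zext: zext16(zext8(x)) == zext32(x)
    assert((uint32_t)(uint16_t)(uint8_t)x == (uint32_t)(uint8_t)x);
    // sext_of_sext: sext16(sext8(x)) == sext32(x)
    assert((int32_t)(int16_t)x == (int32_t)x);
  }
  return 0;
}
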
-def sext_inreg_of_sext_inreg : GICombineRule<
- (defs root:$dst, build_fn_matchinfo:$matchinfo),
- (match (G_SEXT_INREG $x, $src, $a):$other,
- (G_SEXT_INREG $dst, $x, $b):$root,
- [{ return Helper.matchRedundantSextInReg(*${root}, *${other}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
-
-// Push cast through build vector.
-class buildvector_of_opcode<Instruction castOpcode> : GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_BUILD_VECTOR $bv, GIVariadic<>:$unused):$Build,
- (castOpcode $root, $bv):$Cast,
- [{ return Helper.matchCastOfBuildVector(*${Cast}, *${Build}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${Cast}, ${matchinfo}); }])>;
-
-def buildvector_of_truncate : buildvector_of_opcode<G_TRUNC>;
-
-// narrow binop.
-// trunc (binop X, C) --> binop (trunc X, trunc C)
-class narrow_binop_opcode<Instruction binopOpcode> : GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_CONSTANT $const, $imm),
- (binopOpcode $binop, $x, $const):$Binop,
- (G_TRUNC $root, $binop):$Trunc,
- [{ return Helper.matchNarrowBinop(*${Trunc}, *${Binop}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${Trunc}, ${matchinfo}); }])>;
-
-// Fold (ctlz (xor x, (sra x, bitwidth-1))) -> (add (ctls x), 1).
-// Fold (ctlz (or (shl (xor x, (sra x, bitwidth-1)), 1), 1)) -> (ctls x)
-class ctlz_to_ctls_op<Instruction ctlzOpcode> : GICombineRule <
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (ctlzOpcode $dst, $src):$root,
- [{ return Helper.matchCtls(*${root}, ${matchinfo}); }]),
- (apply [{Helper.applyBuildFn(*${root}, ${matchinfo});}])>;
-
-def ctlz_to_ctls : ctlz_to_ctls_op<G_CTLZ>;
-def ctlz_zero_undef_to_ctls : ctlz_to_ctls_op<G_CTLZ_ZERO_UNDEF>;
-
-def ctls_combines : GICombineGroup<[
- ctlz_to_ctls,
- ctlz_zero_undef_to_ctls,
-]>;
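
The ctls fold above follows from the usual "count leading sign bits"
encoding: with cls(x) defined as the number of leading bits equal to the
sign bit, excluding the sign bit itself, ctlz(x ^ (x >> 31)) == cls(x) + 1.
A reference check (plain C++, not LLVM code; ctlz32/cls32 are made-up
helpers, and x >> 31 assumes arithmetic shift, guaranteed since C++20):

#include <cassert>
#include <cstdint>

static unsigned ctlz32(uint32_t x) { // portable ctlz, defined for x == 0
  unsigned n = 0;
  for (uint32_t m = 0x80000000u; m && !(x & m); m >>= 1)
    ++n;
  return n;
}

static unsigned cls32(int32_t x) {   // leading bits matching the sign bit
  uint32_t u = (uint32_t)x;
  uint32_t sign = u >> 31;
  unsigned n = 0;
  for (int i = 30; i >= 0 && ((u >> i) & 1) == sign; --i)
    ++n;
  return n;
}

int main() {
  const int32_t tests[] = {0, -1, 1, -2, 42, -40000, INT32_MIN, INT32_MAX};
  for (int32_t x : tests)
    assert(ctlz32((uint32_t)x ^ (uint32_t)(x >> 31)) == cls32(x) + 1);
  return 0;
}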
-
-def narrow_binop_add : narrow_binop_opcode<G_ADD>;
-def narrow_binop_sub : narrow_binop_opcode<G_SUB>;
-def narrow_binop_mul : narrow_binop_opcode<G_MUL>;
-def narrow_binop_and : narrow_binop_opcode<G_AND>;
-def narrow_binop_or : narrow_binop_opcode<G_OR>;
-def narrow_binop_xor : narrow_binop_opcode<G_XOR>;
-
-// Cast of integer.
-class integer_of_opcode<Instruction castOpcode> : GICombineRule <
- (defs root:$root, apint_matchinfo:$matchinfo),
- (match (G_CONSTANT $int, $imm),
- (castOpcode $root, $int):$Cast,
- [{ return Helper.matchCastOfInteger(*${Cast}, ${matchinfo}); }]),
- (apply [{ Helper.replaceInstWithConstant(*${Cast}, ${matchinfo}); }])>;
-
-def integer_of_truncate : integer_of_opcode<G_TRUNC>;
-
-def cast_of_cast_combines: GICombineGroup<[
- truncate_of_zext,
- truncate_of_sext,
- truncate_of_anyext,
- zext_of_zext,
- zext_of_anyext,
- sext_of_sext,
- sext_of_anyext,
- anyext_of_anyext,
- anyext_of_zext,
- anyext_of_sext,
- sext_inreg_of_sext_inreg,
-]>;
-
-def cast_combines: GICombineGroup<[
- cast_of_cast_combines,
- select_of_zext,
- select_of_anyext,
- select_of_truncate,
- buildvector_of_truncate,
- narrow_binop_add,
- narrow_binop_sub,
- narrow_binop_mul,
- narrow_binop_and,
- narrow_binop_or,
- narrow_binop_xor,
- integer_of_truncate
-]>;
-
-def canonicalize_icmp : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_ICMP $root, $pred, $lhs, $rhs):$cmp,
- [{ return Helper.matchCanonicalizeICmp(*${cmp}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${cmp}, ${matchinfo}); }])>;
-
-def canonicalize_fcmp : GICombineRule<
- (defs root:$root, build_fn_matchinfo:$matchinfo),
- (match (G_FCMP $root, $pred, $lhs, $rhs):$cmp,
- [{ return Helper.matchCanonicalizeFCmp(*${cmp}, ${matchinfo}); }]),
- (apply [{ Helper.applyBuildFn(*${cmp}, ${matchinfo}); }])>;
-
-def cmp_combines: GICombineGroup<[
- canonicalize_icmp,
- canonicalize_fcmp,
- icmp_to_true_false_known_bits,
- icmp_to_lhs_known_bits,
- double_icmp_zero_and_combine,
- double_icmp_zero_or_combine,
- redundant_binop_in_equality
-]>;
-
-
-def overflow_combines: GICombineGroup<[
- match_addos,
- match_subo_no_overflow
-]>;
-
-// FIXME: These should use the custom predicate feature once it lands.
-def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
- undef_to_negative_one,
- binop_left_undef_to_zero,
- binop_right_undef_to_undef,
- unary_undef_to_zero,
- unary_undef_to_undef,
- propagate_undef_any_op,
- propagate_undef_all_ops,
- propagate_undef_shuffle_mask,
- erase_undef_store,
- insert_extract_vec_elt_out_of_bounds]>;
-
-def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
- binop_same_val, binop_left_to_zero,
- binop_right_to_zero, p2i_to_i2p,
- i2p_to_p2i, anyext_trunc_fold,
- fneg_fneg_fold, right_identity_one,
- add_sub_reg, buildvector_identity_fold,
- trunc_buildvector_fold,
- trunc_lshr_buildvector_fold,
- bitcast_bitcast_fold, fptrunc_fpext_fold,
- right_identity_neg_zero_fp, right_identity_neg_zero_fp_nsz,
- right_identity_neg_one_fp]>;
-
-def const_combines : GICombineGroup<[constant_fold_fp_ops, const_ptradd_to_i2p,
- overlapping_and, mulo_by_2, mulo_by_0,
- adde_to_addo,
- combine_minmax_nan, expand_const_fpowi]>;
-
-def known_bits_simplifications : GICombineGroup<[
- redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
- zext_trunc_fold,
- sext_inreg_to_zext_inreg]>;
-
-def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
- narrow_binop_feeding_and]>;
-
-def phi_combines : GICombineGroup<[extend_through_phis]>;
-
-def bitreverse_shift : GICombineGroup<[bitreverse_shl, bitreverse_lshr]>;
-
-def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
- select_to_iminmax, match_selects]>;
-
-def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, sub_to_add,
- add_p2i_to_ptradd, mul_by_neg_one,
- idempotent_prop]>;
-
-def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
- combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
- combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
- combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
- combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;
-
-def constant_fold_binops : GICombineGroup<[constant_fold_binop,
- constant_fold_fp_binop]>;
-
-def prefer_sign_combines : GICombineGroup<[nneg_zext]>;
-
-def shuffle_combines : GICombineGroup<[combine_shuffle_concat,
- combine_shuffle_undef_rhs,
- combine_shuffle_disjoint_mask]>;
-
-def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
- vector_ops_combines, freeze_combines, cast_combines,
- insert_vec_elt_combines, extract_vec_elt_combines, combines_for_extload,
- combine_extracted_vector_load,
- undef_combines, identity_combines, phi_combines,
- simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big,
- reassocs, ptr_add_immed_chain, cmp_combines,
- shl_ashr_to_sext_inreg, neg_and_one_to_sext_inreg, sext_inreg_of_load,
- width_reduction_combines, select_combines,
- known_bits_simplifications, trunc_shift,
- not_cmp_fold, opt_brcond_by_inverting_cond,
- const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
- shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
- div_rem_to_divrem, funnel_shift_combines, bitreverse_shift, commute_shift,
- form_bitfield_extract, constant_fold_binops, constant_fold_fma,
- constant_fold_cast_op, fabs_fneg_fold,
- mulh_combines, redundant_neg_operands,
- and_or_disjoint_mask, fma_combines, fold_binop_into_select,
- intrem_combines, intdiv_combines, fdiv_repeated_divison,
- sub_add_reg, select_to_minmax,
- fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
- simplify_neg_minmax, combine_concat_vector,
- sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
- combine_use_vector_truncate, merge_combines, overflow_combines,
- truncsat_combines, lshr_of_trunc_of_lshr, ctls_combines, add_shift]>;
-
-// A combine group used for prelegalizer combiners at -O0. The combines in
-// this group have been selected based on experiments to balance code size and
-// compile time performance.
-def optnone_combines : GICombineGroup<[trivial_combines,
- ptr_add_immed_chain, combines_for_extload,
- not_cmp_fold, opt_brcond_by_inverting_cond, combine_concat_vector]>;
+#endif // LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H
\ No newline at end of file
>From a3327a38e48afdb0cdfe66b0e0288c81ac5b8aa2 Mon Sep 17 00:00:00 2001
From: Luisa Cicolini <48860705+luisacicolini at users.noreply.github.com>
Date: Sat, 14 Feb 2026 16:34:38 +0000
Subject: [PATCH 06/16] Apply suggestion from @luisacicolini
---
llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 8f720bb282206..6bb33f696e6fe 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -49,6 +49,7 @@ bool GIMatchTableExecutor::executeMatchTable(
const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI,
const PredicateBitset &AvailableFeatures,
CodeGenCoverage *CoverageInfo) const {
+
uint64_t CurrentIdx = 0;
SmallVector<uint64_t, 4> OnFailResumeAt;
NewMIVector OutMIs;
>From 2b613d33f360dd3b3cabc9a0cd45249cba435978 Mon Sep 17 00:00:00 2001
From: Luisa Cicolini <48860705+luisacicolini at users.noreply.github.com>
Date: Sat, 14 Feb 2026 16:34:59 +0000
Subject: [PATCH 07/16] Apply suggestion from @luisacicolini
---
llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 6bb33f696e6fe..4bb765b6746b4 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -215,6 +215,7 @@ bool GIMatchTableExecutor::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
unsigned Opcode = State.MIs[InsnID]->getOpcode();
+
DEBUG_WITH_TYPE(TgtExecutor::getName(), {
dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
<< "], ExpectedOpcode=" << Expected0;
>From 1ee29f270ae9b25e16c9da80c63f0952dcc74e28 Mon Sep 17 00:00:00 2001
From: Luisa Cicolini <48860705+luisacicolini at users.noreply.github.com>
Date: Sat, 14 Feb 2026 16:35:17 +0000
Subject: [PATCH 08/16] Apply suggestion from @luisacicolini
---
llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 4bb765b6746b4..290656759cece 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -238,7 +238,6 @@ bool GIMatchTableExecutor::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
const int64_t Opcode = State.MIs[InsnID]->getOpcode();
-
DEBUG_WITH_TYPE(TgtExecutor::getName(), {
dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
<< LowerBound << ", " << UpperBound << "), Default=" << Default
>From 70688c84af18c597c9554f17ccd17367d366e6e9 Mon Sep 17 00:00:00 2001
From: Luisa Cicolini <48860705+luisacicolini at users.noreply.github.com>
Date: Sat, 14 Feb 2026 16:35:55 +0000
Subject: [PATCH 09/16] Apply suggestion from @luisacicolini
---
llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 290656759cece..8f6586e79d78a 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -238,6 +238,7 @@ bool GIMatchTableExecutor::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
const int64_t Opcode = State.MIs[InsnID]->getOpcode();
+
DEBUG_WITH_TYPE(TgtExecutor::getName(), {
dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
<< LowerBound << ", " << UpperBound << "), Default=" << Default
>From 7fad695299faf83645b70ff2d0a419c4aa818ce2 Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 16:37:21 +0000
Subject: [PATCH 10/16] chore: actual combine
---
.../include/llvm/Target/GlobalISel/Combine.td | 3831 ++++++++++-------
1 file changed, 2287 insertions(+), 1544 deletions(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index e7b1ba29805ee..e70d90283d3be 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1,4 +1,4 @@
-//===- llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h -------*- C++ -*-===//
+//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,1575 +6,2318 @@
//
//===----------------------------------------------------------------------===//
//
-/// \file This file implements GIMatchTableExecutor's `executeMatchTable`
-/// function. This is implemented in a separate file because the function is
-/// quite large.
+// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H
-#define LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
-#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
-#include "llvm/CodeGen/GlobalISel/Utils.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterBankInfo.h"
-#include "llvm/CodeGen/TargetInstrInfo.h"
-#include "llvm/CodeGen/TargetRegisterInfo.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Type.h"
-#include "llvm/Support/CodeGenCoverage.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-
-namespace llvm {
-
-template <class TgtExecutor, class PredicateBitset, class ComplexMatcherMemFn,
- class CustomRendererFn>
-bool GIMatchTableExecutor::executeMatchTable(
- TgtExecutor &Exec, MatcherState &State,
- const ExecInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
- &ExecInfo,
- MachineIRBuilder &Builder, const uint8_t *MatchTable,
- const TargetInstrInfo &TII, MachineRegisterInfo &MRI,
- const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI,
- const PredicateBitset &AvailableFeatures,
- CodeGenCoverage *CoverageInfo) const {
-
- uint64_t CurrentIdx = 0;
- SmallVector<uint64_t, 4> OnFailResumeAt;
- NewMIVector OutMIs;
-
- GISelChangeObserver *Observer = Builder.getObserver();
- // Bypass the flag check on the instruction, and only look at the MCInstrDesc.
- bool NoFPException = !State.MIs[0]->getDesc().mayRaiseFPException();
-
- const uint32_t Flags = State.MIs[0]->getFlags();
-
- enum RejectAction { RejectAndGiveUp, RejectAndResume };
- auto handleReject = [&]() -> RejectAction {
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": Rejected\n");
- if (OnFailResumeAt.empty())
- return RejectAndGiveUp;
- CurrentIdx = OnFailResumeAt.pop_back_val();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
- << OnFailResumeAt.size() << " try-blocks remain)\n");
- return RejectAndResume;
- };
-
- const auto propagateFlags = [&]() {
- for (auto MIB : OutMIs) {
- // Set the NoFPExcept flag when no original matched instruction could
- // raise an FP exception, but the new instruction potentially might.
- uint32_t MIBFlags = Flags | MIB.getInstr()->getFlags();
- if (NoFPException && MIB->mayRaiseFPException())
- MIBFlags |= MachineInstr::NoFPExcept;
- if (Observer)
- Observer->changingInstr(*MIB);
- MIB.setMIFlags(MIBFlags);
- if (Observer)
- Observer->changedInstr(*MIB);
- }
- };
-
- // If the index is >= 0, it's an index in the type objects generated by
- // TableGen. If the index is <0, it's an index in the recorded types object.
- const auto getTypeFromIdx = [&](int64_t Idx) -> LLT {
- if (Idx >= 0)
- return ExecInfo.TypeObjects[Idx];
- return State.RecordedTypes[1 - Idx];
- };
-
- const auto readULEB = [&]() {
- return fastDecodeULEB128(MatchTable, CurrentIdx);
- };
-
- // Convenience function to return a signed value. This avoids
- // us forgetting to first cast to int8_t before casting to a
- // wider signed int type.
- // if we casted uint8 directly to a wider type we'd lose
- // negative values.
- const auto readS8 = [&]() { return (int8_t)MatchTable[CurrentIdx++]; };
-
- const auto readU16 = [&]() {
- auto V = readBytesAs<uint16_t>(MatchTable + CurrentIdx);
- CurrentIdx += 2;
- return V;
- };
-
- const auto readU32 = [&]() {
- auto V = readBytesAs<uint32_t>(MatchTable + CurrentIdx);
- CurrentIdx += 4;
- return V;
- };
-
- const auto readU64 = [&]() {
- auto V = readBytesAs<uint64_t>(MatchTable + CurrentIdx);
- CurrentIdx += 8;
- return V;
- };
-
- const auto eraseImpl = [&](MachineInstr *MI) {
- // If we're erasing the insertion point, ensure we don't leave a dangling
- // pointer in the builder.
- if (Builder.getInsertPt() == MI)
- Builder.setInsertPt(*MI->getParent(), ++MI->getIterator());
- if (Observer)
- Observer->erasingInstr(*MI);
- MI->eraseFromParent();
- };
-
- while (true) {
- assert(CurrentIdx != ~0u && "Invalid MatchTable index");
- uint8_t MatcherOpcode = MatchTable[CurrentIdx++];
- switch (MatcherOpcode) {
- case GIM_Try: {
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": Begin try-block\n");
- OnFailResumeAt.push_back(readU32());
- break;
- }
-
- case GIM_RecordInsn:
- case GIM_RecordInsnIgnoreCopies: {
- uint64_t NewInsnID = readULEB();
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
-
- // As an optimisation we require that MIs[0] is always the root. Refuse
- // any attempt to modify it.
- assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
-
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- if (!MO.isReg()) {
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": Not a register\n");
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- if (MO.getReg().isPhysical()) {
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": Is a physical register\n");
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
-
- MachineInstr *NewMI;
- if (MatcherOpcode == GIM_RecordInsnIgnoreCopies)
- NewMI = getDefIgnoringCopies(MO.getReg(), MRI);
- else
- NewMI = MRI.getVRegDef(MO.getReg());
-
- if ((size_t)NewInsnID < State.MIs.size())
- State.MIs[NewInsnID] = NewMI;
- else {
- assert((size_t)NewInsnID == State.MIs.size() &&
- "Expected to store MIs in order");
- State.MIs.push_back(NewMI);
- }
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": MIs[" << NewInsnID
- << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
- << ")\n");
- break;
- }
-
- case GIM_CheckFeatures: {
- uint16_t ExpectedBitsetID = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIM_CheckFeatures(ExpectedBitsetID="
- << ExpectedBitsetID << ")\n");
- if ((AvailableFeatures & ExecInfo.FeatureBitsets[ExpectedBitsetID]) !=
- ExecInfo.FeatureBitsets[ExpectedBitsetID]) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckOpcode:
- case GIM_CheckOpcodeIsEither: {
- uint64_t InsnID = readULEB();
- uint16_t Expected0 = readU16();
- uint16_t Expected1 = -1;
- if (MatcherOpcode == GIM_CheckOpcodeIsEither)
- Expected1 = readU16();
-
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- unsigned Opcode = State.MIs[InsnID]->getOpcode();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(), {
- dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
- << "], ExpectedOpcode=" << Expected0;
- if (MatcherOpcode == GIM_CheckOpcodeIsEither)
- dbgs() << " || " << Expected1;
- dbgs() << ") // Got=" << Opcode << "\n";
- });
-
- if (Opcode != Expected0 && Opcode != Expected1) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_SwitchOpcode: {
- uint64_t InsnID = readULEB();
- uint16_t LowerBound = readU16();
- uint16_t UpperBound = readU16();
- uint32_t Default = readU32();
-
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- const int64_t Opcode = State.MIs[InsnID]->getOpcode();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(), {
- dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
- << LowerBound << ", " << UpperBound << "), Default=" << Default
- << ", JumpTable...) // Got=" << Opcode << "\n";
- });
- if (Opcode < LowerBound || UpperBound <= Opcode) {
- CurrentIdx = Default;
- break;
- }
- const auto EntryIdx = (Opcode - LowerBound);
- // Each entry is 4 bytes
- CurrentIdx =
- readBytesAs<uint32_t>(MatchTable + CurrentIdx + (EntryIdx * 4));
- if (!CurrentIdx) {
- CurrentIdx = Default;
- break;
- }
- OnFailResumeAt.push_back(Default);
- break;
- }
- case GIM_SwitchType: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t LowerBound = readU16();
- uint16_t UpperBound = readU16();
- int64_t Default = readU32();
-
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(), {
- dbgs() << CurrentIdx << ": GIM_SwitchType(MIs[" << InsnID
- << "]->getOperand(" << OpIdx << "), [" << LowerBound << ", "
- << UpperBound << "), Default=" << Default
- << ", JumpTable...) // Got=";
- if (!MO.isReg())
- dbgs() << "Not a VReg\n";
- else
- dbgs() << MRI.getType(MO.getReg()) << "\n";
- });
- if (!MO.isReg()) {
- CurrentIdx = Default;
- break;
- }
- const LLT Ty = MRI.getType(MO.getReg());
- const auto TyI = ExecInfo.TypeIDMap.find(Ty);
- if (TyI == ExecInfo.TypeIDMap.end()) {
- CurrentIdx = Default;
- break;
- }
- const int64_t TypeID = TyI->second;
- if (TypeID < LowerBound || UpperBound <= TypeID) {
- CurrentIdx = Default;
- break;
- }
- const auto NumEntry = (TypeID - LowerBound);
- // Each entry is 4 bytes
- CurrentIdx =
- readBytesAs<uint32_t>(MatchTable + CurrentIdx + (NumEntry * 4));
- if (!CurrentIdx) {
- CurrentIdx = Default;
- break;
- }
- OnFailResumeAt.push_back(Default);
- break;
- }
-
- case GIM_CheckNumOperandsGE:
- case GIM_CheckNumOperandsLE: {
- uint64_t InsnID = readULEB();
- uint64_t Expected = readULEB();
- const bool IsLE = (MatcherOpcode == GIM_CheckNumOperandsLE);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckNumOperands"
- << (IsLE ? "LE" : "GE") << "(MIs[" << InsnID
- << "], Expected=" << Expected << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- const unsigned NumOps = State.MIs[InsnID]->getNumOperands();
- if (IsLE ? (NumOps > Expected) : (NumOps < Expected)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckNumOperands: {
- uint64_t InsnID = readULEB();
- uint64_t Expected = readULEB();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
- << InsnID << "], Expected=" << Expected << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (State.MIs[InsnID]->getNumOperands() != Expected) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckI64ImmPredicate:
- case GIM_CheckImmOperandPredicate: {
- uint64_t InsnID = readULEB();
- unsigned OpIdx =
- MatcherOpcode == GIM_CheckImmOperandPredicate ? readULEB() : 1;
- uint16_t Predicate = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckImmPredicate(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), Predicate=" << Predicate << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert((State.MIs[InsnID]->getOperand(OpIdx).isImm() ||
- State.MIs[InsnID]->getOperand(OpIdx).isCImm()) &&
- "Expected immediate operand");
- assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
- int64_t Value = 0;
- if (State.MIs[InsnID]->getOperand(OpIdx).isCImm())
- Value = State.MIs[InsnID]->getOperand(OpIdx).getCImm()->getSExtValue();
- else if (State.MIs[InsnID]->getOperand(OpIdx).isImm())
- Value = State.MIs[InsnID]->getOperand(OpIdx).getImm();
- else
- llvm_unreachable("Expected Imm or CImm operand");
-
- if (!testImmPredicate_I64(Predicate, Value))
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckAPIntImmPredicate: {
- uint64_t InsnID = readULEB();
- uint16_t Predicate = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs()
- << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
- << InsnID << "], Predicate=" << Predicate << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
- "Expected G_CONSTANT");
- assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
- if (!State.MIs[InsnID]->getOperand(1).isCImm())
- llvm_unreachable("Expected Imm or CImm operand");
-
- const APInt &Value =
- State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
- if (!testImmPredicate_APInt(Predicate, Value))
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckAPFloatImmPredicate: {
- uint64_t InsnID = readULEB();
- uint16_t Predicate = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs()
- << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
- << InsnID << "], Predicate=" << Predicate << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
- "Expected G_FCONSTANT");
- assert(State.MIs[InsnID]->getOperand(1).isFPImm() &&
- "Expected FPImm operand");
- assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
- const APFloat &Value =
- State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();
-
- if (!testImmPredicate_APFloat(Predicate, Value))
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckLeafOperandPredicate: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t Predicate = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIM_CheckLeafOperandPredicate(MIs[" << InsnID
- << "]->getOperand(" << OpIdx
- << "), Predicate=" << Predicate << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(State.MIs[InsnID]->getOperand(OpIdx).isReg() &&
- "Expected register operand");
- assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
-
- if (!testMOPredicate_MO(Predicate, MO, State))
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckIsBuildVectorAllOnes:
- case GIM_CheckIsBuildVectorAllZeros: {
- uint64_t InsnID = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
- << InsnID << "])\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
- const MachineInstr *MI = State.MIs[InsnID];
- assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
- MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
- "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");
-
- if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
- if (!isBuildVectorAllOnes(*MI, MRI)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- } else {
- if (!isBuildVectorAllZeros(*MI, MRI)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- }
-
- break;
- }
- case GIM_CheckSimplePredicate: {
- // Note: we don't check for invalid here because this is purely a hook to
- // allow some executors (such as the combiner) to check arbitrary,
- // contextless predicates, such as whether a rule is enabled or not.
- uint16_t Predicate = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIM_CheckSimplePredicate(Predicate="
- << Predicate << ")\n");
- assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
- if (!testSimplePredicate(Predicate)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckCxxInsnPredicate: {
- uint64_t InsnID = readULEB();
- uint16_t Predicate = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs()
- << CurrentIdx << ": GIM_CheckCxxPredicate(MIs["
- << InsnID << "], Predicate=" << Predicate << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
-
- if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID], State))
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckHasNoUse: {
- uint64_t InsnID = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckHasNoUse(MIs["
- << InsnID << "]\n");
-
- const MachineInstr *MI = State.MIs[InsnID];
- assert(MI && "Used insn before defined");
- assert(MI->getNumDefs() > 0 && "No defs");
- const Register Res = MI->getOperand(0).getReg();
-
- if (!MRI.use_nodbg_empty(Res)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckHasOneUse: {
- uint64_t InsnID = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckHasOneUse(MIs["
- << InsnID << "]\n");
-
- const MachineInstr *MI = State.MIs[InsnID];
- assert(MI && "Used insn before defined");
- assert(MI->getNumDefs() > 0 && "No defs");
- const Register Res = MI->getOperand(0).getReg();
-
- if (!MRI.hasOneNonDBGUse(Res)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckAtomicOrdering: {
- uint64_t InsnID = readULEB();
- auto Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
- << InsnID << "], " << (uint64_t)Ordering << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (!State.MIs[InsnID]->hasOneMemOperand())
- if (handleReject() == RejectAndGiveUp)
- return false;
-
- for (const auto &MMO : State.MIs[InsnID]->memoperands())
- if (MMO->getMergedOrdering() != Ordering)
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckAtomicOrderingOrStrongerThan: {
- uint64_t InsnID = readULEB();
- auto Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
- << InsnID << "], " << (uint64_t)Ordering << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (!State.MIs[InsnID]->hasOneMemOperand())
- if (handleReject() == RejectAndGiveUp)
- return false;
-
- for (const auto &MMO : State.MIs[InsnID]->memoperands())
- if (!isAtLeastOrStrongerThan(MMO->getMergedOrdering(), Ordering))
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckAtomicOrderingWeakerThan: {
- uint64_t InsnID = readULEB();
- auto Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
- << InsnID << "], " << (uint64_t)Ordering << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (!State.MIs[InsnID]->hasOneMemOperand())
- if (handleReject() == RejectAndGiveUp)
- return false;
-
- for (const auto &MMO : State.MIs[InsnID]->memoperands())
- if (!isStrongerThan(Ordering, MMO->getMergedOrdering()))
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckMemoryAddressSpace: {
- uint64_t InsnID = readULEB();
- uint64_t MMOIdx = readULEB();
- // This accepts a list of possible address spaces.
- const uint64_t NumAddrSpace = MatchTable[CurrentIdx++];
-
- if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
-
- // Need to still jump to the end of the list of address spaces if we find
- // a match earlier.
- const uint64_t LastIdx = CurrentIdx + NumAddrSpace;
-
- const MachineMemOperand *MMO =
- *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
- const unsigned MMOAddrSpace = MMO->getAddrSpace();
-
- bool Success = false;
- for (unsigned I = 0; I != NumAddrSpace; ++I) {
- uint64_t AddrSpace = readULEB();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << "addrspace(" << MMOAddrSpace << ") vs "
- << AddrSpace << '\n');
-
- if (AddrSpace == MMOAddrSpace) {
- Success = true;
- break;
- }
- }
-
- CurrentIdx = LastIdx;
- if (!Success && handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckMemoryAlignment: {
- uint64_t InsnID = readULEB();
- uint64_t MMOIdx = readULEB();
- uint64_t MinAlign = MatchTable[CurrentIdx++];
-
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
- if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
-
- MachineMemOperand *MMO =
- *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
- << "(MIs[" << InsnID << "]->memoperands() + "
- << MMOIdx << ")->getAlignment() >= " << MinAlign
- << ")\n");
- if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
- return false;
-
- break;
- }
- case GIM_CheckMemorySizeEqualTo: {
- uint64_t InsnID = readULEB();
- uint64_t MMOIdx = readULEB();
- uint32_t Size = readU32();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckMemorySizeEqual(MIs["
- << InsnID << "]->memoperands() + " << MMOIdx
- << ", Size=" << Size << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
- if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
-
- MachineMemOperand *MMO =
- *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << MMO->getSize()
- << " bytes vs " << Size
- << " bytes\n");
- if (MMO->getSize() != Size)
- if (handleReject() == RejectAndGiveUp)
- return false;
-
- break;
- }
- case GIM_CheckMemorySizeEqualToLLT:
- case GIM_CheckMemorySizeLessThanLLT:
- case GIM_CheckMemorySizeGreaterThanLLT: {
- uint64_t InsnID = readULEB();
- uint64_t MMOIdx = readULEB();
- uint64_t OpIdx = readULEB();
-
- DEBUG_WITH_TYPE(
- TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckMemorySize"
- << (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT ? "EqualTo"
- : MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT
- ? "GreaterThan"
- : "LessThan")
- << "LLT(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
- << ", OpIdx=" << OpIdx << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- if (!MO.isReg()) {
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": Not a register\n");
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
-
- if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
-
- MachineMemOperand *MMO =
- *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
-
- const TypeSize Size = MRI.getType(MO.getReg()).getSizeInBits();
- if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
- MMO->getSizeInBits() != Size) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
- TypeSize::isKnownGE(MMO->getSizeInBits().getValue(), Size)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
- TypeSize::isKnownLE(MMO->getSizeInBits().getValue(), Size))
- if (handleReject() == RejectAndGiveUp)
- return false;
-
- break;
- }
- case GIM_RootCheckType:
- case GIM_CheckType: {
- uint64_t InsnID = (MatcherOpcode == GIM_RootCheckType) ? 0 : readULEB();
- uint64_t OpIdx = readULEB();
- int TypeID = readS8();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
- << "]->getOperand(" << OpIdx
- << "), TypeID=" << TypeID << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- if (!MO.isReg() || MRI.getType(MO.getReg()) != getTypeFromIdx(TypeID)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckPointerToAny: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint64_t SizeInBits = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), SizeInBits=" << SizeInBits << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- const LLT Ty = MRI.getType(MO.getReg());
-
- // iPTR must be looked up in the target.
- if (SizeInBits == 0) {
- MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
- const unsigned AddrSpace = Ty.getAddressSpace();
- SizeInBits = MF->getDataLayout().getPointerSizeInBits(AddrSpace);
- }
-
- assert(SizeInBits != 0 && "Pointer size must be known");
-
- if (MO.isReg()) {
- if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
- if (handleReject() == RejectAndGiveUp)
- return false;
- } else if (handleReject() == RejectAndGiveUp)
- return false;
-
- break;
- }
- case GIM_RecordNamedOperand: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint64_t StoreIdx = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), StoreIdx=" << StoreIdx << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
- State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
- break;
- }
- case GIM_RecordRegType: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- int TypeIdx = readS8();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_RecordRegType(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), TypeIdx=" << TypeIdx << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(TypeIdx < 0 && "Temp types always have negative indexes!");
- // Indexes start at -1.
- TypeIdx = 1 - TypeIdx;
- const auto &Op = State.MIs[InsnID]->getOperand(OpIdx);
- if (State.RecordedTypes.size() <= (uint64_t)TypeIdx)
- State.RecordedTypes.resize(TypeIdx + 1, LLT());
- State.RecordedTypes[TypeIdx] = MRI.getType(Op.getReg());
- break;
- }
+//===----------------------------------------------------------------------===//
+// Base Classes
+//
+// These are the core classes that the combiner backend relies on.
+//===----------------------------------------------------------------------===//
- case GIM_RootCheckRegBankForClass:
- case GIM_CheckRegBankForClass: {
- uint64_t InsnID =
- (MatcherOpcode == GIM_RootCheckRegBankForClass) ? 0 : readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t RCEnum = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), RCEnum=" << RCEnum << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- if (!MO.isReg() ||
- &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum),
- MRI.getType(MO.getReg())) !=
- RBI.getRegBank(MO.getReg(), MRI, TRI)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
+/// All arguments of the defs operator must be subclasses of GIDefKind or
+/// sub-dags whose operator is GIDefKindWithArgs.
+class GIDefKind;
+class GIDefKindWithArgs;
- case GIM_CheckComplexPattern: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t RendererID = readU16();
- uint16_t ComplexPredicateID = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
- << "] = GIM_CheckComplexPattern(MIs[" << InsnID
- << "]->getOperand(" << OpIdx
- << "), ComplexPredicateID=" << ComplexPredicateID
- << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- // FIXME: Use std::invoke() when it's available.
- ComplexRendererFns Renderer =
- (Exec.*ExecInfo.ComplexPredicates[ComplexPredicateID])(
- State.MIs[InsnID]->getOperand(OpIdx));
- if (Renderer)
- State.Renderers[RendererID] = *Renderer;
- else if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
+/// Declare a root node. There must be at least one of these in every combine
+/// rule.
+def root : GIDefKind;
- case GIM_CheckConstantInt:
- case GIM_CheckConstantInt8: {
- const bool IsInt8 = (MatcherOpcode == GIM_CheckConstantInt8);
-
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint64_t Value = IsInt8 ? (int64_t)readS8() : readU64();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), Value=" << Value << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- if (MO.isReg()) {
- // isOperandImmEqual() will sign-extend to 64-bits, so should we.
- LLT Ty = MRI.getType(MO.getReg());
- // If the type is > 64 bits, it can't be a constant int, so we bail
- // early because SignExtend64 will assert otherwise.
- if (Ty.getScalarSizeInBits() > 64) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
-
- Value = SignExtend64(Value, Ty.getScalarSizeInBits());
- if (!isOperandImmEqual(MO, Value, MRI, /*Splat=*/true)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- } else if (handleReject() == RejectAndGiveUp)
- return false;
-
- break;
- }
+def defs;
- case GIM_CheckLiteralInt: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- int64_t Value = readU64();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), Value=" << Value << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- if (MO.isImm() && MO.getImm() == Value)
- break;
-
- if (MO.isCImm() && MO.getCImm()->equalsInt(Value))
- break;
-
- if (handleReject() == RejectAndGiveUp)
- return false;
-
- break;
- }
+def pattern;
+def match;
+def apply;
+def combine;
+def empty_action;
- case GIM_CheckIntrinsicID: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t Value = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), Value=" << Value << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckCmpPredicate: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t Value = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckCmpPredicate(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), Value=" << Value << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
- if (!MO.isPredicate() || MO.getPredicate() != Value)
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIM_CheckIsMBB: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
- << "]->getOperand(" << OpIdx << "))\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckIsImm: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckIsImm(MIs[" << InsnID
- << "]->getOperand(" << OpIdx << "))\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (!State.MIs[InsnID]->getOperand(OpIdx).isImm()) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckIsSafeToFold: {
- uint64_t NumInsn = MatchTable[CurrentIdx++];
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(N = "
- << NumInsn << ")\n");
- MachineInstr &Root = *State.MIs[0];
- for (unsigned K = 1, E = NumInsn + 1; K < E; ++K) {
- if (!isObviouslySafeToFold(*State.MIs[K], Root)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- }
- break;
- }
- case GIM_CheckIsSameOperand:
- case GIM_CheckIsSameOperandIgnoreCopies: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint64_t OtherInsnID = readULEB();
- uint64_t OtherOpIdx = readULEB();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
- << InsnID << "][" << OpIdx << "], MIs["
- << OtherInsnID << "][" << OtherOpIdx << "])\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");
-
- MachineOperand &Op = State.MIs[InsnID]->getOperand(OpIdx);
- MachineOperand &OtherOp = State.MIs[OtherInsnID]->getOperand(OtherOpIdx);
-
- if (MatcherOpcode == GIM_CheckIsSameOperandIgnoreCopies) {
- if (Op.isReg() && OtherOp.isReg()) {
- if (getSrcRegIgnoringCopies(Op.getReg(), MRI) ==
- getSrcRegIgnoringCopies(OtherOp.getReg(), MRI))
- break;
- }
- }
-
- if (!Op.isIdenticalTo(OtherOp)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_CheckCanReplaceReg: {
- uint64_t OldInsnID = readULEB();
- uint64_t OldOpIdx = readULEB();
- uint64_t NewInsnID = readULEB();
- uint64_t NewOpIdx = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckCanReplaceReg(MIs["
- << OldInsnID << "][" << OldOpIdx << "] = MIs["
- << NewInsnID << "][" << NewOpIdx << "])\n");
-
- Register Old = State.MIs[OldInsnID]->getOperand(OldOpIdx).getReg();
- Register New = State.MIs[NewInsnID]->getOperand(NewOpIdx).getReg();
- if (!canReplaceReg(Old, New, MRI)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_MIFlags: {
- uint64_t InsnID = readULEB();
- uint32_t Flags = readU32();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_MIFlags(MIs[" << InsnID
- << "], " << Flags << ")\n");
- if ((State.MIs[InsnID]->getFlags() & Flags) != Flags) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_MIFlagsNot: {
- uint64_t InsnID = readULEB();
- uint32_t Flags = readU32();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_MIFlagsNot(MIs[" << InsnID
- << "], " << Flags << ")\n");
- if ((State.MIs[InsnID]->getFlags() & Flags)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- break;
- }
- case GIM_Reject:
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIM_Reject\n");
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- case GIR_MutateOpcode: {
- uint64_t OldInsnID = readULEB();
- uint64_t NewInsnID = readULEB();
- uint32_t NewOpcode = readU16();
- if (NewInsnID >= OutMIs.size())
- OutMIs.resize(NewInsnID + 1);
-
- MachineInstr *OldMI = State.MIs[OldInsnID];
- if (Observer)
- Observer->changingInstr(*OldMI);
- OutMIs[NewInsnID] = MachineInstrBuilder(*OldMI->getMF(), OldMI);
- OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
- if (Observer)
- Observer->changedInstr(*OldMI);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
- << NewInsnID << "], MIs[" << OldInsnID << "], "
- << NewOpcode << ")\n");
- break;
- }
+def wip_match_opcode;
- case GIR_BuildRootMI:
- case GIR_BuildMI: {
- uint64_t NewInsnID = (MatcherOpcode == GIR_BuildRootMI) ? 0 : readULEB();
- uint32_t Opcode = readU16();
- if (NewInsnID >= OutMIs.size())
- OutMIs.resize(NewInsnID + 1);
-
- OutMIs[NewInsnID] = Builder.buildInstr(Opcode);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
- << NewInsnID << "], " << Opcode << ")\n");
- break;
- }
+// Common base class for GICombineRule and GICombineGroup.
+class GICombine {
+ // See GICombineGroup. We only declare it here to make the tablegen pass
+ // simpler.
+ list<GICombine> Rules = ?;
+}
- case GIR_BuildConstant: {
- uint64_t TempRegID = readULEB();
- uint64_t Imm = readU64();
- Builder.buildConstant(State.TempRegisters[TempRegID], Imm);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_BuildConstant(TempReg["
- << TempRegID << "], Imm=" << Imm << ")\n");
- break;
- }
+// A group of combine rules that can be added to a GICombiner or another group.
+class GICombineGroup<list<GICombine> rules> : GICombine {
+ // The rules contained in this group. The rules in a group are flattened into
+ // a single list and sorted into whatever order is most efficient. However,
+ // they will never be re-ordered such that behaviour differs from the
+ // specified order. It is therefore possible to use the order of rules in this
+ // list to describe priorities.
+ let Rules = rules;
+}
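
As an illustration of the ordering guarantee described above, a group can simply list rules in priority order (a minimal sketch; copy_prop and idempotent_prop are defined further down in this file):

def identity_combines_sketch : GICombineGroup<[
  copy_prop,        // considered before idempotent_prop when both could fire
  idempotent_prop
]>;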
- case GIR_RootToRootCopy:
- case GIR_Copy: {
- uint64_t NewInsnID =
- (MatcherOpcode == GIR_RootToRootCopy) ? 0 : readULEB();
- uint64_t OldInsnID =
- (MatcherOpcode == GIR_RootToRootCopy) ? 0 : readULEB();
- uint64_t OpIdx = readULEB();
- assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
- OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs()
- << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
- << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
- break;
- }
+// Declares a combiner implementation class.
+class GICombiner<string classname, list<GICombine> rules>
+ : GICombineGroup<rules> {
+ // The class name to use in the generated output.
+ string Classname = classname;
+ // Combiners can use this so they're free to define tryCombineAll themselves
+ // and do extra work before/after calling the TableGen-erated code.
+ string CombineAllMethodName = "tryCombineAll";
+}
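
For illustration, a backend would typically instantiate one of these roughly as follows; "MyCombinerImpl" is a hypothetical class name, and TableGen emits that class together with its tryCombineAll body:

def MyPreLegalizerCombiner : GICombiner<"MyCombinerImpl",
    [copy_prop, idempotent_prop]>;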
- case GIR_CopyRemaining: {
- uint64_t NewInsnID = readULEB();
- uint64_t OldInsnID = readULEB();
- uint64_t OpIdx = readULEB();
- assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
- MachineInstr &OldMI = *State.MIs[OldInsnID];
- MachineInstrBuilder &NewMI = OutMIs[NewInsnID];
- for (const auto &Op : drop_begin(OldMI.operands(), OpIdx))
- NewMI.add(Op);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_CopyRemaining(OutMIs["
- << NewInsnID << "], MIs[" << OldInsnID
- << "], /*start=*/" << OpIdx << ")\n");
- break;
- }
+/// Declares data that is passed from the match stage to the apply stage.
+class GIDefMatchData<string type> {
+ /// A C++ type name indicating the storage type.
+ string Type = type;
+}
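
A sketch of how matchdata threads a value from the match step to the apply step; the rule and the two Helper methods are hypothetical, but the shape mirrors extending_loads below:

def shift_amount_matchinfo : GIDefMatchData<"uint64_t">;
def my_shl_combine_sketch : GICombineRule<
  (defs root:$root, shift_amount_matchinfo:$info),
  (match (G_SHL $dst, $x, $amt):$root,
         [{ return Helper.matchMyShlCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyMyShlCombine(*${root}, ${info}); }])>;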
- case GIR_CopyOrAddZeroReg: {
- uint64_t NewInsnID = readULEB();
- uint64_t OldInsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t ZeroReg = readU16();
- assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
- MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
- if (isOperandImmEqual(MO, 0, MRI))
- OutMIs[NewInsnID].addReg(ZeroReg);
- else
- OutMIs[NewInsnID].add(MO);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
- << NewInsnID << "], MIs[" << OldInsnID << "], "
- << OpIdx << ", " << ZeroReg << ")\n");
- break;
- }
+class GICombineRule<dag defs, dag a0, dag a1 = (empty_action)> : GICombine {
+ /// Defines the external interface of the match rule. This includes:
+ /// * The names of the root nodes (requires at least one)
+ /// See GIDefKind for details.
+ dag Defs = defs;
+
+  // The action lists that will be used. Two combinations are supported:
+  // match (Action0) + apply (Action1), or
+  // combine (Action0) + empty_action (Action1).
+ dag Action0 = a0;
+ dag Action1 = a1;
+
+ /// Defines the predicates that are checked before the match function
+ /// is called. Targets can use this to, for instance, check Subtarget
+ /// features.
+ list<Predicate> Predicates = [];
+
+ // Maximum number of permutations of this rule that can be emitted.
+ // Set to -1 to disable the limit.
+ int MaxPermutations = 16;
+}
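
A minimal sketch of the match (Action0) + apply (Action1) form using only MIR patterns; note that a real fold of this shape would also need a predicate checking that $dst and $src have the same type:

def zext_trunc_fold_sketch : GICombineRule<
  (defs root:$dst),
  (match (G_ZEXT $mid, $src),
         (G_TRUNC $dst, $mid)),
  (apply (COPY $dst, $src))>;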
- case GIR_CopySubReg: {
- uint64_t NewInsnID = readULEB();
- uint64_t OldInsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t SubRegIdx = readU16();
- assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
- OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
- {}, SubRegIdx);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
- << NewInsnID << "], MIs[" << OldInsnID << "], "
- << OpIdx << ", " << SubRegIdx << ")\n");
- break;
- }
+def gi_mo;
+def gi_imm;
- case GIR_AddImplicitDef: {
- uint64_t InsnID = readULEB();
- uint16_t RegNum = readU16();
- RegState Flags = static_cast<RegState>(readU16());
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- Flags |= RegState::Implicit;
- OutMIs[InsnID].addDef(RegNum, Flags);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
- << InsnID << "], " << RegNum << ", "
- << static_cast<uint16_t>(Flags) << ")\n");
- break;
- }
+// This is an equivalent of PatFrags but for MIR Patterns.
+//
+// GICombinePatFrags can be used in place of instructions for 'match' patterns.
+// Much like normal instructions, the defs (outs) come first, and the ins second.
+//
+// Out operands can only be of type "root" or "gi_mo", and they must be defined
+// by an instruction pattern in all alternatives.
+//
+// In operands can be gi_imm or gi_mo. They cannot be redefined in any
+// alternative pattern; they may only be used in the C++ code or as an input
+// operand of an instruction pattern.
+class GICombinePatFrag<dag outs, dag ins, list<dag> alts> {
+ dag InOperands = ins;
+ dag OutOperands = outs;
+ list<dag> Alternatives = alts;
+}
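
A sketch of a two-alternative frag (the name is hypothetical; idempotent_prop_frags below is the in-file user of this mechanism):

def add_or_or_frag_sketch : GICombinePatFrag<
  (outs root:$dst), (ins $a, $b),
  [(pattern (G_ADD $dst, $a, $b)),
   (pattern (G_OR  $dst, $a, $b))]>;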
- case GIR_AddImplicitUse: {
- uint64_t InsnID = readULEB();
- uint16_t RegNum = readU16();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
- << InsnID << "], " << RegNum << ")\n");
- break;
- }
+//===----------------------------------------------------------------------===//
+// Pattern Special Types
+//===----------------------------------------------------------------------===//
- case GIR_AddRegister: {
- uint64_t InsnID = readULEB();
- uint16_t RegNum = readU16();
- RegState RegFlags = static_cast<RegState>(readU16());
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- OutMIs[InsnID].addReg(RegNum, RegFlags);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
- << InsnID << "], " << RegNum << ", "
- << static_cast<uint16_t>(RegFlags) << ")\n");
- break;
- }
- case GIR_AddIntrinsicID: {
- uint64_t InsnID = readULEB();
- uint16_t Value = readU16();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- OutMIs[InsnID].addIntrinsicID((Intrinsic::ID)Value);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_AddIntrinsicID(OutMIs["
- << InsnID << "], " << Value << ")\n");
- break;
- }
- case GIR_SetImplicitDefDead: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_SetImplicitDefDead(OutMIs["
- << InsnID << "], OpIdx=" << OpIdx << ")\n");
- MachineInstr *MI = OutMIs[InsnID];
- assert(MI && "Modifying undefined instruction");
- MI->getOperand(MI->getNumExplicitOperands() + OpIdx).setIsDead();
- break;
- }
- case GIR_SetMIFlags: {
- uint64_t InsnID = readULEB();
- uint32_t Flags = readU32();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_SetMIFlags(OutMIs["
- << InsnID << "], " << Flags << ")\n");
- MachineInstr *MI = OutMIs[InsnID];
- MI->setFlags(MI->getFlags() | Flags);
- break;
- }
- case GIR_UnsetMIFlags: {
- uint64_t InsnID = readULEB();
- uint32_t Flags = readU32();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_UnsetMIFlags(OutMIs["
- << InsnID << "], " << Flags << ")\n");
- MachineInstr *MI = OutMIs[InsnID];
- MI->setFlags(MI->getFlags() & ~Flags);
- break;
- }
- case GIR_CopyMIFlags: {
- uint64_t InsnID = readULEB();
- uint64_t OldInsnID = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_CopyMIFlags(OutMIs["
- << InsnID << "], MIs[" << OldInsnID << "])\n");
- MachineInstr *MI = OutMIs[InsnID];
- MI->setFlags(MI->getFlags() | State.MIs[OldInsnID]->getFlags());
- break;
- }
- case GIR_AddSimpleTempRegister:
- case GIR_AddTempRegister:
- case GIR_AddTempSubRegister: {
- uint64_t InsnID = readULEB();
- uint64_t TempRegID = readULEB();
- RegState TempRegFlags = {};
- if (MatcherOpcode != GIR_AddSimpleTempRegister)
- TempRegFlags = static_cast<RegState>(readU16());
- uint16_t SubReg = 0;
- if (MatcherOpcode == GIR_AddTempSubRegister)
- SubReg = readU16();
-
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
-
- OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags,
- SubReg);
- DEBUG_WITH_TYPE(
- TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs[" << InsnID
- << "], TempRegisters[" << TempRegID << "]";
- if (SubReg) dbgs() << '.' << TRI.getSubRegIndexName(SubReg);
- dbgs() << ", " << static_cast<uint16_t>(TempRegFlags) << ")\n");
- break;
- }
+class GISpecialType;
- case GIR_AddImm8:
- case GIR_AddImm: {
- const bool IsAdd8 = (MatcherOpcode == GIR_AddImm8);
- uint64_t InsnID = readULEB();
- uint64_t Imm = IsAdd8 ? (int64_t)readS8() : readU64();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- OutMIs[InsnID].addImm(Imm);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
- << "], " << Imm << ")\n");
- break;
- }
+// In an apply pattern, GITypeOf can be used to set the type of a new temporary
+// register to match the type of a matched register.
+//
+// This can only be used on temporary registers defined by the apply pattern.
+//
+// TODO: Make this work in matchers as well?
+//
+// FIXME: Syntax is very ugly.
+class GITypeOf<string opName> : GISpecialType {
+ string OpName = opName;
+}
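
A sketch of the typed-immediate form, giving a new zero constant the type of the matched register $x (the rule name is hypothetical):

def mul_by_neg_one_sketch : GICombineRule<
  (defs root:$dst),
  (match (G_MUL $dst, $x, -1)),
  (apply (G_SUB $dst, (GITypeOf<"$x"> 0), $x))>;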
- case GIR_AddCImm: {
- uint64_t InsnID = readULEB();
- int TypeID = readS8();
- uint64_t Imm = readU64();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
-
- unsigned Width = ExecInfo.TypeObjects[TypeID].getScalarSizeInBits();
- LLVMContext &Ctx = MF->getFunction().getContext();
- OutMIs[InsnID].addCImm(
- ConstantInt::get(IntegerType::get(Ctx, Width), Imm, /*signed*/ true));
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_AddCImm(OutMIs[" << InsnID
- << "], TypeID=" << TypeID << ", Imm=" << Imm
- << ")\n");
- break;
- }
+// The type of an operand that can match a variable number of operands.
+// This type contains a minimum and maximum number of operands to match.
+// The minimum must be 1 or more, as we cannot have an operand representing
+// zero operands, and the max can be zero (which means "unlimited") or a value
+// greater than the minimum.
+class GIVariadic<int min = 1, int max = 0> : GISpecialType {
+ int MinArgs = min;
+ int MaxArgs = max;
+}
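
A sketch of binding a trailing operand range, assuming (per the MIRPatterns documentation) that the variadic binder sits in the last operand position:

// Bind between one and three trailing sources of a G_BUILD_VECTOR.
(match (G_BUILD_VECTOR $dst, $first, GIVariadic<1, 3>:$rest))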
- case GIR_ComplexRenderer: {
- uint64_t InsnID = readULEB();
- uint16_t RendererID = readU16();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- for (const auto &RenderOpFn : State.Renderers[RendererID])
- RenderOpFn(OutMIs[InsnID]);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
- << InsnID << "], " << RendererID << ")\n");
- break;
- }
- case GIR_ComplexSubOperandRenderer: {
- uint64_t InsnID = readULEB();
- uint16_t RendererID = readU16();
- uint64_t RenderOpID = readULEB();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIR_ComplexSubOperandRenderer(OutMIs["
- << InsnID << "], " << RendererID << ", "
- << RenderOpID << ")\n");
- break;
- }
- case GIR_ComplexSubOperandSubRegRenderer: {
- uint64_t InsnID = readULEB();
- uint16_t RendererID = readU16();
- uint64_t RenderOpID = readULEB();
- uint16_t SubRegIdx = readU16();
- MachineInstrBuilder &MI = OutMIs[InsnID];
- assert(MI && "Attempted to add to undefined instruction");
- State.Renderers[RendererID][RenderOpID](MI);
- MI->getOperand(MI->getNumOperands() - 1).setSubReg(SubRegIdx);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIR_ComplexSubOperandSubRegRenderer(OutMIs["
- << InsnID << "], " << RendererID << ", "
- << RenderOpID << ", " << SubRegIdx << ")\n");
- break;
- }
+//===----------------------------------------------------------------------===//
+// Pattern Builtins
+//===----------------------------------------------------------------------===//
- case GIR_CopyConstantAsSImm: {
- uint64_t NewInsnID = readULEB();
- uint64_t OldInsnID = readULEB();
- assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
- assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
- "Expected G_CONSTANT");
- if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
- OutMIs[NewInsnID].addImm(
- State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
- } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
- OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
- else
- llvm_unreachable("Expected Imm or CImm operand");
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
- << NewInsnID << "], MIs[" << OldInsnID << "])\n");
- break;
- }
+// "Magic" Builtin instructions for MIR patterns.
+// The definitions below are recognized and expanded by the combiner backend.
+class GIBuiltinInst;
- // TODO: Needs a test case once we have a pattern that uses this.
- case GIR_CopyFConstantAsFPImm: {
- uint64_t NewInsnID = readULEB();
- uint64_t OldInsnID = readULEB();
- assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
- assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
- "Expected G_FCONSTANT");
- if (State.MIs[OldInsnID]->getOperand(1).isFPImm())
- OutMIs[NewInsnID].addFPImm(
- State.MIs[OldInsnID]->getOperand(1).getFPImm());
- else
- llvm_unreachable("Expected FPImm operand");
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs()
- << CurrentIdx << ": GIR_CopyFPConstantAsFPImm(OutMIs["
- << NewInsnID << "], MIs[" << OldInsnID << "])\n");
- break;
- }
+// Replace all references to a register with another one.
+//
+// Usage:
+// (apply (GIReplaceReg $old, $new))
+//
+// Operands:
+// - $old (out) register defined by a matched instruction
+// - $new (in) register
+//
+// Semantics:
+// - Can only appear in an 'apply' pattern.
+// - If both old/new are operands of matched instructions,
+// "canReplaceReg" is checked before applying the rule.
+def GIReplaceReg : GIBuiltinInst;
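
idempotent_prop below is the canonical in-file user; a standalone sketch looks like:

def copy_replace_sketch : GICombineRule<
  (defs root:$dst),
  (match (COPY $dst, $src)),
  (apply (GIReplaceReg $dst, $src))>;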
- case GIR_CustomRenderer: {
- uint64_t InsnID = readULEB();
- uint64_t OldInsnID = readULEB();
- uint16_t RendererFnID = readU16();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
- << InsnID << "], MIs[" << OldInsnID << "], "
- << RendererFnID << ")\n");
- (Exec.*ExecInfo.CustomRenderers[RendererFnID])(
- OutMIs[InsnID], *State.MIs[OldInsnID],
- -1); // Not a source operand of the old instruction.
- break;
- }
- case GIR_DoneWithCustomAction: {
- uint16_t FnID = readU16();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_DoneWithCustomAction(FnID="
- << FnID << ")\n");
- assert(FnID > GICXXCustomAction_Invalid && "Expected a valid FnID");
- if (runCustomAction(FnID, State, OutMIs)) {
- propagateFlags();
- return true;
- }
-
- if (handleReject() == RejectAndGiveUp)
- return false;
- break;
- }
- case GIR_CustomOperandRenderer: {
- uint64_t InsnID = readULEB();
- uint64_t OldInsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t RendererFnID = readU16();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIR_CustomOperandRenderer(OutMIs[" << InsnID
- << "], MIs[" << OldInsnID << "]->getOperand("
- << OpIdx << "), " << RendererFnID << ")\n");
- (Exec.*ExecInfo.CustomRenderers[RendererFnID])(
- OutMIs[InsnID], *State.MIs[OldInsnID], OpIdx);
- break;
- }
- case GIR_ConstrainOperandRC: {
- uint64_t InsnID = readULEB();
- uint64_t OpIdx = readULEB();
- uint16_t RCEnum = readU16();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- MachineInstr &I = *OutMIs[InsnID].getInstr();
- MachineFunction &MF = *I.getParent()->getParent();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
- MachineOperand &MO = I.getOperand(OpIdx);
- constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
- << InsnID << "], " << OpIdx << ", " << RCEnum
- << ")\n");
- break;
- }
+// Apply action that erases the match root.
+//
+// Usage:
+// (apply (GIEraseRoot))
+//
+// Semantics:
+// - Can only appear as the only pattern of an 'apply' pattern list.
+// - The root cannot have any output operands.
+// - The root must be a CodeGenInstruction.
+//
+// TODO: Allow using this directly, like (apply GIEraseRoot)
+def GIEraseRoot : GIBuiltinInst;
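
A sketch of erasing a store of an undef value; a real rule would also add a C++ predicate verifying that removing the store is legal:

def erase_undef_store_sketch : GICombineRule<
  (defs root:$mi),
  (match (G_IMPLICIT_DEF $val),
         (G_STORE $val, $ptr):$mi),
  (apply (GIEraseRoot))>;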
- case GIR_RootConstrainSelectedInstOperands:
- case GIR_ConstrainSelectedInstOperands: {
- uint64_t InsnID = (MatcherOpcode == GIR_RootConstrainSelectedInstOperands)
- ? 0
- : readULEB();
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
- RBI);
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx
- << ": GIR_ConstrainSelectedInstOperands(OutMIs["
- << InsnID << "])\n");
- break;
- }
- case GIR_MergeMemOperands: {
- uint64_t InsnID = readULEB();
- uint64_t NumInsn = MatchTable[CurrentIdx++];
- assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
- << InsnID << "]");
- for (unsigned K = 0; K < NumInsn; ++K) {
- uint64_t NextID = readULEB();
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << ", MIs[" << NextID << "]");
- for (const auto &MMO : State.MIs[NextID]->memoperands())
- OutMIs[InsnID].addMemOperand(MMO);
- }
- DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << ")\n");
- break;
- }
- case GIR_EraseFromParent: {
- uint64_t InsnID = readULEB();
- MachineInstr *MI = State.MIs[InsnID];
- assert(MI && "Attempted to erase an undefined instruction");
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
- << InsnID << "])\n");
- eraseImpl(MI);
- break;
- }
- case GIR_EraseRootFromParent_Done: {
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs()
- << CurrentIdx << ": GIR_EraseRootFromParent_Done\n");
- eraseImpl(State.MIs[0]);
- propagateFlags();
- return true;
- }
- case GIR_MakeTempReg: {
- uint64_t TempRegID = readULEB();
- int TypeID = readS8();
-
- State.TempRegisters[TempRegID] =
- MRI.createGenericVirtualRegister(getTypeFromIdx(TypeID));
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
- << "] = GIR_MakeTempReg(" << TypeID << ")\n");
- break;
- }
- case GIR_ReplaceReg: {
- uint64_t OldInsnID = readULEB();
- uint64_t OldOpIdx = readULEB();
- uint64_t NewInsnID = readULEB();
- uint64_t NewOpIdx = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_ReplaceReg(MIs["
- << OldInsnID << "][" << OldOpIdx << "] = MIs["
- << NewInsnID << "][" << NewOpIdx << "])\n");
-
- Register Old = State.MIs[OldInsnID]->getOperand(OldOpIdx).getReg();
- Register New = State.MIs[NewInsnID]->getOperand(NewOpIdx).getReg();
- if (Observer)
- Observer->changingAllUsesOfReg(MRI, Old);
- MRI.replaceRegWith(Old, New);
- if (Observer)
- Observer->finishedChangingAllUsesOfReg();
- break;
- }
- case GIR_ReplaceRegWithTempReg: {
- uint64_t OldInsnID = readULEB();
- uint64_t OldOpIdx = readULEB();
- uint64_t TempRegID = readULEB();
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_ReplaceRegWithTempReg(MIs["
- << OldInsnID << "][" << OldOpIdx << "] = TempRegs["
- << TempRegID << "])\n");
-
- Register Old = State.MIs[OldInsnID]->getOperand(OldOpIdx).getReg();
- Register New = State.TempRegisters[TempRegID];
- if (Observer)
- Observer->changingAllUsesOfReg(MRI, Old);
- MRI.replaceRegWith(Old, New);
- if (Observer)
- Observer->finishedChangingAllUsesOfReg();
- break;
- }
- case GIR_Coverage: {
- uint32_t RuleID = readU32();
- assert(CoverageInfo);
- CoverageInfo->setCovered(RuleID);
-
- DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << CurrentIdx
- << ": GIR_Coverage("
- << RuleID << ")");
- break;
- }
+//===----------------------------------------------------------------------===//
+// Pattern MIFlags
+//===----------------------------------------------------------------------===//
- case GIR_Done:
- DEBUG_WITH_TYPE(TgtExecutor::getName(),
- dbgs() << CurrentIdx << ": GIR_Done\n");
- propagateFlags();
- return true;
- default:
- llvm_unreachable("Unexpected command");
- }
- }
+class MIFlagEnum<string enumName> {
+ string EnumName = "MachineInstr::" # enumName;
+}
-} // end namespace llvm
+def FmNoNans : MIFlagEnum<"FmNoNans">;
+def FmNoInfs : MIFlagEnum<"FmNoInfs">;
+def FmNsz : MIFlagEnum<"FmNsz">;
+def FmArcp : MIFlagEnum<"FmArcp">;
+def FmContract : MIFlagEnum<"FmContract">;
+def FmAfn : MIFlagEnum<"FmAfn">;
+def FmReassoc : MIFlagEnum<"FmReassoc">;
+def IsExact : MIFlagEnum<"IsExact">;
+def NoSWrap : MIFlagEnum<"NoSWrap">;
+def NoUWrap : MIFlagEnum<"NoUWrap">;
+def NonNeg : MIFlagEnum<"NonNeg">;
+def InBounds : MIFlagEnum<"InBounds">;
+
+def MIFlags;
+// def not; -> Already defined as an SDNode
-#endif // LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H
\ No newline at end of file
+//===----------------------------------------------------------------------===//
+
+def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
+def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
+def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;
+
+def register_matchinfo: GIDefMatchData<"Register">;
+def int64_matchinfo: GIDefMatchData<"int64_t">;
+def apint_matchinfo : GIDefMatchData<"APInt">;
+def constantfp_matchinfo : GIDefMatchData<"ConstantFP*">;
+def build_fn_matchinfo :
+GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
+def unsigned_matchinfo: GIDefMatchData<"unsigned">;
+def register_vector_matchinfo : GIDefMatchData<"SmallVector<Register>">;
+def mi_vector_matchinfo : GIDefMatchData<"SmallVector<MachineInstr *>">;
+
+def copy_prop : GICombineRule<
+ (defs root:$d),
+ (match (COPY $d, $s):$mi,
+ [{ return Helper.matchCombineCopy(*${mi}); }]),
+ (apply [{ Helper.applyCombineCopy(*${mi}); }])>;
+
+// idempotent operations
+// Fold (freeze (freeze x)) -> (freeze x).
+// Fold (fabs (fabs x)) -> (fabs x).
+// Fold (fcanonicalize (fcanonicalize x)) -> (fcanonicalize x).
+def idempotent_prop_frags : GICombinePatFrag<
+ (outs root:$dst, $src), (ins),
+ !foreach(op, [G_FREEZE, G_FABS, G_FCANONICALIZE],
+ (pattern (op $dst, $src), (op $src, $x)))>;
+
+def idempotent_prop : GICombineRule<
+ (defs root:$dst),
+ (match (idempotent_prop_frags $dst, $src)),
+ (apply (GIReplaceReg $dst, $src))>;
+
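As a quick sanity check of the idempotence the rule above relies on, here is a standalone C++ sketch (illustrative only, not part of the patch; it uses fabs as the stand-in for all three opcodes):

    #include <cassert>
    #include <cmath>

    int main() {
      // fabs (like freeze and fcanonicalize) is idempotent: applying it
      // twice gives the same value as applying it once.
      for (double X : {-2.5, -0.0, 0.0, 3.25})
        assert(std::fabs(std::fabs(X)) == std::fabs(X));
    }
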
+// Convert freeze(Op(Op0, NonPoisonOps...)) to Op(freeze(Op0), NonPoisonOps...)
+// when Op0 is not guaranteed non-poison
+def push_freeze_to_prevent_poison_from_propagating : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_FREEZE $dst, $src):$root,
+ [{ return !isGuaranteedNotToBePoison(${src}.getReg(), MRI) && Helper.matchFreezeOfSingleMaybePoisonOperand(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def extending_loads : GICombineRule<
+ (defs root:$root, extending_load_matchdata:$matchinfo),
+ (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
+ [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;
+
+def load_and_mask : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;
+
+def sext_trunc_sextload : GICombineRule<
+ (defs root:$d),
+ (match (wip_match_opcode G_SEXT_INREG):$d,
+ [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
+ (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;
+
+def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
+def sext_inreg_of_load : GICombineRule<
+ (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
+ (match (wip_match_opcode G_SEXT_INREG):$root,
+ [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;
+
+def sext_inreg_to_zext_inreg : GICombineRule<
+ (defs root:$dst),
+ (match
+ (G_SEXT_INREG $dst, $src, $imm):$root,
+ [{
+ unsigned BitWidth = MRI.getType(${src}.getReg()).getScalarSizeInBits();
+ return Helper.getValueTracking()->maskedValueIsZero(${src}.getReg(),
+ APInt::getOneBitSet(BitWidth, ${imm}.getImm() - 1)); }]),
+ (apply [{
+ Helper.getBuilder().setInstrAndDebugLoc(*${root});
+ Helper.getBuilder().buildZExtInReg(${dst}, ${src}, ${imm}.getImm());
+ ${root}->eraseFromParent();
+ }])
+>;
+
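The predicate above checks that the sign bit of the K-bit field (bit K-1) is known zero; under that condition sign- and zero-extension of the field agree. A standalone C++ sketch of the identity (illustrative only, not part of the patch; sextInReg/zextInReg are hypothetical helper names):

    #include <cassert>
    #include <cstdint>

    // Sign-extend the low K bits of X (what G_SEXT_INREG computes).
    static int32_t sextInReg(uint32_t X, unsigned K) {
      unsigned Shift = 32 - K;
      return (int32_t)(X << Shift) >> Shift;
    }

    // Zero-extend the low K bits of X (what the zext_inreg form computes).
    static uint32_t zextInReg(uint32_t X, unsigned K) {
      return X & ((K == 32) ? ~0u : ((1u << K) - 1));
    }

    int main() {
      const unsigned K = 8; // arbitrary example width
      for (uint32_t X = 0; X < 0x200; ++X)
        if ((X & (1u << (K - 1))) == 0) // sign bit of the field is zero
          assert((uint32_t)sextInReg(X, K) == zextInReg(X, K));
    }
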
+def combine_extracted_vector_load : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
+ [{ return Helper.matchCombineExtractedVectorLoad(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def combine_indexed_load_store : GICombineRule<
+ (defs root:$root, indexed_load_store_matchdata:$matchinfo),
+ (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
+ [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;
+
+def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
+def opt_brcond_by_inverting_cond : GICombineRule<
+ (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
+ (match (wip_match_opcode G_BR):$root,
+ [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;
+
+def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
+def ptr_add_immed_chain : GICombineRule<
+ (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
+ (match (wip_match_opcode G_PTR_ADD):$d,
+ [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;
+
+def shift_const_op : GICombinePatFrag<
+ (outs root:$dst), (ins),
+ !foreach(op,
+ [G_SHL, G_ASHR, G_LSHR],
+ (pattern (op $dst, $shifted, $amt)))>;
+def shift_result_matchdata : GIDefMatchData<"std::optional<int64_t>">;
+def shifts_too_big : GICombineRule<
+ (defs root:$root, shift_result_matchdata:$matchinfo),
+ (match (shift_const_op $root):$mi,
+ [{ return Helper.matchShiftsTooBig(*${mi}, ${matchinfo}); }]),
+ (apply [{
+ if (${matchinfo}) {
+ Helper.replaceInstWithConstant(*${mi}, *${matchinfo});
+ } else {
+ Helper.replaceInstWithUndef(*${mi});
+ }
+ }])>;
+
+// Fold shift (shift base, x), y -> shift base, (x+y), if shifts are same
+def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
+def shift_immed_chain : GICombineRule<
+ (defs root:$d, shift_immed_matchdata:$matchinfo),
+ (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
+ [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;
+
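A standalone C++ check of the composition identity behind this rule (illustrative only, not part of the patch; the shift amounts are arbitrary example values):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Same-direction shifts by immediates compose additively as long as
      // the combined amount stays below the bit width.
      const unsigned C1 = 5, C2 = 7; // C1 + C2 < 32
      for (uint32_t X : {0u, 1u, 0xDEADBEEFu, ~0u}) {
        assert(((X << C1) << C2) == (X << (C1 + C2)));
        assert(((X >> C1) >> C2) == (X >> (C1 + C2)));
        assert((((int32_t)X >> C1) >> C2) == ((int32_t)X >> (C1 + C2)));
      }
    }
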
+// Transform shift (logic (shift X, C0), Y), C1
+// -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
+def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
+def shift_of_shifted_logic_chain : GICombineRule<
+ (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
+ (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
+ [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;
+
+def mul_to_shl : GICombineRule<
+ (defs root:$d, unsigned_matchinfo:$matchinfo),
+ (match (G_MUL $d, $op1, $op2):$mi,
+ [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
+ (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;
+
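A standalone C++ check of the identity behind mul_to_shl (illustrative only, not part of the patch; the shift amount is an arbitrary example):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Multiplying by a power of two is a left shift; both sides wrap
      // modulo 2^32, so the fold needs no overflow precondition.
      const unsigned K = 3; // multiply by 8
      for (uint32_t X : {0u, 1u, 123u, 0xFFFFFFFFu})
        assert(X * (1u << K) == (X << K));
    }
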
+// (sub x, C) -> (add x, -C)
+def sub_to_add : GICombineRule<
+ (defs root:$d, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $c, $imm),
+ (G_SUB $d, $op1, $c):$mi,
+ [{ return Helper.matchCombineSubToAdd(*${mi}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${mi}, ${matchinfo}); }])>;
+
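A standalone C++ check of the two's-complement identity behind sub_to_add (illustrative only, not part of the patch; the constant is an arbitrary example):

    #include <cassert>
    #include <cstdint>

    int main() {
      // In two's complement, subtracting a constant is adding its negation;
      // unsigned arithmetic makes the modular wraparound explicit.
      const uint32_t C = 42;
      for (uint32_t X : {0u, 1u, 41u, 0x80000000u, ~0u})
        assert(X - C == X + (0u - C));
    }
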
+// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
+def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
+def reduce_shl_of_extend : GICombineRule<
+ (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
+ (match (G_SHL $dst, $src0, $src1):$mi,
+ [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
+ (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;
+
+// Combine (bitreverse (shl (bitreverse x), y)) -> (lshr x, y)
+def bitreverse_shl : GICombineRule<
+ (defs root:$d),
+ (match (G_BITREVERSE $rev, $val),
+ (G_SHL $src, $rev, $amt):$mi,
+ (G_BITREVERSE $d, $src),
+ [{ return Helper.isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR,
+ {MRI.getType(${val}.getReg()),
+ MRI.getType(${amt}.getReg())}}); }]),
+ (apply (G_LSHR $d, $val, $amt))>;
+
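A standalone C++ check of the bitreverse/shift identity at i8 (illustrative only, not part of the patch; rev8 is a hypothetical helper standing in for G_BITREVERSE):

    #include <cassert>
    #include <cstdint>

    // Bit-reverse an 8-bit value (what G_BITREVERSE computes at i8).
    static uint8_t rev8(uint8_t X) {
      uint8_t R = 0;
      for (int I = 0; I < 8; ++I)
        R = (uint8_t)((R << 1) | ((X >> I) & 1));
      return R;
    }

    int main() {
      // Reversing, shifting left, and reversing again is a logical shift
      // right, because the reversal swaps which end the bits fall off.
      for (unsigned X = 0; X < 256; ++X)
        for (unsigned Y = 0; Y < 8; ++Y)
          assert(rev8((uint8_t)(rev8((uint8_t)X) << Y)) == (uint8_t)(X >> Y));
    }
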
+// Combine (bitreverse (lshr (bitreverse x), y)) -> (shl x, y)
+def bitreverse_lshr : GICombineRule<
+ (defs root:$d),
+ (match (G_BITREVERSE $rev, $val),
+ (G_LSHR $src, $rev, $amt):$mi,
+ (G_BITREVERSE $d, $src),
+ [{ return Helper.isLegalOrBeforeLegalizer({TargetOpcode::G_SHL,
+ {MRI.getType(${val}.getReg()),
+ MRI.getType(${amt}.getReg())}}); }]),
+ (apply (G_SHL $d, $val, $amt))>;
+
+// Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
+// Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
+def commute_shift : GICombineRule<
+ (defs root:$d, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SHL):$d,
+ [{ return Helper.matchCommuteShift(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${d}, ${matchinfo}); }])>;
+
+// Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (lshr x, (C1 + C2))
+def lshr_of_trunc_of_lshr_matchdata : GIDefMatchData<"LshrOfTruncOfLshr">;
+def lshr_of_trunc_of_lshr : GICombineRule<
+ (defs root:$root, lshr_of_trunc_of_lshr_matchdata:$matchinfo),
+ (match (G_LSHR $d1, $x, $y):$Shift,
+ (G_TRUNC $d2, $d1),
+ (G_LSHR $dst, $d2, $z):$root,
+ [{ return Helper.matchLshrOfTruncOfLshr(*${root}, ${matchinfo}, *${Shift}); }]),
+ (apply [{ Helper.applyLshrOfTruncOfLshr(*${root}, ${matchinfo}); }])>;
+
+def narrow_binop_feeding_and : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+// [us]itofp(undef) = 0, because the result value is bounded.
+def undef_to_fp_zero : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
+ [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
+ (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;
+
+def undef_to_int_zero: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_AND, G_MUL):$root,
+ [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
+
+def undef_to_negative_one: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;
+
+def binop_left_undef_to_zero: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_SHL, G_UDIV, G_UREM):$root,
+ [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
+
+def binop_right_undef_to_undef: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
+ [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
+ (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
+def unary_undef_to_zero: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_ABS):$root,
+ [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
+
+def unary_undef_to_undef_frags : GICombinePatFrag<
+ (outs root:$dst), (ins),
+ !foreach(op,
+ [G_TRUNC, G_BITCAST, G_ANYEXT, G_PTRTOINT, G_INTTOPTR, G_FPTOSI,
+ G_FPTOUI],
+ (pattern (op $dst, $x), (G_IMPLICIT_DEF $x)))>;
+def unary_undef_to_undef : GICombineRule<
+ (defs root:$dst),
+ (match (unary_undef_to_undef_frags $dst)),
+ (apply [{ Helper.replaceInstWithUndef(*${dst}.getParent()); }])>;
+
+// Instructions where if any source operand is undef, the instruction can be
+// replaced with undef.
+def propagate_undef_any_op: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_ADD, G_SUB, G_XOR):$root,
+ [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
+ (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
+// Instructions where if all source operands are undef, the instruction can be
+// replaced with undef.
+def propagate_undef_all_ops: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_SHUFFLE_VECTOR, G_BUILD_VECTOR):$root,
+ [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
+ (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
+// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
+def propagate_undef_shuffle_mask: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
+ [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
+ (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
+// Replace an insert/extract element of an out of bounds index with undef.
+def insert_extract_vec_elt_out_of_bounds : GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT):$root,
+         [{ return Helper.matchInsertExtractVecEltOutOfBounds(*${root}); }]),
+  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
+// Fold (cond ? x : x) -> x
+def select_same_val: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_SELECT):$root,
+ [{ return Helper.matchSelectSameVal(*${root}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
+>;
+
+// Fold (undef ? x : y) -> y
+def select_undef_cmp: GICombineRule<
+ (defs root:$dst),
+ (match (G_IMPLICIT_DEF $undef),
+ (G_SELECT $dst, $undef, $x, $y)),
+ (apply (GIReplaceReg $dst, $y))
+>;
+
+// Fold (true ? x : y) -> x
+// Fold (false ? x : y) -> y
+def select_constant_cmp: GICombineRule<
+ (defs root:$root, unsigned_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SELECT):$root,
+ [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (C op x) -> (x op C)
+// TODO: handle more isCommutable opcodes
+// TODO: handle compares (currently not marked as isCommutable)
+def commute_int_constant_to_rhs : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_ADD, G_MUL, G_AND, G_OR, G_XOR,
+ G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_UADDO, G_SADDO,
+ G_UMULO, G_SMULO, G_UMULH, G_SMULH,
+ G_UADDSAT, G_SADDSAT, G_SMULFIX, G_UMULFIX,
+ G_SMULFIXSAT, G_UMULFIXSAT):$root,
+ [{ return Helper.matchCommuteConstantToRHS(*${root}); }]),
+ (apply [{ Helper.applyCommuteBinOpOperands(*${root}); }])
+>;
+
+def commute_fp_constant_to_rhs : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_FADD, G_FMUL, G_FMINNUM, G_FMAXNUM,
+ G_FMINNUM_IEEE, G_FMAXNUM_IEEE,
+ G_FMINIMUM, G_FMAXIMUM):$root,
+ [{ return Helper.matchCommuteFPConstantToRHS(*${root}); }]),
+ (apply [{ Helper.applyCommuteBinOpOperands(*${root}); }])
+>;
+
+def commute_constant_to_rhs : GICombineGroup<[
+ commute_int_constant_to_rhs,
+ commute_fp_constant_to_rhs
+]>;
+
+// Fold x op 0 -> x
+def right_identity_zero_frags : GICombinePatFrag<
+ (outs root:$dst), (ins $x),
+ !foreach(op,
+ [G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR,
+ G_LSHR, G_PTR_ADD, G_ROTL, G_ROTR],
+ (pattern (op $dst, $x, 0)))>;
+def right_identity_zero: GICombineRule<
+ (defs root:$dst),
+ (match (right_identity_zero_frags $dst, $lhs)),
+ (apply (GIReplaceReg $dst, $lhs))
+>;
+
+def right_identity_neg_zero_fp: GICombineRule<
+ (defs root:$dst),
+ (match (G_FADD $dst, $x, $y):$root,
+ [{ return Helper.matchConstantFPOp(${y}, -0.0); }]),
+ (apply (GIReplaceReg $dst, $x))
+>;
+
+def right_identity_neg_zero_fp_nsz: GICombineRule<
+ (defs root:$dst),
+ (match (G_FADD $dst, $x, $y, (MIFlags FmNsz)):$root,
+ [{ return Helper.matchConstantFPOp(${y}, 0.0); }]),
+ (apply (GIReplaceReg $dst, $x))
+>;
+
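These two rules differ only in which zero they accept: -0.0 is unconditionally an additive identity, while +0.0 is one only when signed zeros may be ignored (FmNsz). A standalone C++ sketch of why (illustrative only, not part of the patch):

    #include <cassert>
    #include <cmath>

    int main() {
      // -0.0 is the true additive identity under IEEE-754 round-to-nearest:
      // even x == -0.0 survives, while x + (+0.0) flips it to +0.0.
      double NegZero = -0.0;
      assert(std::signbit(NegZero + -0.0));  // -0.0 + -0.0 == -0.0
      assert(!std::signbit(NegZero + 0.0));  // -0.0 + +0.0 == +0.0
      double X = 1.5;
      assert(X + -0.0 == X);
    }
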
+// Fold x op 1 -> x
+def right_identity_one_int: GICombineRule<
+ (defs root:$dst),
+ (match (G_MUL $dst, $x, 1)),
+ (apply (GIReplaceReg $dst, $x))
+>;
+
+def right_identity_one_fp: GICombineRule<
+ (defs root:$dst),
+ (match (G_FMUL $dst, $x, $y):$root,
+ [{ return Helper.matchConstantFPOp(${y}, 1.0); }]),
+ (apply (GIReplaceReg $dst, $x))
+>;
+
+def right_identity_neg_one_fp: GICombineRule<
+ (defs root:$dst),
+ (match (G_FMUL $dst, $x, $y):$root,
+ [{ return Helper.matchConstantFPOp(${y}, -1.0); }]),
+ (apply (G_FNEG $dst, $x))
+>;
+
+def right_identity_one : GICombineGroup<[right_identity_one_int, right_identity_one_fp]>;
+
+// Fold (x op x) -> x
+def binop_same_val_frags : GICombinePatFrag<
+ (outs root:$dst), (ins $x),
+ [
+ (pattern (G_AND $dst, $x, $x)),
+ (pattern (G_OR $dst, $x, $x)),
+ ]
+>;
+def binop_same_val: GICombineRule<
+ (defs root:$dst),
+ (match (binop_same_val_frags $dst, $src)),
+ (apply (GIReplaceReg $dst, $src))
+>;
+
+// Fold (0 op x) -> 0
+def binop_left_to_zero_frags : GICombinePatFrag<
+ (outs root:$dst, $zero), (ins $rhs),
+ !foreach(op,
+ [G_SHL, G_LSHR, G_ASHR, G_SDIV, G_UDIV, G_SREM, G_UREM, G_MUL],
+ (pattern (G_CONSTANT $zero, 0), (op $dst, $zero, $rhs)))>;
+
+def binop_left_to_zero: GICombineRule<
+ (defs root:$dst),
+ (match (binop_left_to_zero_frags $dst, $zero, $rhs)),
+ (apply (GIReplaceReg $dst, $zero))
+>;
+
+def urem_pow2_to_mask : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_UREM):$root,
+ [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
+ (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
+>;
+
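A standalone C++ check of the mask identity behind urem_pow2_to_mask (illustrative only, not part of the patch; the divisor is an arbitrary example):

    #include <cassert>
    #include <cstdint>

    int main() {
      // For a power-of-two divisor, an unsigned remainder is a bit mask:
      // x % 2^K == x & (2^K - 1).
      const uint32_t Pow2 = 8;
      for (uint32_t X : {0u, 7u, 8u, 9u, 0xDEADBEEFu})
        assert(X % Pow2 == (X & (Pow2 - 1)));
    }
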
+// Push a binary operator through a select on constants.
+//
+// binop (select cond, K0, K1), K2 ->
+// select cond, (binop K0, K2), (binop K1, K2)
+
+// Every binary operator that has constant folding. We currently do
+// not have constant folding for G_FPOW, G_FMAXNUM_IEEE or
+// G_FMINNUM_IEEE.
+def fold_binop_into_select : GICombineRule<
+ (defs root:$root, unsigned_matchinfo:$select_op_no),
+ (match (wip_match_opcode
+ G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
+ G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
+ G_SMIN, G_SMAX, G_UMIN, G_UMAX,
+ G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
+ G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
+ [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
+ (apply [{ Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
+>;
+
+// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
+def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
+def div_rem_to_divrem : GICombineRule<
+ (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
+ (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
+ [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (x op 0) -> 0
+def binop_right_to_zero: GICombineRule<
+ (defs root:$dst),
+ (match (G_MUL $dst, $lhs, 0:$zero)),
+ (apply (GIReplaceReg $dst, $zero))
+>;
+
+// Erase stores of undef values.
+def erase_undef_store : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_STORE):$root,
+ [{ return Helper.matchUndefStore(*${root}); }]),
+ (apply [{ Helper.eraseInst(*${root}); }])
+>;
+
+def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
+def simplify_add_to_sub: GICombineRule <
+ (defs root:$root, simplify_add_to_sub_matchinfo:$info),
+ (match (wip_match_opcode G_ADD):$root,
+ [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
+ (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
+>;
+
+// Fold fp_op(cst) to the constant result of the floating point operation.
+class constant_fold_unary_fp_op_rule<Instruction opcode> : GICombineRule <
+ (defs root:$dst),
+ (match (opcode $dst, $src0):$root, (G_FCONSTANT $src0, $cst)),
+ (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${cst}.getFPImm()); }])
+>;
+
+def constant_fold_fneg : constant_fold_unary_fp_op_rule<G_FNEG>;
+def constant_fold_fabs : constant_fold_unary_fp_op_rule<G_FABS>;
+def constant_fold_fsqrt : constant_fold_unary_fp_op_rule<G_FSQRT>;
+def constant_fold_flog2 : constant_fold_unary_fp_op_rule<G_FLOG2>;
+def constant_fold_fptrunc : constant_fold_unary_fp_op_rule<G_FPTRUNC>;
+def constant_fold_fpext : constant_fold_unary_fp_op_rule<G_FPEXT>;
+def constant_fold_fceil : constant_fold_unary_fp_op_rule<G_FCEIL>;
+def constant_fold_ffloor : constant_fold_unary_fp_op_rule<G_FFLOOR>;
+def constant_fold_intrinsic_trunc : constant_fold_unary_fp_op_rule<G_INTRINSIC_TRUNC>;
+def constant_fold_intrinsic_round : constant_fold_unary_fp_op_rule<G_INTRINSIC_ROUND>;
+def constant_fold_intrinsic_roundeven : constant_fold_unary_fp_op_rule<G_INTRINSIC_ROUNDEVEN>;
+def constant_fold_frint : constant_fold_unary_fp_op_rule<G_FRINT>;
+def constant_fold_fnearbyint : constant_fold_unary_fp_op_rule<G_FNEARBYINT>;
+
+// Fold constant zero int to fp conversions.
+class itof_const_zero_fold_rule<Instruction opcode> : GICombineRule <
+ (defs root:$dst),
+ (match (opcode $dst, 0)),
+ // Can't use COPY $dst, 0 here because the 0 operand may be a smaller type
+ // than the destination for itofp.
+ (apply [{ Helper.replaceInstWithFConstant(*${dst}.getParent(), 0.0); }])
+>;
+def itof_const_zero_fold_si : itof_const_zero_fold_rule<G_SITOFP>;
+def itof_const_zero_fold_ui : itof_const_zero_fold_rule<G_UITOFP>;
+
+def constant_fold_fp_ops : GICombineGroup<[
+ constant_fold_fneg,
+ constant_fold_fabs,
+ constant_fold_fsqrt,
+ constant_fold_flog2,
+ constant_fold_fptrunc,
+ constant_fold_fpext,
+ constant_fold_fceil,
+ constant_fold_ffloor,
+ constant_fold_intrinsic_trunc,
+ constant_fold_intrinsic_round,
+ constant_fold_intrinsic_roundeven,
+ constant_fold_frint,
+ constant_fold_fnearbyint,
+ itof_const_zero_fold_si,
+ itof_const_zero_fold_ui
+]>;
+
+// Fold int2ptr(ptr2int(x)) -> x
+def p2i_to_i2p: GICombineRule<
+ (defs root:$root, register_matchinfo:$info),
+ (match (wip_match_opcode G_INTTOPTR):$root,
+ [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
+ (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
+>;
+
+// Fold ptr2int(int2ptr(x)) -> x
+def i2p_to_p2i: GICombineRule<
+ (defs root:$dst, register_matchinfo:$info),
+ (match (G_INTTOPTR $t, $ptr),
+ (G_PTRTOINT $dst, $t):$mi,
+ [{ ${info} = ${ptr}.getReg(); return true; }]),
+ (apply [{ Helper.applyCombineP2IToI2P(*${mi}, ${info}); }])
+>;
+
+// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y
+def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
+def add_p2i_to_ptradd : GICombineRule<
+ (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
+ (match (wip_match_opcode G_ADD):$root,
+ [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
+ (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
+>;
+
+// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
+def const_ptradd_to_i2p: GICombineRule<
+ (defs root:$root, apint_matchinfo:$info),
+ (match (wip_match_opcode G_PTR_ADD):$root,
+ [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
+ (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
+>;
+
+// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
+def hoist_logic_op_with_same_opcode_hands: GICombineRule <
+ (defs root:$root, instruction_steps_matchdata:$info),
+ (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
+ [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
+>;
+
+// Fold (ashr (shl x, C), C) -> (sext_inreg x, C)
+def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
+def shl_ashr_to_sext_inreg : GICombineRule<
+ (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
+ (match (wip_match_opcode G_ASHR): $root,
+ [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
+ (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
+>;
+
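A standalone C++ check of the shift-pair/sign-extension identity (illustrative only, not part of the patch; the shift amount is an arbitrary example):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Shifting left by C and arithmetic-shifting back sign-extends the
      // low (32 - C) bits, i.e. it is G_SEXT_INREG with width 32 - C.
      const unsigned C = 24; // keep the low 8 bits
      for (int32_t X : {0, 1, 0x7F, 0x80, 0xFF, 0x1234}) {
        int32_t Folded = (int32_t)((uint32_t)X << C) >> C;
        assert(Folded == (int8_t)X); // 8-bit sign extension
      }
    }
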
+// Fold sub 0, (and x, 1) -> sext_inreg x, 1
+def neg_and_one_to_sext_inreg : GICombineRule<
+ (defs root:$dst),
+ (match (G_AND $and, $x, 1),
+ (G_SUB $dst, 0, $and),
+ [{ return MRI.hasOneNonDBGUse(${and}.getReg()) &&
+ Helper.isLegalOrBeforeLegalizer(
+ {TargetOpcode::G_SEXT_INREG, {MRI.getType(${x}.getReg())}}); }]),
+ (apply (G_SEXT_INREG $dst, $x, 1))
+>;
+
+// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
+def overlapping_and: GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
+>;
+
+// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
+def redundant_and: GICombineRule <
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
+def redundant_or: GICombineRule <
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// If the input is already sign extended, just drop the extension.
+// sext_inreg x, K -> x
+//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
+def redundant_sext_inreg: GICombineRule <
+ (defs root:$root),
+ (match (wip_match_opcode G_SEXT_INREG):$root,
+ [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
+>;
+
+// Fold (anyext (trunc x)) -> x if the source type is the same as
+// the destination type.
+def anyext_trunc_fold: GICombineRule <
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_ANYEXT):$root,
+ [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (zext (trunc x)) -> x if the source type is the same as the
+// destination type and the truncated bits are known to be zero.
+def zext_trunc_fold: GICombineRule <
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_ZEXT):$root,
+ [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+def not_cmp_fold : GICombineRule<
+ (defs root:$d, register_vector_matchinfo:$info),
+ (match (wip_match_opcode G_XOR): $d,
+ [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
+ (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
+>;
+
+// Fold (fneg (fneg x)) -> x.
+def fneg_fneg_fold: GICombineRule <
+ (defs root:$dst),
+ (match (G_FNEG $t, $src),
+ (G_FNEG $dst, $t)),
+ (apply (GIReplaceReg $dst, $src))
+>;
+
+// Fold (unmerge(merge x, y, z)) -> x, y, z.
+def unmerge_merge : GICombineRule<
+ (defs root:$d, register_vector_matchinfo:$info),
+ (match (wip_match_opcode G_UNMERGE_VALUES): $d,
+ [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
+ (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
+>;
+
+// Fold merge(unmerge).
+def merge_unmerge : GICombineRule<
+ (defs root:$dst, register_matchinfo:$src),
+ (match (G_MERGE_VALUES $dst, GIVariadic<1>:$merge_srcs):$merge,
+ [{
+ // Check if first source comes from G_UNMERGE_VALUES.
+ Register FirstMergeSrc = ${merge_srcs}[0].getReg();
+ MachineInstr *UnmergeMI = MRI.getVRegDef(FirstMergeSrc);
+ if (!UnmergeMI || UnmergeMI->getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+ return false;
+
+ // Check counts match.
+ unsigned NumMergeSrcs = ${merge_srcs}.size();
+ unsigned NumUnmergeDefs = UnmergeMI->getNumDefs();
+ if (NumMergeSrcs != NumUnmergeDefs)
+ return false;
+
+ // Verify all merge sources match unmerge defs in order.
+ for (unsigned I = 0; I < NumMergeSrcs; ++I) {
+ Register MergeSrc = ${merge_srcs}[I].getReg();
+ Register UnmergeDef = UnmergeMI->getOperand(I).getReg();
+
+ if (MergeSrc != UnmergeDef)
+ return false;
+
+ if (!MRI.hasOneNonDBGUse(MergeSrc))
+ return false;
+ }
+
+ // Check size compatibility.
+ ${src} = UnmergeMI->getOperand(NumUnmergeDefs).getReg();
+ LLT SrcTy = MRI.getType(${src});
+ LLT DstTy = MRI.getType(${dst}.getReg());
+ if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
+ return false;
+
+ // Check bitcast legality.
+ if (SrcTy != DstTy) {
+ std::array<LLT, 2> Types = {{DstTy, SrcTy}};
+ LegalityQuery Query(TargetOpcode::G_BITCAST, Types);
+ if (!Helper.isLegalOrBeforeLegalizer(Query))
+ return false;
+ }
+
+ return true;
+ }]),
+ (apply [{
+ LLT SrcTy = MRI.getType(${src});
+ LLT DstTy = MRI.getType(${dst}.getReg());
+
+ Helper.getBuilder().setInstrAndDebugLoc(*${merge});
+
+ if (SrcTy == DstTy) {
+ Helper.replaceRegWith(MRI, ${dst}.getReg(), ${src});
+ } else {
+ Helper.getBuilder().buildBitcast(${dst}.getReg(), ${src});
+ }
+
+ ${merge}->eraseFromParent();
+ }])
+>;
+
+// Fold (fabs (fneg x)) -> (fabs x).
+def fabs_fneg_fold: GICombineRule <
+ (defs root:$dst),
+ (match (G_FNEG $tmp, $x),
+ (G_FABS $dst, $tmp)),
+ (apply (G_FABS $dst, $x))>;
+
+// Fold (unmerge cst) -> cst1, cst2, ...
+def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
+def unmerge_cst : GICombineRule<
+ (defs root:$d, unmerge_cst_matchinfo:$info),
+ (match (wip_match_opcode G_UNMERGE_VALUES): $d,
+ [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
+ (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
+>;
+
+// Fold (unmerge undef) -> undef, undef, ...
+def unmerge_undef : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_UNMERGE_VALUES): $root,
+ [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
+>;
+
+// Transform x,y<dead> = unmerge z -> x = trunc z.
+def unmerge_dead_to_trunc : GICombineRule<
+ (defs root:$d),
+ (match (wip_match_opcode G_UNMERGE_VALUES): $d,
+ [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
+ (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
+>;
+
+// Transform unmerge (build_vector ...) -> build_vector (anyext ...)
+def unmerge_anyext_build_vector : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_UNMERGE_VALUES): $root,
+ [{ return Helper.matchUnmergeValuesAnyExtBuildVector(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
+>;
+
+// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
+def unmerge_zext_to_zext : GICombineRule<
+ (defs root:$d),
+ (match (wip_match_opcode G_UNMERGE_VALUES): $d,
+ [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
+ (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
+>;
+
+/// Transform merge_x_undef -> anyext.
+def merge_of_x_and_undef : GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_IMPLICIT_DEF $undef),
+ (G_MERGE_VALUES $root, $x, $undef):$MI,
+ [{ return Helper.matchMergeXAndUndef(*${MI}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${MI}, ${matchinfo}); }])>;
+
+/// Transform merge_x_zero -> zext.
+def merge_of_x_and_zero : GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $zero, 0),
+ (G_MERGE_VALUES $root, $x, $zero):$MI,
+ [{ return Helper.matchMergeXAndZero(*${MI}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${MI}, ${matchinfo}); }])>;
+
+// Transform build_vector(unmerge(src, 0), ... unmerge(src, n), undef, ..., undef)
+// => concat_vectors(src, undef)
+def combine_build_unmerge : GICombineRule<
+ (defs root:$root, register_matchinfo:$unmergeSrc),
+ (match (G_BUILD_VECTOR $dst, GIVariadic<>:$unused):$root,
+ [{ return Helper.matchCombineBuildUnmerge(*${root}, MRI, ${unmergeSrc}); }]),
+ (apply [{ Helper.applyCombineBuildUnmerge(*${root}, MRI, B, ${unmergeSrc}); }])
+>;
+
+def merge_combines: GICombineGroup<[
+ unmerge_anyext_build_vector,
+ unmerge_merge,
+ merge_unmerge,
+ unmerge_cst,
+ unmerge_undef,
+ unmerge_dead_to_trunc,
+ unmerge_zext_to_zext,
+ merge_of_x_and_undef,
+ merge_of_x_and_zero,
+ combine_build_unmerge
+]>;
+
+// Under certain conditions, transform:
+// trunc (shl x, K) -> shl (trunc x), K
+// trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K))
+def trunc_shift_matchinfo : GIDefMatchData<"std::pair<MachineInstr*, LLT>">;
+def trunc_shift: GICombineRule <
+ (defs root:$root, trunc_shift_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_TRUNC):$root,
+ [{ return Helper.matchCombineTruncOfShift(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyCombineTruncOfShift(*${root}, ${matchinfo}); }])
+>;
+
+// Transform (mul x, -1) -> (sub 0, x)
+def mul_by_neg_one: GICombineRule <
+ (defs root:$dst),
+ (match (G_MUL $dst, $x, -1)),
+ (apply (G_SUB $dst, 0, $x))
+>;
+
+// Fold (xor (and x, y), y) -> (and (not x), y)
+def xor_of_and_with_same_reg_matchinfo :
+ GIDefMatchData<"std::pair<Register, Register>">;
+def xor_of_and_with_same_reg: GICombineRule <
+ (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_XOR):$root,
+ [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
+>;
+
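A standalone C++ check of the Boolean identity behind xor_of_and_with_same_reg (illustrative only, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Bits where X is set cancel out under the XOR, leaving exactly the
      // bits of Y not covered by X: (X & Y) ^ Y == ~X & Y.
      for (uint32_t X : {0u, 0xF0F0F0F0u, ~0u})
        for (uint32_t Y : {0u, 0x12345678u, ~0u})
          assert(((X & Y) ^ Y) == (~X & Y));
    }
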
+// Transform (ptr_add 0, x) -> (int_to_ptr x)
+def ptr_add_with_zero: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_PTR_ADD):$root,
+ [{ return Helper.matchPtrAddZero(*${root}); }]),
+ (apply [{ Helper.applyPtrAddZero(*${root}); }])>;
+
+def combine_insert_vec_elts_build_vector : GICombineRule<
+ (defs root:$root, register_vector_matchinfo:$info),
+ (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
+ [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
+ (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;
+
+def load_or_combine : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
+def extend_through_phis : GICombineRule<
+ (defs root:$root, extend_through_phis_matchdata:$matchinfo),
+ (match (wip_match_opcode G_PHI):$root,
+ [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;
+
+// Currently contains only combine_insert_vec_elts_build_vector.
+def insert_vec_elt_combines : GICombineGroup<
+ [combine_insert_vec_elts_build_vector]>;
+
+def extract_vec_elt_build_vec : GICombineRule<
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
+ [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;
+
+// Fold away full elt extracts from a build_vector.
+def extract_all_elts_from_build_vector_matchinfo :
+ GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
+def extract_all_elts_from_build_vector : GICombineRule<
+ (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_BUILD_VECTOR):$root,
+ [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;
+
+def extract_vec_elt_combines : GICombineGroup<[
+ extract_vec_elt_build_vec,
+ extract_all_elts_from_build_vector]>;
+
+def funnel_shift_from_or_shift : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchOrShiftToFunnelShift(*${root}, false, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
+>;
+
+def funnel_shift_from_or_shift_constants_are_legal : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchOrShiftToFunnelShift(*${root}, true, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
+>;
+
+def funnel_shift_to_rotate : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_FSHL, G_FSHR):$root,
+ [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
+ (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
+>;
+
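A standalone C++ sketch of why a funnel shift of a register with itself is a rotate (illustrative only, not part of the patch; fshl32 is a hypothetical helper and the snippet needs C++20 for std::rotl):

    #include <bit>      // std::rotl (C++20)
    #include <cassert>
    #include <cstdint>

    // Funnel shift left of the concatenation X:Y, keeping the high half
    // (S assumed already reduced modulo 32 and nonzero).
    static uint32_t fshl32(uint32_t X, uint32_t Y, unsigned S) {
      return (X << S) | (Y >> (32 - S));
    }

    int main() {
      // With both funnel inputs equal, the funnel shift is a rotate:
      // fshl(x, x, s) == rotl(x, s).
      uint32_t X = 0xDEADBEEF;
      for (int S = 1; S < 32; ++S)
        assert(fshl32(X, X, (unsigned)S) == std::rotl(X, S));
    }
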
+// Fold fshr x, y, 0 -> y
+def funnel_shift_right_zero: GICombineRule<
+ (defs root:$root),
+ (match (G_FSHR $x, $y, $z, 0):$root),
+ (apply (COPY $x, $z))
+>;
+
+// Fold fshl x, y, 0 -> x
+def funnel_shift_left_zero: GICombineRule<
+ (defs root:$root),
+ (match (G_FSHL $x, $y, $z, 0):$root),
+ (apply (COPY $x, $y))
+>;
+
+// Fold fsh(l/r) x, y, C -> fsh(l/r) x, y, C % bw
+def funnel_shift_overshift: GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_FSHL, G_FSHR):$root,
+ [{ return Helper.matchConstantLargerBitWidth(*${root}, 3); }]),
+ (apply [{ Helper.applyFunnelShiftConstantModulo(*${root}); }])
+>;
+
+// Transform: fshl x, z, y | shl x, y -> fshl x, z, y
+// Transform: shl x, y | fshl x, z, y -> fshl x, z, y
+// FIXME: TableGen didn't handle G_OR commutativity on its own,
+// necessitating the use of !foreach to handle it manually.
+def funnel_shift_or_shift_to_funnel_shift_left_frags : GICombinePatFrag<
+ (outs root: $dst, $out1, $out2), (ins),
+ !foreach(inst, [(G_OR $dst, $out1, $out2), (G_OR $dst, $out2, $out1)],
+ (pattern (G_FSHL $out1, $x, $z, $y), (G_SHL $out2, $x, $y), inst))>;
+def funnel_shift_or_shift_to_funnel_shift_left: GICombineRule<
+ (defs root:$root),
+ (match (funnel_shift_or_shift_to_funnel_shift_left_frags $root, $out1, $out2)),
+ (apply (GIReplaceReg $root, $out1))
+>;
+
+// Transform: fshr z, x, y | srl x, y -> fshr z, x, y
+// Transform: srl x, y | fshr z, x, y -> fshr z, x, y
+// FIXME: TableGen didn't handle G_OR commutativity on its own,
+// necessitating the use of !foreach to handle it manually.
+def funnel_shift_or_shift_to_funnel_shift_right_frags : GICombinePatFrag<
+ (outs root: $dst, $out1, $out2), (ins),
+ !foreach(inst, [(G_OR $dst, $out1, $out2), (G_OR $dst, $out2, $out1)],
+ (pattern (G_FSHR $out1, $z, $x, $y), (G_LSHR $out2, $x, $y), inst))>;
+def funnel_shift_or_shift_to_funnel_shift_right: GICombineRule<
+ (defs root:$root),
+ (match (funnel_shift_or_shift_to_funnel_shift_right_frags $root, $out1, $out2)),
+ (apply (GIReplaceReg $root, $out1))
+>;
+
+def rotate_out_of_range : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_ROTR, G_ROTL):$root,
+ [{ return Helper.matchRotateOutOfRange(*${root}); }]),
+ (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
+>;
+
+def icmp_to_true_false_known_bits : GICombineRule<
+ (defs root:$d, int64_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_ICMP):$d,
+ [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
+
+def icmp_to_lhs_known_bits : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_ICMP):$root,
+ [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def redundant_binop_in_equality : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_ICMP):$root,
+ [{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform: (X == 0 & Y == 0) -> (X | Y) == 0
+def double_icmp_zero_and_combine: GICombineRule<
+ (defs root:$root),
+ (match (G_ICMP $d1, $p, $s1, 0),
+ (G_ICMP $d2, $p, $s2, 0),
+ (G_AND $root, $d1, $d2),
+ [{ return ${p}.getPredicate() == CmpInst::ICMP_EQ &&
+ !MRI.getType(${s1}.getReg()).getScalarType().isPointer() &&
+ (MRI.getType(${s1}.getReg()) ==
+ MRI.getType(${s2}.getReg())); }]),
+ (apply (G_OR $ordst, $s1, $s2),
+ (G_ICMP $root, $p, $ordst, 0))
+>;
+
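A standalone C++ check of the identity behind this rule (illustrative only, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // An OR is zero exactly when every operand is zero, so two compares
      // and an AND collapse into one OR and one compare.
      for (uint32_t X : {0u, 1u, 0xFFu})
        for (uint32_t Y : {0u, 2u, 0x100u})
          assert(((X == 0) && (Y == 0)) == ((X | Y) == 0));
    }
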
+// Transform: (X != 0 | Y != 0) -> (X | Y) != 0
+def double_icmp_zero_or_combine: GICombineRule<
+ (defs root:$root),
+ (match (G_ICMP $d1, $p, $s1, 0),
+ (G_ICMP $d2, $p, $s2, 0),
+ (G_OR $root, $d1, $d2),
+ [{ return ${p}.getPredicate() == CmpInst::ICMP_NE &&
+ !MRI.getType(${s1}.getReg()).getScalarType().isPointer() &&
+ (MRI.getType(${s1}.getReg()) ==
+ MRI.getType(${s2}.getReg())); }]),
+ (apply (G_OR $ordst, $s1, $s2),
+ (G_ICMP $root, $p, $ordst, 0))
+>;
+
+def and_or_disjoint_mask : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;
+
+def bitfield_extract_from_and : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (G_CONSTANT $mask, $imm2),
+ (G_CONSTANT $lsb, $imm1),
+ (G_LSHR $shift, $x, $lsb),
+ (G_AND $root, $shift, $mask):$root,
+ [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
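A standalone C++ sketch of the shift-and-mask form this rule recognizes (illustrative only, not part of the patch; Lsb/Width and the input are arbitrary example values):

    #include <cassert>
    #include <cstdint>

    int main() {
      // A right shift followed by an AND with a low mask extracts a
      // bitfield of Width bits starting at Lsb, which is what the
      // target-independent bitfield-extract operations encode.
      const unsigned Lsb = 4, Width = 8;
      uint32_t X = 0xABCD1234;
      uint32_t Field = (X >> Lsb) & ((1u << Width) - 1);
      assert(Field == 0x23); // bits [11:4] of 0x...1234
    }
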
+def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
+ funnel_shift_to_rotate,
+ funnel_shift_right_zero,
+ funnel_shift_left_zero,
+ funnel_shift_overshift,
+ funnel_shift_or_shift_to_funnel_shift_left,
+ funnel_shift_or_shift_to_funnel_shift_right]>;
+
+def bitfield_extract_from_sext_inreg : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_SEXT_INREG):$root,
+ [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def bitfield_extract_from_shr : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_ASHR, G_LSHR):$root,
+ [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def bitfield_extract_from_shr_and : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_ASHR, G_LSHR):$root,
+ [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
+ bitfield_extract_from_and,
+ bitfield_extract_from_shr,
+ bitfield_extract_from_shr_and]>;
+
+def udiv_by_const : GICombineRule<
+ (defs root:$root),
+ (match (G_UDIV $dst, $x, $y):$root,
+ [{ return Helper.matchUDivOrURemByConst(*${root}); }]),
+ (apply [{ Helper.applyUDivOrURemByConst(*${root}); }])>;
+
+def sdiv_by_const : GICombineRule<
+ (defs root:$root),
+ (match (G_SDIV $dst, $x, $y):$root,
+ [{ return Helper.matchSDivOrSRemByConst(*${root}); }]),
+ (apply [{ Helper.applySDivOrSRemByConst(*${root}); }])>;
+
+def sdiv_by_pow2 : GICombineRule<
+ (defs root:$root),
+ (match (G_SDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
+ [{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/true); }]),
+ (apply [{ Helper.applySDivByPow2(*${root}); }])>;
+
+def udiv_by_pow2 : GICombineRule<
+ (defs root:$root),
+ (match (G_UDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
+ [{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/false); }]),
+ (apply [{ Helper.applyUDivByPow2(*${root}); }])>;
+
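For the signed power-of-two case above, the expansion must round toward zero rather than toward negative infinity. A standalone C++ sketch of the usual bias trick (illustrative only, not part of the patch; it assumes arithmetic right shift of signed values):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Signed division by 2^K rounds toward zero; a bare arithmetic shift
      // rounds toward negative infinity, so negative inputs get a bias of
      // 2^K - 1 added before the shift.
      const unsigned K = 2; // divide by 4
      for (int32_t X : {-7, -5, -4, -1, 0, 1, 5, 7}) {
        int32_t Bias = (X >> 31) & ((1 << K) - 1); // 2^K - 1 iff X < 0
        assert(((X + Bias) >> K) == X / 4);
      }
    }
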
+def intdiv_combines : GICombineGroup<[udiv_by_pow2, sdiv_by_pow2,
+ udiv_by_const, sdiv_by_const,]>;
+
+def urem_by_const : GICombineRule<
+ (defs root:$root),
+ (match (G_UREM $dst, $x, $y):$root,
+ [{ return Helper.matchUDivOrURemByConst(*${root}); }]),
+ (apply [{ Helper.applyUDivOrURemByConst(*${root}); }])>;
+
+def srem_by_const : GICombineRule<
+ (defs root:$root),
+ (match (G_SREM $dst, $x, $y):$root,
+ [{ return Helper.matchSDivOrSRemByConst(*${root}); }]),
+ (apply [{ Helper.applySDivOrSRemByConst(*${root}); }])>;
+
+def intrem_combines : GICombineGroup<[urem_by_const, srem_by_const]>;
+
+def reassoc_ptradd : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_PTR_ADD):$root,
+ [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+def reassoc_comm_binops : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_ADD $root, $src1, $src2):$root,
+ [{ return Helper.matchReassocCommBinOp(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def reassocs : GICombineGroup<[reassoc_ptradd, reassoc_comm_binops]>;
+
+// Constant fold operations.
+def constant_fold_binop : GICombineRule<
+ (defs root:$d, apint_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_ADD, G_PTR_ADD, G_AND, G_ASHR, G_LSHR, G_MUL, G_OR,
+ G_SHL, G_SUB, G_XOR, G_UDIV, G_SDIV, G_UREM, G_SREM,
+ G_SMIN, G_SMAX, G_UMIN, G_UMAX):$d,
+ [{ return Helper.matchConstantFoldBinOp(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
+
+def constant_fold_fp_binop : GICombineRule<
+ (defs root:$d, constantfp_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV):$d,
+ [{ return Helper.matchConstantFoldFPBinOp(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceInstWithFConstant(*${d}, ${matchinfo}); }])>;
+
+def constant_fold_fma : GICombineRule<
+ (defs root:$d, constantfp_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_FMAD, G_FMA):$d,
+ [{ return Helper.matchConstantFoldFMA(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceInstWithFConstant(*${d}, ${matchinfo}); }])>;
+
+def constant_fold_cast_op : GICombineRule<
+ (defs root:$d, apint_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_ZEXT, G_SEXT, G_ANYEXT):$d,
+ [{ return Helper.matchConstantFoldCastOp(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
+
+def mulo_by_2: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_UMULO, G_SMULO):$root,
+ [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+def mulo_by_0: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_UMULO, G_SMULO):$root,
+ [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+// Transform (uadde x, y, 0) -> (uaddo x, y)
+// (sadde x, y, 0) -> (saddo x, y)
+// (usube x, y, 0) -> (usubo x, y)
+// (ssube x, y, 0) -> (ssubo x, y)
+def adde_to_addo: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_UADDE, G_SADDE, G_USUBE, G_SSUBE):$root,
+ [{ return Helper.matchAddEToAddO(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+def mulh_to_lshr : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_UMULH):$root,
+ [{ return Helper.matchUMulHToLShr(*${root}); }]),
+ (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;
+
+def mulh_combines : GICombineGroup<[mulh_to_lshr]>;
+
+def trunc_ssats : GICombineRule<
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (G_TRUNC $dst, $src):$root,
+ [{ return Helper.matchTruncSSatS(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyTruncSSatS(*${root}, ${matchinfo}); }])>;
+
+def trunc_ssatu : GICombineRule<
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (G_TRUNC $dst, $src):$root,
+ [{ return Helper.matchTruncSSatU(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyTruncSSatU(*${root}, ${matchinfo}); }])>;
+
+def trunc_usatu : GICombineRule<
+ (defs root:$root),
+ (match (G_UMIN $min, $x, $y):$Min,
+ (G_TRUNC $dst, $min):$root,
+ [{ return Helper.matchTruncUSatU(*${root}, *${Min}); }]),
+ (apply (G_TRUNC_USAT_U $dst, $x))>;
+
+def truncusatu_to_fptouisat : GICombineRule<
+ (defs root:$root),
+ (match (G_FPTOUI $src, $x):$Src,
+ (G_TRUNC_USAT_U $dst, $src):$root,
+ [{ return Helper.matchTruncUSatUToFPTOUISat(*${root}, *${Src}); }]),
+ (apply (G_FPTOUI_SAT $dst, $x))
+>;
+
+def truncsat_combines : GICombineGroup<[trunc_ssats, trunc_ssatu, trunc_usatu, truncusatu_to_fptouisat]>;
+
+def redundant_neg_operands: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
+ [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+// Transform (fsub +-0.0, X) -> (fneg X)
+def fsub_to_fneg: GICombineRule<
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchFsubToFneg(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyFsubToFneg(*${root}, ${matchinfo}); }])>;
+
+// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
+// (fadd x, (fmul y, z)) -> (fmad y, z, x)
+// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
+// (fadd (fmul x, y), z) -> (fmad x, y, z)
+def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
+// -> (fmad (fpext x), (fpext y), z)
+// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
+// -> (fmad (fpext y), (fpext z), x)
+def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fadd (fma x, y, (fmul z, u)), v) -> (fma x, y, (fma z, u, v))
+// (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
+// Transform (fadd v, (fma x, y, (fmul z, u))) -> (fma x, y, (fma z, u, v))
+// (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
+def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
+// (fma x, y, (fma (fpext u), (fpext v), z))
+def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
+ *${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
+// -> (fmad x, y, -z)
+def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
+// (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
+def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fsub (fpext (fmul x, y)), z) ->
+// (fma (fpext x), (fpext y), (fneg z))
+def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
+// (fneg (fma (fpext x), (fpext y), z))
+def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
+ *${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def combine_minmax_nan: GICombineRule<
+ (defs root:$root, unsigned_matchinfo:$info),
+ (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
+ [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>;
+
+// Combine multiple FDIVs with the same divisor into multiple FMULs by the
+// reciprocal.
+def fdiv_repeated_divison: GICombineRule<
+ (defs root:$root, mi_vector_matchinfo:$matchinfo),
+ (match (G_FDIV $dst, $src1, $src2):$root,
+ [{ return Helper.matchRepeatedFPDivisor(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyRepeatedFPDivisor(${matchinfo}); }])>;
+
+// Transform (add x, (sub y, x)) -> y
+// Transform (add (sub y, x), x) -> y
+def add_sub_reg_frags : GICombinePatFrag<
+ (outs root:$dst), (ins $src),
+ [
+ (pattern (G_ADD $dst, $x, $tmp), (G_SUB $tmp, $src, $x)),
+ (pattern (G_ADD $dst, $tmp, $x), (G_SUB $tmp, $src, $x))
+ ]>;
+def add_sub_reg: GICombineRule <
+ (defs root:$dst),
+ (match (add_sub_reg_frags $dst, $src)),
+ (apply (GIReplaceReg $dst, $src))>;
+
+def buildvector_identity_fold : GICombineRule<
+ (defs root:$build_vector, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_BUILD_VECTOR_TRUNC, G_BUILD_VECTOR):$build_vector,
+ [{ return Helper.matchBuildVectorIdentityFold(*${build_vector}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${build_vector}, ${matchinfo}); }])>;
+
+def trunc_buildvector_fold : GICombineRule<
+ (defs root:$op, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_TRUNC):$op,
+ [{ return Helper.matchTruncBuildVectorFold(*${op}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;
+
+def trunc_lshr_buildvector_fold : GICombineRule<
+ (defs root:$op, register_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_TRUNC):$op,
+ [{ return Helper.matchTruncLshrBuildVectorFold(*${op}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;
+
+// Transform:
+// (x + y) - y -> x
+// (x + y) - x -> y
+// x - (y + x) -> 0 - y
+// x - (x + z) -> 0 - z
+def sub_add_reg: GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SUB):$root,
+ [{ return Helper.matchSubAddSameReg(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def bitcast_bitcast_fold : GICombineRule<
+ (defs root:$dst),
+ (match (G_BITCAST $dst, $src1):$op, (G_BITCAST $src1, $src0),
+ [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;
+
+def fptrunc_fpext_fold : GICombineRule<
+ (defs root:$dst),
+ (match (G_FPTRUNC $dst, $src1):$op, (G_FPEXT $src1, $src0),
+ [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
+ (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;
+
+def select_to_minmax: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_SELECT):$root,
+ [{ return Helper.matchSimplifySelectToMinMax(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def select_to_iminmax: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (G_ICMP $tst, $tst1, $a, $b),
+ (G_SELECT $root, $tst, $x, $y),
+ [{ return Helper.matchSelectIMinMax(${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${info}); }])>;
+
+def simplify_neg_minmax : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SUB):$root,
+ [{ return Helper.matchSimplifyNegMinMax(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def match_selects : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SELECT):$root,
+ [{ return Helper.matchSelect(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def match_ands : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchAnd(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def match_ors : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchOr(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def match_addos : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SADDO, G_UADDO):$root,
+ [{ return Helper.matchAddOverflow(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def match_subo_no_overflow : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SSUBO, G_USUBO):$root,
+ [{ return Helper.matchSuboCarryOut(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def match_extract_of_element_undef_vector: GICombineRule <
+ (defs root:$root),
+ (match (G_IMPLICIT_DEF $vector),
+ (G_EXTRACT_VECTOR_ELT $root, $vector, $idx)),
+ (apply (G_IMPLICIT_DEF $root))
+>;
+
+def match_extract_of_element_undef_index: GICombineRule <
+ (defs root:$root),
+ (match (G_IMPLICIT_DEF $idx),
+ (G_EXTRACT_VECTOR_ELT $root, $vector, $idx)),
+ (apply (G_IMPLICIT_DEF $root))
+>;
+
+def match_extract_of_element : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
+ [{ return Helper.matchExtractVectorElement(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_not_const : GICombineRule<
+ (defs root:$root),
+ (match (G_INSERT_VECTOR_ELT $src, $x, $value, $idx),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx)),
+ (apply (GIReplaceReg $root, $value))>;
+
+def extract_vector_element_different_indices : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_INSERT_VECTOR_ELT $src, $x, $value, $idx2),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx1),
+ [{ return Helper.matchExtractVectorElementWithDifferentIndices(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_build_vector : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $idx, $imm),
+ (G_BUILD_VECTOR $src, GIVariadic<>:$unused):$Build,
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx):$Extract,
+ [{ return Helper.matchExtractVectorElementWithBuildVector(*${Extract}, *${Build},
+ ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${Extract}, ${matchinfo}); }])>;
+
+def extract_vector_element_shuffle_vector : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $idx, $imm),
+ (G_SHUFFLE_VECTOR $src, $src1, $src2, $mask):$Shuffle,
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx):$Extract,
+ [{ return Helper.matchExtractVectorElementWithShuffleVector(*${Extract}, *${Shuffle},
+ ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${Extract}, ${matchinfo}); }])>;
+
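+// One rule per G_BUILD_VECTOR_TRUNC operand count (2 through 8); each rule
+// spells the source operands out explicitly.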
+def extract_vector_element_build_vector_trunc2 : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR_TRUNC $src, $x, $y),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_build_vector_trunc3 : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_build_vector_trunc4 : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_build_vector_trunc5 : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a, $b),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_build_vector_trunc6 : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a, $b, $c),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_build_vector_trunc7 : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a, $b, $c, $d),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_build_vector_trunc8 : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR_TRUNC $src, $x, $y, $z, $a, $b, $c, $d, $e),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithBuildVectorTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def sext_trunc : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_TRUNC $src, $x),
+ (G_SEXT $root, $src),
+ [{ return Helper.matchSextOfTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
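+// zext (trunc nuw x): the nuw flag guarantees the truncation discarded no
+// set bits, so the helper can rebuild the value directly from x (see
+// matchZextOfTrunc).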
+def zext_trunc : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_TRUNC $src, $x, (MIFlags NoUWrap)),
+ (G_ZEXT $root, $src),
+ [{ return Helper.matchZextOfTrunc(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
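+// A zext carrying the nneg flag has a known-non-negative source, so it can
+// be treated like a sext (see matchNonNegZext).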
+def nneg_zext : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_ZEXT $root, $x, (MIFlags NonNeg)),
+ [{ return Helper.matchNonNegZext(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+// Combines concat operations
+def combine_concat_vector : GICombineRule<
+ (defs root:$root, register_vector_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_CONCAT_VECTORS):$root,
+ [{ return Helper.matchCombineConcatVectors(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyCombineConcatVectors(*${root}, ${matchinfo}); }])>;
+
+// Combines Shuffles of Concats
+// a = G_CONCAT_VECTORS x, y, undef, undef
+// b = G_CONCAT_VECTORS z, undef, undef, undef
+// c = G_SHUFFLE_VECTOR a, b, <0, 1, 4, undef>
+// ===>
+// c = G_CONCAT_VECTORS x, y, z, undef
+def combine_shuffle_concat : GICombineRule<
+ (defs root:$root, register_vector_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
+ [{ return Helper.matchCombineShuffleConcat(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyCombineShuffleConcat(*${root}, ${matchinfo}); }])>;
+
+// Combines a shuffle of vectors into a build_vector
+def combine_shuffle_vector_to_build_vector : GICombineRule<
+ (defs root:$root),
+ (match (G_SHUFFLE_VECTOR $dst, $src1, $src2, $mask):$root),
+ (apply [{ Helper.applyCombineShuffleToBuildVector(*${root}); }])>;
+
+def insert_vector_element_idx_undef : GICombineRule<
+ (defs root:$root),
+ (match (G_IMPLICIT_DEF $idx),
+ (G_INSERT_VECTOR_ELT $root, $src, $elt, $idx)),
+ (apply (G_IMPLICIT_DEF $root))>;
+
+def insert_vector_element_elt_undef : GICombineRule<
+ (defs root:$root),
+ (match (G_IMPLICIT_DEF $elt),
+ (G_INSERT_VECTOR_ELT $root, $src, $elt, $idx),
+ [{ return isGuaranteedNotToBePoison(${src}.getReg(), MRI); }]),
+ (apply (GIReplaceReg $root, $src))>;
+
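+// Re-inserting the element just extracted from the same index of the same
+// vector is a no-op: insert(v, extract(v, idx), idx) -> v.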
+def insert_vector_element_extract_vector_element : GICombineRule<
+ (defs root:$root),
+ (match (G_EXTRACT_VECTOR_ELT $elt, $src, $idx),
+ (G_INSERT_VECTOR_ELT $root, $src, $elt, $idx)),
+ (apply (GIReplaceReg $root, $src))>;
+
+def insert_vector_elt_oob : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
+ [{ return Helper.matchInsertVectorElementOOB(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+// Combine a G_BUILD_VECTOR whose defined elements are truncated pieces of
+// the same source, e.g.
+// v8i8 (buildvector i8 (trunc(unmerge)), i8 (trunc), i8 (trunc), i8 (trunc), undef, undef, undef, undef)
+def combine_use_vector_truncate : GICombineRule<
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR $dst, GIVariadic<>:$unused):$root,
+ [{ return Helper.matchUseVectorTruncate(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyUseVectorTruncate(*${root}, ${matchinfo}); }])>;
+
+def add_of_vscale : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_VSCALE $left, $imm1),
+ (G_VSCALE $right, $imm2),
+ (G_ADD $root, $left, $right, (MIFlags NoSWrap)),
+ [{ return Helper.matchAddOfVScale(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def mul_of_vscale : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_VSCALE $left, $scale),
+ (G_CONSTANT $x, $imm1),
+ (G_MUL $root, $left, $x, (MIFlags NoSWrap)),
+ [{ return Helper.matchMulOfVScale(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def shl_of_vscale : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_VSCALE $left, $imm),
+ (G_CONSTANT $x, $imm1),
+ (G_SHL $root, $left, $x, (MIFlags NoSWrap)),
+ [{ return Helper.matchShlOfVScale(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def sub_of_vscale : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_VSCALE $right, $imm),
+ (G_SUB $root, $x, $right, (MIFlags NoSWrap)),
+ [{ return Helper.matchSubOfVScale(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
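+// Expand G_FPOWI with a constant integer exponent inline; the expansion
+// itself is produced by applyExpandFPowI.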
+def expand_const_fpowi : GICombineRule<
+ (defs root:$root),
+ (match (G_CONSTANT $int, $imm),
+ (G_FPOWI $dst, $float, $int):$root,
+ [{ return Helper.matchFPowIExpansion(*${root}, ${imm}.getCImm()->getSExtValue()); }]),
+ (apply [{ Helper.applyExpandFPowI(*${root}, ${imm}.getCImm()->getSExtValue()); }])>;
+
+def combine_shuffle_undef_rhs : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_IMPLICIT_DEF $undef),
+ (G_SHUFFLE_VECTOR $root, $src1, $undef, $mask):$root,
+ [{ return Helper.matchShuffleUndefRHS(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
+>;
+
+def combine_shuffle_disjoint_mask : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
+ [{ return Helper.matchShuffleDisjointMask(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
+>;
+
+// match_extract_of_element and insert_vector_elt_oob must come first!
+def vector_ops_combines: GICombineGroup<[
+match_extract_of_element_undef_vector,
+match_extract_of_element_undef_index,
+insert_vector_element_idx_undef,
+insert_vector_element_elt_undef,
+match_extract_of_element,
+insert_vector_elt_oob,
+extract_vector_element_not_const,
+extract_vector_element_different_indices,
+extract_vector_element_build_vector,
+extract_vector_element_build_vector_trunc2,
+extract_vector_element_build_vector_trunc3,
+extract_vector_element_build_vector_trunc4,
+extract_vector_element_build_vector_trunc5,
+extract_vector_element_build_vector_trunc6,
+extract_vector_element_build_vector_trunc7,
+extract_vector_element_build_vector_trunc8,
+extract_vector_element_shuffle_vector,
+insert_vector_element_extract_vector_element,
+add_of_vscale,
+mul_of_vscale,
+shl_of_vscale,
+sub_of_vscale,
+]>;
+
+// fold ((A+(B-C))-B) -> A-C
+def APlusBMinusCMinusB : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub1, $B, $C),
+ (G_ADD $add1, $A, $sub1),
+ (G_SUB $root, $add1, $B)),
+ (apply (G_SUB $root, $A, $C))>;
+
+// fold ((A+(B+C))-B) -> A+C
+def APlusBPlusCMinusB_frags : GICombinePatFrag<
+ (outs root:$root), (ins $x, $y, $n),
+ [
+ (pattern (G_ADD $add1, $y, $n),
+ (G_ADD $add2, $x, $add1),
+ (G_SUB $root, $add2, $y),
+ [{ return MRI.hasOneNonDBGUse(${add2}.getReg()) &&
+ MRI.hasOneNonDBGUse(${add1}.getReg()); }]),
+ ]>;
+
+def APlusBPlusCMinusB : GICombineRule<
+ (defs root:$root),
+ (match (APlusBPlusCMinusB_frags $root, $x, $y, $n)),
+ (apply (G_ADD $root, $x, $n))>;
+
+// fold ((A-(B-C))-C) -> A-B
+def AMinusBMinusCMinusC : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub1, $B, $C),
+ (G_SUB $sub2, $A, $sub1),
+ (G_SUB $root, $sub2, $C)),
+ (apply (G_SUB $root, $A, $B))>;
+
+// fold ((0-A) + B) -> B-A
+def ZeroMinusAPlusB : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub, 0, $A),
+ (G_ADD $root, $sub, $B)),
+ (apply (G_SUB $root, $B, $A))>;
+
+// fold (A + (0-B)) -> A-B
+def APlusZeroMinusB : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub, 0, $B),
+ (G_ADD $root, $A, $sub)),
+ (apply (G_SUB $root, $A, $B))>;
+
+// fold (A+(B-A)) -> B
+def APlusBMinusB : GICombineRule<
+  (defs root:$root),
+  (match (G_SUB $sub, $B, $A),
+         (G_ADD $root, $A, $sub)),
+  (apply (GIReplaceReg $root, $B))>;
+
+// fold ((B-A)+A) -> B
+def BMinusAPlusA : GICombineRule<
+  (defs root:$root),
+  (match (G_SUB $sub, $B, $A),
+         (G_ADD $root, $sub, $A)),
+  (apply (GIReplaceReg $root, $B))>;
+
+// fold ((A-B)+(C-A)) -> (C-B)
+def AMinusBPlusCMinusA : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub1, $A, $B),
+ (G_SUB $sub2, $C, $A),
+ (G_ADD $root, $sub1, $sub2)),
+ (apply (G_SUB $root, $C, $B))>;
+
+// fold ((A-B)+(B-C)) -> (A-C)
+def AMinusBPlusBMinusC : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub1, $A, $B),
+ (G_SUB $sub2, $B, $C),
+ (G_ADD $root, $sub1, $sub2)),
+ (apply (G_SUB $root, $A, $C))>;
+
+// fold (A+(B-(A+C))) to (B-C)
+def APlusBMinusAplusC : GICombineRule<
+ (defs root:$root),
+ (match (G_ADD $add1, $A, $C),
+ (G_SUB $sub1, $B, $add1),
+ (G_ADD $root, $A, $sub1)),
+ (apply (G_SUB $root, $B, $C))>;
+
+// fold (A+(B-(C+A))) to (B-C)
+def APlusBMinusCPlusA : GICombineRule<
+ (defs root:$root),
+ (match (G_ADD $add1, $C, $A),
+ (G_SUB $sub1, $B, $add1),
+ (G_ADD $root, $A, $sub1)),
+ (apply (G_SUB $root, $B, $C))>;
+
+// fold (A+C1)-C2 -> A+(C1-C2)
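+// E.g. (x + 10) - 3 -> x + 7.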
+def APlusC1MinusC2: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $c2, $imm2),
+ (G_CONSTANT $c1, $imm1),
+ (G_ADD $add, $A, $c1),
+ (G_SUB $root, $add, $c2):$root,
+ [{ return Helper.matchFoldAPlusC1MinusC2(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+// fold C2-(A+C1) -> (C2-C1)-A
+def C2MinusAPlusC1: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $c2, $imm2),
+ (G_CONSTANT $c1, $imm1),
+ (G_ADD $add, $A, $c1),
+ (G_SUB $root, $c2, $add):$root,
+ [{ return Helper.matchFoldC2MinusAPlusC1(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+// fold (A-C1)-C2 -> A-(C1+C2)
+def AMinusC1MinusC2: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $c2, $imm2),
+ (G_CONSTANT $c1, $imm1),
+ (G_SUB $sub1, $A, $c1),
+ (G_SUB $root, $sub1, $c2):$root,
+ [{ return Helper.matchFoldAMinusC1MinusC2(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+// fold (C1-A)-C2 -> (C1-C2)-A
+def C1Minus2MinusC2: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $c2, $imm2),
+ (G_CONSTANT $c1, $imm1),
+ (G_SUB $sub1, $c1, $A),
+ (G_SUB $root, $sub1, $c2):$root,
+ [{ return Helper.matchFoldC1Minus2MinusC2(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+// fold ((A-C1)+C2) -> (A+(C2-C1))
+def AMinusC1PlusC2: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $c2, $imm2),
+ (G_CONSTANT $c1, $imm1),
+ (G_SUB $sub, $A, $c1),
+ (G_ADD $root, $sub, $c2):$root,
+ [{ return Helper.matchFoldAMinusC1PlusC2(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def integer_reassoc_combines: GICombineGroup<[
+ APlusBMinusCMinusB,
+ APlusBPlusCMinusB,
+ AMinusBMinusCMinusC,
+ ZeroMinusAPlusB,
+ APlusZeroMinusB,
+ APlusBMinusB,
+ BMinusAPlusA,
+ AMinusBPlusCMinusA,
+ AMinusBPlusBMinusC,
+ APlusBMinusAplusC,
+ APlusBMinusCPlusA,
+ APlusC1MinusC2,
+ C2MinusAPlusC1,
+ AMinusC1MinusC2,
+ C1Minus2MinusC2,
+ AMinusC1PlusC2
+]>;
+
+// fold (A+(shl (0-B), C)) -> (A-(shl B, C))
+// fold ((shl (0-B), C)+A) -> (A-(shl B, C))
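+// E.g. (illustrative):
+//   %z = G_CONSTANT 0
+//   %n = G_SUB %z, %b
+//   %s = G_SHL %n, %c
+//   %d = G_ADD %a, %s
+// ==>
+//   %s2 = G_SHL %b, %c
+//   %d  = G_SUB %a, %s2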
+def add_shl_neg_frags : GICombinePatFrag<
+ (outs root:$dst), (ins $x, $y, $n),
+ [
+ (pattern (G_CONSTANT $zero, 0),
+ (G_SUB $neg_y, $zero, $y),
+ (G_SHL $shl_neg, $neg_y, $n),
+ (G_ADD $dst, $x, $shl_neg),
+ [{ return MRI.hasOneNonDBGUse(${shl_neg}.getReg()) &&
+ MRI.hasOneNonDBGUse(${neg_y}.getReg()); }]),
+ (pattern (G_CONSTANT $zero, 0),
+ (G_SUB $neg_y, $zero, $y),
+ (G_SHL $shl_neg, $neg_y, $n),
+ (G_ADD $dst, $shl_neg, $x),
+ [{ return MRI.hasOneNonDBGUse(${shl_neg}.getReg()) &&
+ MRI.hasOneNonDBGUse(${neg_y}.getReg()); }])
+ ]>;
+
+def add_shift : GICombineRule<
+ (defs root:$dst),
+ (match (add_shl_neg_frags $dst, $x, $y, $n)),
+ (apply (G_SHL $new_shl, $y, $n),
+ (G_SUB $dst, $x, $new_shl))>;
+
+def freeze_of_non_undef_non_poison : GICombineRule<
+ (defs root:$root),
+ (match (G_FREEZE $root, $src),
+ [{ return isGuaranteedNotToBeUndefOrPoison(${src}.getReg(), MRI); }]),
+ (apply (GIReplaceReg $root, $src))>;
+
+def freeze_combines: GICombineGroup<[
+ freeze_of_non_undef_non_poison,
+ push_freeze_to_prevent_poison_from_propagating
+]>;
+
+/// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
+class truncate_of_opcode<Instruction extOpcode> : GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (extOpcode $ext, $src):$ExtMI,
+ (G_TRUNC $root, $ext):$root,
+ [{ return Helper.matchTruncateOfExt(*${root}, *${ExtMI}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+def truncate_of_zext : truncate_of_opcode<G_ZEXT>;
+def truncate_of_sext : truncate_of_opcode<G_SEXT>;
+def truncate_of_anyext : truncate_of_opcode<G_ANYEXT>;
+
+// Push cast through select.
+class select_of_opcode<Instruction castOpcode> : GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_SELECT $select, $cond, $true, $false):$Select,
+ (castOpcode $root, $select):$Cast,
+ [{ return Helper.matchCastOfSelect(*${Cast}, *${Select}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${Cast}, ${matchinfo}); }])>;
+
+def select_of_zext : select_of_opcode<G_ZEXT>;
+def select_of_anyext : select_of_opcode<G_ANYEXT>;
+def select_of_truncate : select_of_opcode<G_TRUNC>;
+
+// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
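+// E.g. zext (zext x) -> zext x, anyext (sext x) -> sext x.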
+class ext_of_ext_opcodes<Instruction ext1Opcode, Instruction ext2Opcode> : GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (ext2Opcode $second, $src):$Second,
+ (ext1Opcode $root, $second):$First,
+ [{ return Helper.matchExtOfExt(*${First}, *${Second}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${First}, ${matchinfo}); }])>;
+
+def zext_of_zext : ext_of_ext_opcodes<G_ZEXT, G_ZEXT>;
+def zext_of_anyext : ext_of_ext_opcodes<G_ZEXT, G_ANYEXT>;
+def sext_of_sext : ext_of_ext_opcodes<G_SEXT, G_SEXT>;
+def sext_of_anyext : ext_of_ext_opcodes<G_SEXT, G_ANYEXT>;
+def anyext_of_anyext : ext_of_ext_opcodes<G_ANYEXT, G_ANYEXT>;
+def anyext_of_zext : ext_of_ext_opcodes<G_ANYEXT, G_ZEXT>;
+def anyext_of_sext : ext_of_ext_opcodes<G_ANYEXT, G_SEXT>;
+
+def sext_inreg_of_sext_inreg : GICombineRule<
+ (defs root:$dst, build_fn_matchinfo:$matchinfo),
+ (match (G_SEXT_INREG $x, $src, $a):$other,
+ (G_SEXT_INREG $dst, $x, $b):$root,
+ [{ return Helper.matchRedundantSextInReg(*${root}, *${other}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
+// Push cast through build vector.
+class buildvector_of_opcode<Instruction castOpcode> : GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR $bv, GIVariadic<>:$unused):$Build,
+ (castOpcode $root, $bv):$Cast,
+ [{ return Helper.matchCastOfBuildVector(*${Cast}, *${Build}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${Cast}, ${matchinfo}); }])>;
+
+def buildvector_of_truncate : buildvector_of_opcode<G_TRUNC>;
+
+// Narrow binop:
+// trunc (binop X, C) --> binop (trunc X, trunc C)
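+// E.g. (illustrative):
+//   %c = G_CONSTANT 7
+//   %w = G_ADD %x, %c
+//   %t = G_TRUNC %w
+// ==>
+//   %xt = G_TRUNC %x
+//   %ct = G_TRUNC %c
+//   %t  = G_ADD %xt, %ct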
+class narrow_binop_opcode<Instruction binopOpcode> : GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_CONSTANT $const, $imm),
+ (binopOpcode $binop, $x, $const):$Binop,
+ (G_TRUNC $root, $binop):$Trunc,
+ [{ return Helper.matchNarrowBinop(*${Trunc}, *${Binop}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${Trunc}, ${matchinfo}); }])>;
+
+// Fold (ctlz (xor x, (sra x, bitwidth-1))) -> (add (ctls x), 1).
+// Fold (ctlz (or (shl (xor x, (sra x, bitwidth-1)), 1), 1)) -> (ctls x).
+class ctlz_to_ctls_op<Instruction ctlzOpcode> : GICombineRule <
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (ctlzOpcode $dst, $src):$root,
+ [{ return Helper.matchCtls(*${root}, ${matchinfo}); }]),
+ (apply [{Helper.applyBuildFn(*${root}, ${matchinfo});}])>;
+
+def ctlz_to_ctls : ctlz_to_ctls_op<G_CTLZ>;
+def ctlz_zero_undef_to_ctls : ctlz_to_ctls_op<G_CTLZ_ZERO_UNDEF>;
+
+def ctls_combines : GICombineGroup<[
+ ctlz_to_ctls,
+ ctlz_zero_undef_to_ctls,
+]>;
+
+def narrow_binop_add : narrow_binop_opcode<G_ADD>;
+def narrow_binop_sub : narrow_binop_opcode<G_SUB>;
+def narrow_binop_mul : narrow_binop_opcode<G_MUL>;
+def narrow_binop_and : narrow_binop_opcode<G_AND>;
+def narrow_binop_or : narrow_binop_opcode<G_OR>;
+def narrow_binop_xor : narrow_binop_opcode<G_XOR>;
+
+// Fold a cast of an integer constant into a new constant, e.g.
+// trunc (G_CONSTANT C) -> G_CONSTANT (trunc C).
+class integer_of_opcode<Instruction castOpcode> : GICombineRule <
+ (defs root:$root, apint_matchinfo:$matchinfo),
+ (match (G_CONSTANT $int, $imm),
+ (castOpcode $root, $int):$Cast,
+ [{ return Helper.matchCastOfInteger(*${Cast}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${Cast}, ${matchinfo}); }])>;
+
+def integer_of_truncate : integer_of_opcode<G_TRUNC>;
+
+def cast_of_cast_combines: GICombineGroup<[
+ truncate_of_zext,
+ truncate_of_sext,
+ truncate_of_anyext,
+ zext_of_zext,
+ zext_of_anyext,
+ sext_of_sext,
+ sext_of_anyext,
+ anyext_of_anyext,
+ anyext_of_zext,
+ anyext_of_sext,
+ sext_inreg_of_sext_inreg,
+]>;
+
+def cast_combines: GICombineGroup<[
+ cast_of_cast_combines,
+ select_of_zext,
+ select_of_anyext,
+ select_of_truncate,
+ buildvector_of_truncate,
+ narrow_binop_add,
+ narrow_binop_sub,
+ narrow_binop_mul,
+ narrow_binop_and,
+ narrow_binop_or,
+ narrow_binop_xor,
+ integer_of_truncate
+]>;
+
+def canonicalize_icmp : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_ICMP $root, $pred, $lhs, $rhs):$cmp,
+ [{ return Helper.matchCanonicalizeICmp(*${cmp}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${cmp}, ${matchinfo}); }])>;
+
+def canonicalize_fcmp : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_FCMP $root, $pred, $lhs, $rhs):$cmp,
+ [{ return Helper.matchCanonicalizeFCmp(*${cmp}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${cmp}, ${matchinfo}); }])>;
+
+def cmp_combines: GICombineGroup<[
+ canonicalize_icmp,
+ canonicalize_fcmp,
+ icmp_to_true_false_known_bits,
+ icmp_to_lhs_known_bits,
+ double_icmp_zero_and_combine,
+ double_icmp_zero_or_combine,
+ redundant_binop_in_equality
+]>;
+
+def overflow_combines: GICombineGroup<[
+ match_addos,
+ match_subo_no_overflow
+]>;
+
+// FIXME: These should use the custom predicate feature once it lands.
+def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
+ undef_to_negative_one,
+ binop_left_undef_to_zero,
+ binop_right_undef_to_undef,
+ unary_undef_to_zero,
+ unary_undef_to_undef,
+ propagate_undef_any_op,
+ propagate_undef_all_ops,
+ propagate_undef_shuffle_mask,
+ erase_undef_store,
+ insert_extract_vec_elt_out_of_bounds]>;
+
+def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
+ binop_same_val, binop_left_to_zero,
+ binop_right_to_zero, p2i_to_i2p,
+ i2p_to_p2i, anyext_trunc_fold,
+ fneg_fneg_fold, right_identity_one,
+ add_sub_reg, buildvector_identity_fold,
+ trunc_buildvector_fold,
+ trunc_lshr_buildvector_fold,
+ bitcast_bitcast_fold, fptrunc_fpext_fold,
+ right_identity_neg_zero_fp, right_identity_neg_zero_fp_nsz,
+ right_identity_neg_one_fp]>;
+
+def const_combines : GICombineGroup<[constant_fold_fp_ops, const_ptradd_to_i2p,
+ overlapping_and, mulo_by_2, mulo_by_0,
+ adde_to_addo,
+ combine_minmax_nan, expand_const_fpowi]>;
+
+def known_bits_simplifications : GICombineGroup<[
+ redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
+ zext_trunc_fold,
+ sext_inreg_to_zext_inreg]>;
+
+def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
+ narrow_binop_feeding_and]>;
+
+def phi_combines : GICombineGroup<[extend_through_phis]>;
+
+def bitreverse_shift : GICombineGroup<[bitreverse_shl, bitreverse_lshr]>;
+
+def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
+ select_to_iminmax, match_selects]>;
+
+def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, sub_to_add,
+ add_p2i_to_ptradd, mul_by_neg_one,
+ idempotent_prop]>;
+
+def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
+ combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
+ combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
+ combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
+ combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;
+
+def constant_fold_binops : GICombineGroup<[constant_fold_binop,
+ constant_fold_fp_binop]>;
+
+def prefer_sign_combines : GICombineGroup<[nneg_zext]>;
+
+def shuffle_combines : GICombineGroup<[combine_shuffle_concat,
+ combine_shuffle_undef_rhs,
+ combine_shuffle_disjoint_mask]>;
+
+def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
+ vector_ops_combines, freeze_combines, cast_combines,
+ insert_vec_elt_combines, extract_vec_elt_combines, combines_for_extload,
+ combine_extracted_vector_load,
+ undef_combines, identity_combines, phi_combines,
+ simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big,
+ reassocs, ptr_add_immed_chain, cmp_combines,
+ shl_ashr_to_sext_inreg, neg_and_one_to_sext_inreg, sext_inreg_of_load,
+ width_reduction_combines, select_combines,
+ known_bits_simplifications, trunc_shift,
+ not_cmp_fold, opt_brcond_by_inverting_cond,
+ const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
+ shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
+ div_rem_to_divrem, funnel_shift_combines, bitreverse_shift, commute_shift,
+ form_bitfield_extract, constant_fold_binops, constant_fold_fma,
+ constant_fold_cast_op, fabs_fneg_fold,
+ mulh_combines, redundant_neg_operands,
+ and_or_disjoint_mask, fma_combines, fold_binop_into_select,
+  intrem_combines, intdiv_combines, fdiv_repeated_divisor,
+ sub_add_reg, select_to_minmax,
+ fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
+ simplify_neg_minmax, combine_concat_vector,
+ sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
+ combine_use_vector_truncate, merge_combines, overflow_combines,
+ truncsat_combines, lshr_of_trunc_of_lshr, ctls_combines, add_shift]>;
+
+// A combine group used for prelegalizer combiners at -O0. The combines in
+// this group have been selected based on experiments to balance code size and
+// compile time performance.
+def optnone_combines : GICombineGroup<[trivial_combines,
+ ptr_add_immed_chain, combines_for_extload,
+ not_cmp_fold, opt_brcond_by_inverting_cond, combine_concat_vector]>;
>From b7fc072677a70889f01c9dccd52e458f0f3eda71 Mon Sep 17 00:00:00 2001
From: Luisa Cicolini <48860705+luisacicolini at users.noreply.github.com>
Date: Sat, 14 Feb 2026 17:31:38 +0000
Subject: [PATCH 11/16] chore: replace pattern
Co-authored-by: Osman Yasar <osmanyas05 at gmail.com>
---
llvm/include/llvm/Target/GlobalISel/Combine.td | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index e70d90283d3be..9f1c74c11111b 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1892,7 +1892,11 @@ def APlusBPlusCMinusB_frags : GICombinePatFrag<
def APlusBPlusCMinusB : GICombineRule<
(defs root:$root),
- (match (APlusBPlusCMinusB_frags $root, $x, $y, $n)),
+ (match (G_ADD $add1, $B, $C),
+ (G_ADD $add2, $A, $add1),
+ (G_SUB $root, $add2, $B),
+ [{ return MRI.hasOneNonDBGUse(${add1}.getReg()) &&
+ MRI.hasOneNonDBGUse(${add2}.getReg()); }]),
(apply (G_ADD $root, $x, $n))>;
// fold ((A-(B-C))-C) -> A-B
>From 3bbdab8adcf39c0595a3c1f44b0f9320b9db639b Mon Sep 17 00:00:00 2001
From: Luisa Cicolini <48860705+luisacicolini at users.noreply.github.com>
Date: Sat, 14 Feb 2026 17:32:04 +0000
Subject: [PATCH 12/16] Apply suggestion from @luisacicolini
---
llvm/include/llvm/Target/GlobalISel/Combine.td | 10 ----------
1 file changed, 10 deletions(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 9f1c74c11111b..2ecfe402b4fc8 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1880,16 +1880,6 @@ def APlusBMinusCMinusB : GICombineRule<
(apply (G_SUB $root, $A, $C))>;
// fold ((A+(B+C))-B) -> A+C
-def APlusBPlusCMinusB_frags : GICombinePatFrag<
- (outs root:$root), (ins $x, $y, $n),
- [
- (pattern (G_ADD $add1, $y, $n),
- (G_ADD $add2, $x, $add1),
- (G_SUB $root, $add2, $y),
- [{ return MRI.hasOneNonDBGUse(${add2}.getReg()) &&
- MRI.hasOneNonDBGUse(${add1}.getReg()); }]),
- ]>;
-
def APlusBPlusCMinusB : GICombineRule<
(defs root:$root),
(match (G_ADD $add1, $B, $C),
>From 344f86ffe43a997709568599f47a452be9de6767 Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 17:33:02 +0000
Subject: [PATCH 13/16] chore: indent
---
llvm/include/llvm/Target/GlobalISel/Combine.td | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 2ecfe402b4fc8..5cdd2cc5484e3 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1883,8 +1883,8 @@ def APlusBMinusCMinusB : GICombineRule<
def APlusBPlusCMinusB : GICombineRule<
(defs root:$root),
(match (G_ADD $add1, $B, $C),
- (G_ADD $add2, $A, $add1),
- (G_SUB $root, $add2, $B),
+ (G_ADD $add2, $A, $add1),
+ (G_SUB $root, $add2, $B),
[{ return MRI.hasOneNonDBGUse(${add1}.getReg()) &&
MRI.hasOneNonDBGUse(${add2}.getReg()); }]),
(apply (G_ADD $root, $x, $n))>;
>From 41312205f63fead124df7bd27913254bb32b6f93 Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 17:33:30 +0000
Subject: [PATCH 14/16] chore: fix tests
---
llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
index d972f3aad591f..17001117cdb48 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
@@ -418,7 +418,7 @@ body: |
name: APlusBPlusCMinusB
body: |
bb.0:
- liveins: $w0, $w1
+ liveins: $x0, $x1, $x2
; CHECK-LABEL: name: APlusBPlusCMinusB
; CHECK: liveins: $w0, $w1
>From d0d77c697e6aead65ad538a4688cb37a267a8471 Mon Sep 17 00:00:00 2001
From: luisacicolini <luisacicolini at gmail.com>
Date: Sat, 14 Feb 2026 17:35:07 +0000
Subject: [PATCH 15/16] chore: fix test
---
llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
index 17001117cdb48..7cc50d2c79717 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
@@ -421,7 +421,7 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: APlusBPlusCMinusB
- ; CHECK: liveins: $w0, $w1
+ ; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %a:_(s64) = COPY $x0
; CHECK-NEXT: %c:_(s64) = COPY $x2
>From ef18da04fd74758d7d2bf7dfe26429d84b1459f1 Mon Sep 17 00:00:00 2001
From: Luisa Cicolini <48860705+luisacicolini at users.noreply.github.com>
Date: Sat, 14 Feb 2026 17:41:38 +0000
Subject: [PATCH 16/16] Update llvm/include/llvm/Target/GlobalISel/Combine.td
Co-authored-by: Osman Yasar <osmanyas05 at gmail.com>
---
llvm/include/llvm/Target/GlobalISel/Combine.td | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 5cdd2cc5484e3..3ab093a696872 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1887,7 +1887,7 @@ def APlusBPlusCMinusB : GICombineRule<
(G_SUB $root, $add2, $B),
[{ return MRI.hasOneNonDBGUse(${add1}.getReg()) &&
MRI.hasOneNonDBGUse(${add2}.getReg()); }]),
- (apply (G_ADD $root, $x, $n))>;
+ (apply (G_ADD $root, $A, $C))>;
// fold ((A-(B-C))-C) -> A-B
def AMinusBMinusCMinusC : GICombineRule<