[llvm] [GlobalIsel] Combine G_EXTRACT_VECTOR_ELT (PR #85321)
Thorsten Schütt via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 27 01:14:34 PDT 2024
https://github.com/tschuett updated https://github.com/llvm/llvm-project/pull/85321
>From 260ce2c595dc9352a1df77c659ce262ba2e8ccc7 Mon Sep 17 00:00:00 2001
From: Thorsten Schütt <schuett at gmail.com>
Date: Wed, 13 Mar 2024 10:35:35 +0100
Subject: [PATCH 1/4] [GlobalIsel] Combine G_EXTRACT_VECTOR_ELT
preliminary steps
---
.../llvm/CodeGen/GlobalISel/CombinerHelper.h | 3 +
.../CodeGen/GlobalISel/GenericMachineInstrs.h | 33 +++
.../include/llvm/Target/GlobalISel/Combine.td | 9 +-
llvm/lib/CodeGen/GlobalISel/CMakeLists.txt | 1 +
.../GlobalISel/CombinerHelperVectorOps.cpp | 174 ++++++++++++++
.../GlobalISel/combine-extract-vec-elt.mir | 215 +++++++++++++++++-
.../CodeGen/AArch64/extract-vector-elt.ll | 18 +-
7 files changed, 434 insertions(+), 19 deletions(-)
create mode 100644 llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 9e8fc5d635c50a..d2f9d74bf7d61a 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -815,6 +815,9 @@ class CombinerHelper {
/// Combine addos.
bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo);
+ /// Combine extract vector element.
+ bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo);
+
private:
/// Checks for legality of an indexed variant of \p LdSt.
bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
index 261cfcf504d5fe..6727db6988a360 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -739,6 +739,39 @@ class GOr : public GLogicalBinOp {
};
};
+/// Represents an extract vector element.
+class GExtractVectorElement : public GenericMachineInstr {
+public:
+ Register getVectorReg() const { return getOperand(1).getReg(); }
+ Register getIndexReg() const { return getOperand(2).getReg(); }
+
+ static bool classof(const MachineInstr *MI) {
+ return MI->getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT;
+ }
+};
+
+/// Represents an insert vector element.
+class GInsertVectorElement : public GenericMachineInstr {
+public:
+ Register getVectorReg() const { return getOperand(1).getReg(); }
+ Register getElementReg() const { return getOperand(2).getReg(); }
+ Register getIndexReg() const { return getOperand(3).getReg(); }
+
+ static bool classof(const MachineInstr *MI) {
+ return MI->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT;
+ }
+};
+
+/// Represents a freeze.
+class GFreeze : public GenericMachineInstr {
+public:
+ Register getSourceReg() const { return getOperand(1).getReg(); }
+
+ static bool classof(const MachineInstr *MI) {
+ return MI->getOpcode() == TargetOpcode::G_FREEZE;
+ }
+};
+
} // namespace llvm
#endif // LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H
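The new wrappers (GExtractVectorElement, GInsertVectorElement, GFreeze) only add typed operand accessors on top of GenericMachineInstr. As a rough usage sketch (not part of the patch, and assuming `MRI` plus the GlobalISel utilities such as getOpcodeDef are in scope), a combine can recognize the freeze pattern like this:

  // Sketch: recognize an extract whose vector operand is a one-use G_FREEZE.
  static bool isExtractOfOneUseFreeze(MachineInstr &MI, MachineRegisterInfo &MRI) {
    auto *Extract = dyn_cast<GExtractVectorElement>(&MI);
    if (!Extract)
      return false;
    // Look through copies to the instruction defining the vector operand.
    if (auto *Freeze = getOpcodeDef<GFreeze>(Extract->getVectorReg(), MRI))
      return MRI.hasOneNonDBGUse(Freeze->getReg(0));
    return false;
  }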
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 6980cbd04aeb1c..1c71e6b80db051 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1291,6 +1291,12 @@ def match_addos : GICombineRule<
[{ return Helper.matchAddOverflow(*${root}, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+def match_extract_of_element : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
+ [{ return Helper.matchExtractVectorElement(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+
// Combines concat operations
def concat_matchinfo : GIDefMatchData<"SmallVector<Register>">;
def combine_concat_vector : GICombineRule<
@@ -1374,7 +1380,8 @@ def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
and_or_disjoint_mask, fma_combines, fold_binop_into_select,
sub_add_reg, select_to_minmax, redundant_binop_in_equality,
fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
- combine_concat_vector, double_icmp_zero_and_or_combine, match_addos]>;
+ combine_concat_vector, double_icmp_zero_and_or_combine, match_addos,
+ match_extract_of_element]>;
// A combine group used to for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
diff --git a/llvm/lib/CodeGen/GlobalISel/CMakeLists.txt b/llvm/lib/CodeGen/GlobalISel/CMakeLists.txt
index 46e6c6df5998e5..54ac7f72011a6e 100644
--- a/llvm/lib/CodeGen/GlobalISel/CMakeLists.txt
+++ b/llvm/lib/CodeGen/GlobalISel/CMakeLists.txt
@@ -6,6 +6,7 @@ add_llvm_component_library(LLVMGlobalISel
GlobalISel.cpp
Combiner.cpp
CombinerHelper.cpp
+ CombinerHelperVectorOps.cpp
GIMatchTableExecutor.cpp
GISelChangeObserver.cpp
IRTranslator.cpp
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
new file mode 100644
index 00000000000000..f1b42ed549636a
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
@@ -0,0 +1,174 @@
+//===- CombinerHelperVectorOps.cpp ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements CombinerHelper for G_EXTRACT_VECTOR_ELT.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/LowLevelTypeUtils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/Support/Casting.h"
+#include <optional>
+
+#define DEBUG_TYPE "gi-combiner"
+
+using namespace llvm;
+using namespace MIPatternMatch;
+
+bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
+ BuildFnTy &MatchInfo) {
+
+ GExtractVectorElement *Extract = cast<GExtractVectorElement>(&MI);
+
+ Register Dst = Extract->getReg(0);
+ Register Vector = Extract->getVectorReg();
+ Register Index = Extract->getIndexReg();
+ LLT DstTy = MRI.getType(Dst);
+ LLT VectorTy = MRI.getType(Vector);
+
+ // The vector register can be def'd by various ops that
+ // have vector as its type. They can all be used for
+ // constant folding, scalarizing, canonicalization, or
+ // combining based on symmetry.
+ //
+ // vector like ops
+ // * build vector
+ // * build vector trunc
+ // * shuffle vector
+ // * splat vector
+ // * concat vectors
+ // * insert/extract vector element
+ // * insert/extract subvector
+ // * vector loads
+ // * scalable vector loads
+ //
+ // compute like ops
+ // * binary ops
+ // * unary ops
+ // * exts and truncs
+ // * casts
+ // * fneg
+ // * select
+ // * phis
+ // * cmps
+ // * freeze
+ // * bitcast
+ // * undef
+
+ // Fold extractVectorElement(undef, undef) -> undef
+ if ((getOpcodeDef<GImplicitDef>(Vector, MRI) ||
+ getOpcodeDef<GImplicitDef>(Index, MRI)) &&
+ isLegalOrBeforeLegalizer({TargetOpcode::G_IMPLICIT_DEF, {DstTy}})) {
+ // If the Vector register is undef, then we cannot extract an element from
+ // it. An undef extract Index can be arbitrarily chosen to be an
+ // out-of-range index value, which would result in the instruction being
+ // poison.
+ MatchInfo = [=](MachineIRBuilder &B) { B.buildUndef(Dst); };
+ return true;
+ }
+
+ // We try to get the value of the Index register.
+ std::optional<ValueAndVReg> MaybeIndex =
+ getIConstantVRegValWithLookThrough(Index, MRI);
+ std::optional<APInt> IndexC = std::nullopt;
+
+ if (MaybeIndex)
+ IndexC = MaybeIndex->Value;
+
+ // Fold extractVectorElement(Vector, TOOLARGE) -> undef
+ if (IndexC && VectorTy.isFixedVector() &&
+ IndexC->uge(VectorTy.getNumElements()) &&
+ isLegalOrBeforeLegalizer({TargetOpcode::G_IMPLICIT_DEF, {DstTy}})) {
+ // For fixed-length vectors, it's invalid to extract out-of-range elements.
+ MatchInfo = [=](MachineIRBuilder &B) { B.buildUndef(Dst); };
+ return true;
+ }
+
+ // Fold extractVectorElement(freeze(FV), Index) ->
+ // freeze(extractVectorElement(FV, Index))
+ if (auto *Freeze = getOpcodeDef<GFreeze>(Vector, MRI)) {
+ if (MRI.hasOneNonDBGUse(Freeze->getReg(0)) &&
+ isLegalOrBeforeLegalizer({TargetOpcode::G_FREEZE, {DstTy}})) {
+ // For G_FREEZE, the input and the output types are identical.
+ // Moving the freeze from the Vector into the front of the extract
+ // preserves the freeze semantics. We check above that
+ // the Index register is not undef.
+ // Furthermore, the Vector register
+ // becomes easier to analyze. A build vector
+ // could have been hidden behind the freeze.
+ MatchInfo = [=](MachineIRBuilder &B) {
+ auto Extract =
+ B.buildExtractVectorElement(DstTy, Freeze->getSourceReg(), Index);
+ B.buildFreeze(Dst, Extract);
+ };
+ return true;
+ }
+ }
+
+ // Fold extractVectorElement(insertVectorElement(_, Value, Index), Index) ->
+ // Value
+ if (auto *Insert = getOpcodeDef<GInsertVectorElement>(Vector, MRI)) {
+ if (Insert->getIndexReg() == Index) {
+ // There is no one-use check. We have to keep the insert.
+ // We only check for equality of the Index registers.
+ // The combine is independent of their constness.
+ // We try to insert Value and then immediately extract
+ // it from the same Index.
+ MatchInfo = [=](MachineIRBuilder &B) {
+ B.buildCopy(Dst, Insert->getElementReg());
+ };
+ return true;
+ }
+ }
+
+ // Fold extractVectorElement(insertVectorElement(Vector, _, C1), C2),
+ // where C1 != C2
+ // -> extractVectorElement(Vector, C2)
+ if (IndexC) {
+ if (auto *Insert = getOpcodeDef<GInsertVectorElement>(Vector, MRI)) {
+ std::optional<ValueAndVReg> MaybeIndex =
+ getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
+ if (MaybeIndex && MaybeIndex->Value != *IndexC) {
+ // There is no one-use check. We have to keep the insert.
+ // When both Index registers are constants and not equal,
+ // we can look into the Vector register of the insert.
+ MatchInfo = [=](MachineIRBuilder &B) {
+ B.buildExtractVectorElement(Dst, Insert->getVectorReg(), Index);
+ };
+ return true;
+ }
+ }
+ }
+
+ // Fold extractVectorElement(BuildVector(.., V, ...), IndexOfV) -> V
+ if (IndexC) {
+ if (auto *Build = getOpcodeDef<GBuildVector>(Vector, MRI)) {
+ EVT Ty(getMVTForLLT(VectorTy));
+ if (MRI.hasOneNonDBGUse(Build->getReg(0)) ||
+ getTargetLowering().aggressivelyPreferBuildVectorSources(Ty)) {
+ // There is a one-use check. There are more combines on build vectors.
+ // If the Index is constant, then we can extract the element from the
+ // given offset.
+ MatchInfo = [=](MachineIRBuilder &B) {
+ B.buildCopy(Dst, Build->getSourceReg(IndexC->getLimitedValue()));
+ };
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
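All of the constant-index folds above start from the same lookup. As a minimal sketch of that step in isolation (not part of the patch, assuming `MRI` is in scope):

  // Sketch: resolve the extract index to a constant, looking through copies,
  // the same way getIConstantVRegValWithLookThrough is used above.
  static std::optional<APInt> getConstantIndex(Register Index,
                                               const MachineRegisterInfo &MRI) {
    if (auto ValAndVReg = getIConstantVRegValWithLookThrough(Index, MRI))
      return ValAndVReg->Value;
    return std::nullopt;
  }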
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
index a2116ccc767112..37dc33330196a8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
@@ -192,8 +192,8 @@ body: |
...
---
+# This test checks that this combine runs after the insertvec->build_vector combine.
name: extract_from_insert
-alignment: 4
tracksRegLiveness: true
liveins:
- { reg: '$x0' }
@@ -203,8 +203,6 @@ frameInfo:
body: |
bb.1:
liveins: $x0, $x1
- ; This test checks that this combine runs after the insertvec->build_vector
- ; combine.
; CHECK-LABEL: name: extract_from_insert
; CHECK: liveins: $x0, $x1
; CHECK-NEXT: {{ $}}
@@ -247,3 +245,214 @@ body: |
RET_ReallyLR implicit $x0
...
+---
+name: extract_from_vector_undef
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_vector_undef
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+ %idx:_(s32) = G_CONSTANT i32 -2
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_index_undef
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: extract_from_index_undef
+ ; CHECK: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_IMPLICIT_DEF
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_index_too_large
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_index_too_large
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 3000
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_with_freeze
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_with_freeze
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %vec:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: %idx:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ ; CHECK-NEXT: %extract:_(s64) = G_FREEZE [[EVEC]]
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w1
+ %fvec:_(<2 x s64>) = G_FREEZE %vec
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %fvec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_insert_symmetry
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_insert_symmetry
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %element:_(s64) = COPY $x1
+ ; CHECK-NEXT: $x0 = COPY %element(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w1
+ %element:_(s64) = COPY $x1
+ %invec:_(<2 x s64>) = G_INSERT_VECTOR_ELT %vec(<2 x s64>), %element(s64), %idx(s32)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %invec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_insert_with_different_consts
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_insert_with_different_consts
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %vec:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: %idx2:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx2(s32)
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 0
+ %idx2:_(s32) = G_CONSTANT i32 1
+ %element:_(s64) = COPY $x1
+ %invec:_(<2 x s64>) = G_INSERT_VECTOR_ELT %vec(<2 x s64>), %element(s64), %idx(s32)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %invec(<2 x s64>), %idx2(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_build_vector_non_const
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_non_const
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %idx:_(s32) = COPY $w0
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %arg2:_(s64) = COPY $x1
+ ; CHECK-NEXT: %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ ; CHECK-NEXT: %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_build_vector_const
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_const
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: $x0 = COPY %arg1(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
index c5c525a15ad9be..504222e0036e22 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -25,20 +25,9 @@ entry:
}
define i64 @extract_v2i64_undef_vector(<2 x i64> %a, i32 %c) {
-; CHECK-SD-LABEL: extract_v2i64_undef_vector:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: extract_v2i64_undef_vector:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub sp, sp, #16
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: mov w9, w0
-; CHECK-GI-NEXT: mov x8, sp
-; CHECK-GI-NEXT: and x9, x9, #0x1
-; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
-; CHECK-GI-NEXT: add sp, sp, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: extract_v2i64_undef_vector:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
entry:
%d = extractelement <2 x i64> undef, i32 %c
ret i64 %d
@@ -130,7 +119,6 @@ define i64 @extract_v2i64_extract_of_insert_different_const(<2 x i64> %a, i64 %e
;
; CHECK-GI-LABEL: extract_v2i64_extract_of_insert_different_const:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: mov v0.d[0], x0
; CHECK-GI-NEXT: mov d0, v0.d[1]
; CHECK-GI-NEXT: fmov x0, d0
; CHECK-GI-NEXT: ret
>From d2cca65cdc146cef68c898381b5d0aafbee77895 Mon Sep 17 00:00:00 2001
From: Thorsten Schütt <schuett at gmail.com>
Date: Fri, 15 Mar 2024 07:02:09 +0100
Subject: [PATCH 2/4] address review comments
---
.../GlobalISel/CombinerHelperVectorOps.cpp | 23 ++++++++++---------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
index f1b42ed549636a..88d367dfbbdea5 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
@@ -30,7 +30,6 @@ using namespace MIPatternMatch;
bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
BuildFnTy &MatchInfo) {
-
GExtractVectorElement *Extract = cast<GExtractVectorElement>(&MI);
Register Dst = Extract->getReg(0);
@@ -39,10 +38,9 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
LLT DstTy = MRI.getType(Dst);
LLT VectorTy = MRI.getType(Vector);
- // The vector register can be def'd by various ops that
- // have vector as its type. They can all be used for
- // constant folding, scalarizing, canonicalization, or
- // combining based on symmetry.
+ // The vector register can be def'd by various ops whose result type is a
+ // vector. They can all be used for constant folding, scalarizing,
+ // canonicalization, or combining based on symmetry.
//
// vector like ops
// * build vector
@@ -68,9 +66,12 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
// * bitcast
// * undef
+ // The MIs def'd on the Index and Vector register;
+ MachineInstr *IndexMI = getDefIgnoringCopies(Index, MRI);
+ MachineInstr *VectorMI = getDefIgnoringCopies(Vector, MRI);
+
// Fold extractVectorElement(undef, undef) -> undef
- if ((getOpcodeDef<GImplicitDef>(Vector, MRI) ||
- getOpcodeDef<GImplicitDef>(Index, MRI)) &&
+ if ((isa<GImplicitDef>(VectorMI) || isa<GImplicitDef>(IndexMI)) &&
isLegalOrBeforeLegalizer({TargetOpcode::G_IMPLICIT_DEF, {DstTy}})) {
// If the Vector register is undef, then we cannot extract an element from
// it. An undef extract Index can be arbitrarily chosen to be an
@@ -99,7 +100,7 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
// Fold extractVectorElement(freeze(FV), Index) ->
// freeze(extractVectorElement(FV, Index))
- if (auto *Freeze = getOpcodeDef<GFreeze>(Vector, MRI)) {
+ if (auto *Freeze = dyn_cast<GFreeze>(VectorMI)) {
if (MRI.hasOneNonDBGUse(Freeze->getReg(0)) &&
isLegalOrBeforeLegalizer({TargetOpcode::G_FREEZE, {DstTy}})) {
// For G_FREEZE, the input and the output types are identical.
@@ -120,7 +121,7 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
// Fold extractVectorElement(insertVectorElement(_, Value, Index), Index) ->
// Value
- if (auto *Insert = getOpcodeDef<GInsertVectorElement>(Vector, MRI)) {
+ if (auto *Insert = dyn_cast<GInsertVectorElement>(VectorMI)) {
if (Insert->getIndexReg() == Index) {
// There is no one-use check. We have to keep the insert.
// We only check for equality of the Index registers.
@@ -138,7 +139,7 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
// where C1 != C2
// -> extractVectorElement(Vector, C2)
if (IndexC) {
- if (auto *Insert = getOpcodeDef<GInsertVectorElement>(Vector, MRI)) {
+ if (auto *Insert = dyn_cast<GInsertVectorElement>(VectorMI)) {
std::optional<ValueAndVReg> MaybeIndex =
getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
if (MaybeIndex && MaybeIndex->Value != *IndexC) {
@@ -155,7 +156,7 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
// Fold extractVectorElement(BuildVector(.., V, ...), IndexOfV) -> V
if (IndexC) {
- if (auto *Build = getOpcodeDef<GBuildVector>(Vector, MRI)) {
+ if (auto *Build = dyn_cast<GBuildVector>(VectorMI)) {
EVT Ty(getMVTForLLT(VectorTy));
if (MRI.hasOneNonDBGUse(Build->getReg(0)) ||
getTargetLowering().aggressivelyPreferBuildVectorSources(Ty)) {
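The main change in this revision is to resolve the defining instructions of the Index and Vector registers once via getDefIgnoringCopies and then dispatch with isa/dyn_cast instead of repeated getOpcodeDef lookups. A minimal sketch of that dispatch pattern (not part of the patch, assuming `MRI` is in scope):

  // Sketch: classify the vector operand's defining instruction once.
  static void classifyVectorDef(Register Vector, MachineRegisterInfo &MRI) {
    MachineInstr *VectorMI = getDefIgnoringCopies(Vector, MRI);
    if (isa<GImplicitDef>(VectorMI)) {
      // Undef vector: the extract folds to undef.
    } else if (auto *Build = dyn_cast<GBuildVector>(VectorMI)) {
      (void)Build; // Constant-index extracts can be forwarded to a source.
    } else if (auto *Insert = dyn_cast<GInsertVectorElement>(VectorMI)) {
      (void)Insert; // Same/different index cases as in the combine.
    }
  }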
>From e4ae6ebf42d54df5da0e8e7ef490b4eb7721c8b1 Mon Sep 17 00:00:00 2001
From: Thorsten Schütt <schuett at gmail.com>
Date: Wed, 20 Mar 2024 12:26:42 +0100
Subject: [PATCH 3/4] fix typo
---
llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
index 88d367dfbbdea5..3dd32b59c19c7c 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
@@ -66,7 +66,7 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
// * bitcast
// * undef
- // The MIs def'd on the Index and Vector register;
+ // The MIs def'd on the Index and Vector registers;
MachineInstr *IndexMI = getDefIgnoringCopies(Index, MRI);
MachineInstr *VectorMI = getDefIgnoringCopies(Vector, MRI);
>From e048acb8be842813983e7f02ed6145fd496c1fa2 Mon Sep 17 00:00:00 2001
From: Thorsten Schütt <schuett at gmail.com>
Date: Wed, 27 Mar 2024 09:13:06 +0100
Subject: [PATCH 4/4] rework
---
.../llvm/CodeGen/GlobalISel/CombinerHelper.h | 17 ++
.../include/llvm/Target/GlobalISel/Combine.td | 41 ++-
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 8 +
.../GlobalISel/CombinerHelperVectorOps.cpp | 235 +++++++++++++-----
4 files changed, 230 insertions(+), 71 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index d2f9d74bf7d61a..b597c6229b57a4 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -594,6 +594,10 @@ class CombinerHelper {
/// This variant does not erase \p MI after calling the build function.
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo);
+ /// Use a function which takes in a MachineIRBuilder to perform a combine.
+ /// By default, it erases the instruction \p MI from the function.
+ void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo);
+
bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo);
bool matchFunnelShiftToRotate(MachineInstr &MI);
void applyFunnelShiftToRotate(MachineInstr &MI);
@@ -818,6 +822,19 @@ class CombinerHelper {
/// Combine extract vector element.
bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo);
+ /// Combine extract vector element with freeze on the vector register.
+ bool matchExtractVectorElementWithFreeze(const MachineOperand &MO,
+ BuildFnTy &MatchInfo);
+
+ /// Combine extract vector element with a build vector on the vector register.
+ bool matchExtractVectorElementWithBuildVector(const MachineOperand &MO,
+ BuildFnTy &MatchInfo);
+
+ /// Combine extract vector element with an insert vector element on the vector
+ /// register and different indices.
+ bool matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO,
+ BuildFnTy &MatchInfo);
+
private:
/// Checks for legality of an indexed variant of \p LdSt.
bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 1c71e6b80db051..976e7140c843db 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1297,6 +1297,33 @@ def match_extract_of_element : GICombineRule<
[{ return Helper.matchExtractVectorElement(*${root}, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+def extract_vector_element_not_const : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_INSERT_VECTOR_ELT $src, $x, $value, $idx),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx)),
+ (apply (GIReplaceReg $root, $value))>;
+
+def extract_vector_element_different_indices : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_INSERT_VECTOR_ELT $src, $x, $value, $idx2),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx1),
+ [{ return Helper.matchExtractVectorElementWithDifferentIndices(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_build_vector : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR $src, $x),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithBuildVector(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def extract_vector_element_freeze : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_FREEZE $src, $input),
+ (G_EXTRACT_VECTOR_ELT $root, $src, $idx),
+ [{ return Helper.matchExtractVectorElementWithFreeze(${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
// Combines concat operations
def concat_matchinfo : GIDefMatchData<"SmallVector<Register>">;
def combine_concat_vector : GICombineRule<
@@ -1305,6 +1332,15 @@ def combine_concat_vector : GICombineRule<
[{ return Helper.matchCombineConcatVectors(*${root}, ${matchinfo}); }]),
(apply [{ Helper.applyCombineConcatVectors(*${root}, ${matchinfo}); }])>;
+// match_extract_of_element must come first!
+def vector_ops_combines: GICombineGroup<[
+match_extract_of_element,
+extract_vector_element_not_const,
+extract_vector_element_different_indices,
+extract_vector_element_build_vector,
+extract_vector_element_freeze
+]>;
+
// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
undef_to_negative_one,
@@ -1362,7 +1398,7 @@ def constant_fold_binops : GICombineGroup<[constant_fold_binop,
def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
extract_vec_elt_combines, combines_for_extload, combine_extracted_vector_load,
- undef_combines, identity_combines, phi_combines,
+ undef_combines, identity_combines, phi_combines, vector_ops_combines,
simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big,
reassocs, ptr_add_immed_chain,
shl_ashr_to_sext_inreg, sext_inreg_of_load,
@@ -1380,8 +1416,7 @@ def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
and_or_disjoint_mask, fma_combines, fold_binop_into_select,
sub_add_reg, select_to_minmax, redundant_binop_in_equality,
fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
- combine_concat_vector, double_icmp_zero_and_or_combine, match_addos,
- match_extract_of_element]>;
+ combine_concat_vector, double_icmp_zero_and_or_combine, match_addos]>;
// A combine group used to for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 2a521b6b068af7..54a3efa33a2370 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4100,6 +4100,14 @@ void CombinerHelper::applyBuildFn(
MI.eraseFromParent();
}
+void CombinerHelper::applyBuildFnMO(const MachineOperand &MO,
+ BuildFnTy &MatchInfo) {
+ MachineInstr *Root = getDefIgnoringCopies(MO.getReg(), MRI);
+ Builder.setInstrAndDebugLoc(*Root);
+ MatchInfo(Builder);
+ Root->eraseFromParent();
+}
+
void CombinerHelper::applyBuildFnNoErase(
MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
Builder.setInstrAndDebugLoc(MI);
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
index 3dd32b59c19c7c..d97b2df7b5d2c8 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
@@ -17,6 +17,7 @@
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
@@ -98,78 +99,176 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
return true;
}
- // Fold extractVectorElement(freeze(FV), Index) ->
- // freeze(extractVectorElement(FV, Index))
- if (auto *Freeze = dyn_cast<GFreeze>(VectorMI)) {
- if (MRI.hasOneNonDBGUse(Freeze->getReg(0)) &&
- isLegalOrBeforeLegalizer({TargetOpcode::G_FREEZE, {DstTy}})) {
- // For G_FREEZE, the input and the output types are identical.
- // Moving the freeze from the Vector into the front of the extract
- // preserves the freeze semantics. We check above that
- // the Index register is not undef.
- // Furthermore, the Vector register
- // becomes easier to analyze. A build vector
- // could have been hidden behind the freeze.
- MatchInfo = [=](MachineIRBuilder &B) {
- auto Extract =
- B.buildExtractVectorElement(DstTy, Freeze->getSourceReg(), Index);
- B.buildFreeze(Dst, Extract);
- };
- return true;
- }
- }
+ return false;
+}
- // Fold extractVectorElement(insertVectorElement(_, Value, Index), Index) ->
- // Value
- if (auto *Insert = dyn_cast<GInsertVectorElement>(VectorMI)) {
- if (Insert->getIndexReg() == Index) {
- // There is no one-use check. We have to keep the insert.
- // We only check for equality of the Index registers.
- // The combine is independent of their constness.
- // We try to insert Value and then immediately extract
- // it from the same Index.
- MatchInfo = [=](MachineIRBuilder &B) {
- B.buildCopy(Dst, Insert->getElementReg());
- };
- return true;
- }
- }
+bool CombinerHelper::matchExtractVectorElementWithDifferentIndices(
+ const MachineOperand &MO, BuildFnTy &MatchInfo) {
+ MachineInstr *Root = getDefIgnoringCopies(MO.getReg(), MRI);
+ GExtractVectorElement *Extract = cast<GExtractVectorElement>(Root);
- // Fold extractVectorElement(insertVectorElement(Vector, _, C1), C2),
- // where C1 != C2
- // -> extractVectorElement(Vector, C2)
- if (IndexC) {
- if (auto *Insert = dyn_cast<GInsertVectorElement>(VectorMI)) {
- std::optional<ValueAndVReg> MaybeIndex =
- getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
- if (MaybeIndex && MaybeIndex->Value != *IndexC) {
- // There is no one-use check. We have to keep the insert.
- // When both Index registers are constants and not equal,
- // we can look into the Vector register of the insert.
- MatchInfo = [=](MachineIRBuilder &B) {
- B.buildExtractVectorElement(Dst, Insert->getVectorReg(), Index);
- };
- return true;
- }
- }
- }
+ //
+ // %idx1:_(s64) = G_CONSTANT i64 1
+ // %idx2:_(s64) = G_CONSTANT i64 2
+ // %insert:_(<2 x s32>) = G_INSERT_VECTOR_ELT %bv(<2 x s32>), %value(s32), %idx2(s64)
+ // %extract:_(s32) = G_EXTRACT_VECTOR_ELT %insert(<2 x s32>), %idx1(s64)
+ //
+ // -->
+ //
+ // %insert:_(<2 x s32>) = G_INSERT_VECTOR_ELT %bv(<2 x s32>), %value(s32), %idx2(s64)
+ // %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %idx1(s64)
+ //
+ //
+
+ Register Index = Extract->getIndexReg();
+
+ // We try to get the value of the Index register.
+ std::optional<ValueAndVReg> MaybeIndex =
+ getIConstantVRegValWithLookThrough(Index, MRI);
+ std::optional<APInt> IndexC = std::nullopt;
+
+ if (!MaybeIndex)
+ return false;
+ IndexC = MaybeIndex->Value;
- // Fold extractVectorElement(BuildVector(.., V, ...), IndexOfV) -> V
- if (IndexC) {
- if (auto *Build = dyn_cast<GBuildVector>(VectorMI)) {
- EVT Ty(getMVTForLLT(VectorTy));
- if (MRI.hasOneNonDBGUse(Build->getReg(0)) ||
- getTargetLowering().aggressivelyPreferBuildVectorSources(Ty)) {
- // There is a one-use check. There are more combines on build vectors.
- // If the Index is constant, then we can extract the element from the
- // given offset.
- MatchInfo = [=](MachineIRBuilder &B) {
- B.buildCopy(Dst, Build->getSourceReg(IndexC->getLimitedValue()));
- };
- return true;
- }
- }
+ Register Vector = Extract->getVectorReg();
+
+ GInsertVectorElement *Insert =
+ getOpcodeDef<GInsertVectorElement>(Vector, MRI);
+ if (!Insert)
+ return false;
+
+ Register Dst = Extract->getReg(0);
+
+ std::optional<ValueAndVReg> MaybeInsertIndex =
+ getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
+
+ if (MaybeInsertIndex && MaybeInsertIndex->Value != *IndexC) {
+ // There is no one-use check. We have to keep the insert. When both Index
+ // registers are constants and not equal, we can look into the Vector
+ // register of the insert.
+ MatchInfo = [=](MachineIRBuilder &B) {
+ B.buildExtractVectorElement(Dst, Insert->getVectorReg(), Index);
+ };
+ return true;
}
return false;
}
+
+bool CombinerHelper::matchExtractVectorElementWithFreeze(
+ const MachineOperand &MO, BuildFnTy &MatchInfo) {
+ MachineInstr *Root = getDefIgnoringCopies(MO.getReg(), MRI);
+ GExtractVectorElement *Extract = cast<GExtractVectorElement>(Root);
+
+ Register Vector = Extract->getVectorReg();
+
+ //
+ // %bv:_(<2 x s32>) = G_BUILD_VECTOR %arg1(s32), %arg2(s32)
+ // %freeze:_(<2 x s32>) = G_FREEZE %bv(<2 x s32>)
+ // %extract:_(s32) = G_EXTRACT_VECTOR_ELT %freeze(<2 x s32>), %opaque(s64)
+ //
+ // -->
+ //
+ // %bv:_(<2 x s32>) = G_BUILD_VECTOR %arg1(s32), %arg2(s32)
+ // %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %opaque(s64)
+ // %freeze:_(s32) = G_FREEZE %extract(s32)
+ //
+ //
+
+ // For G_FREEZE, the input and the output types are identical. Moving the
+ // freeze from the Vector into the front of the extract preserves the freeze
+ // semantics. The result is still freeze'd. Furthermore, the Vector register
+ // becomes easier to analyze. A build vector could have been hidden behind the
+ // freeze.
+
+ // We expect a freeze on the Vector register.
+ GFreeze *Freeze = getOpcodeDef<GFreeze>(Vector, MRI);
+ if (!Freeze)
+ return false;
+
+ Register Dst = Extract->getReg(0);
+ LLT DstTy = MRI.getType(Dst);
+
+ // We first have to check for one-use and legality of the freeze.
+ // The type of the extractVectorElement did not change.
+ if (!MRI.hasOneNonDBGUse(Freeze->getReg(0)) ||
+ !isLegalOrBeforeLegalizer({TargetOpcode::G_FREEZE, {DstTy}}))
+ return false;
+
+ Register Index = Extract->getIndexReg();
+
+ // We move the freeze from the Vector register in front of the
+ // extractVectorElement.
+ MatchInfo = [=](MachineIRBuilder &B) {
+ auto Extract =
+ B.buildExtractVectorElement(DstTy, Freeze->getSourceReg(), Index);
+ B.buildFreeze(Dst, Extract);
+ };
+
+ return true;
+}
+
+bool CombinerHelper::matchExtractVectorElementWithBuildVector(
+ const MachineOperand &MO, BuildFnTy &MatchInfo) {
+ MachineInstr *Root = getDefIgnoringCopies(MO.getReg(), MRI);
+ GExtractVectorElement *Extract = cast<GExtractVectorElement>(Root);
+
+ //
+ // %zero:_(s64) = G_CONSTANT i64 0
+ // %bv:_(<2 x s32>) = G_BUILD_VECTOR %arg1(s32), %arg2(s32)
+ // %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %zero(s64)
+ //
+ // -->
+ //
+ // %extract:_(s32) = COPY %arg1(s32)
+ //
+ //
+ //
+ // %bv:_(<2 x s32>) = G_BUILD_VECTOR %arg1(s32), %arg2(s32)
+ // %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %opaque(s64)
+ //
+ // -->
+ //
+ // %bv:_(<2 x s32>) = G_BUILD_VECTOR %arg1(s32), %arg2(s32)
+ // %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %opaque(s64)
+ //
+
+ Register Vector = Extract->getVectorReg();
+
+ // We expect a buildVector on the Vector register.
+ GBuildVector *Build = getOpcodeDef<GBuildVector>(Vector, MRI);
+ if (!Build)
+ return false;
+
+ LLT VectorTy = MRI.getType(Vector);
+
+ // There is a one-use check. There are more combines on build vectors.
+ EVT Ty(getMVTForLLT(VectorTy));
+ if (!MRI.hasOneNonDBGUse(Build->getReg(0)) &&
+ !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
+ return false;
+
+ Register Index = Extract->getIndexReg();
+
+ // If the Index is constant, then we can extract the element from the given
+ // offset.
+ std::optional<ValueAndVReg> MaybeIndex =
+ getIConstantVRegValWithLookThrough(Index, MRI);
+ if (!MaybeIndex)
+ return false;
+
+ // We now know that there is a buildVector def'd on the Vector register and
+ // the index is const. The combine will succeed.
+
+ Register Dst = Extract->getReg(0);
+
+ MatchInfo = [=](MachineIRBuilder &B) {
+ B.buildCopy(Dst, Build->getSourceReg(MaybeIndex->Value.getLimitedValue()));
+ };
+
+ return true;
+}
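matchExtractVectorElementWithBuildVector ends by copying the build-vector source at the constant index. As a sketch of that selection with an explicit range check (not part of the patch; out-of-range indices are already folded to undef by matchExtractVectorElement):

  // Sketch: pick the build-vector source operand for a known-constant index.
  static std::optional<Register> selectBuildVectorSource(const GBuildVector &Build,
                                                         const APInt &Index) {
    if (Index.uge(Build.getNumSources()))
      return std::nullopt; // Out of range; the undef fold covers this case.
    return Build.getSourceReg(Index.getZExtValue());
  }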