[llvm] [GlobalIsel] Combine cast of const integer II (PR #100835)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 26 16:40:20 PDT 2024
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-globalisel
Author: Thorsten Schütt (tschuett)
<details>
<summary>Changes</summary>
Alternative to https://github.com/llvm/llvm-project/pull/96139
---
Patch is 22.05 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/100835.diff
6 Files Affected:
- (modified) llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h (+2)
- (modified) llvm/include/llvm/CodeGen/TargetLowering.h (+8)
- (modified) llvm/include/llvm/Target/GlobalISel/Combine.td (+18-1)
- (modified) llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp (+45)
- (modified) llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir (+134)
- (modified) llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir (+86-79)
``````````diff
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 05d7e882f5135..54862397c6443 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -893,6 +893,8 @@ class CombinerHelper {
bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI,
BuildFnTy &MatchInfo);
+ bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo);
+
private:
/// Checks for legality of an indexed variant of \p LdSt.
bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 9d9886f4920a2..7b271e3dc54c1 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -3069,6 +3069,14 @@ class TargetLoweringBase {
return false;
}
+ /// Return true if sign-extension from FromTy to ToTy is cheaper than
+ /// zero-extension.
+ bool isSExtCheaperThanZExt(LLT FromTy, LLT ToTy, const DataLayout &DL,
+ LLVMContext &Ctx) const {
+ return isSExtCheaperThanZExt(getApproximateEVTForLLT(FromTy, DL, Ctx),
+ getApproximateEVTForLLT(ToTy, DL, Ctx));
+ }
+
/// Return true if this constant should be sign extended when promoting to
/// a larger type.
virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 2246e20ecc1dc..335d55ca25ec1 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1783,13 +1783,30 @@ def select_of_zext : select_of_opcode<G_ZEXT>;
def select_of_anyext : select_of_opcode<G_ANYEXT>;
def select_of_truncate : select_of_opcode<G_TRUNC>;
+// Cast of integer.
+class integer_of_opcode<Instruction castOpcode> : GICombineRule <
+ (defs root:$root, apint_matchinfo:$matchinfo),
+ (match (G_CONSTANT $int, $imm),
+ (castOpcode $root, $int):$Cast,
+ [{ return Helper.matchCastOfInteger(*${Cast}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${Cast}, ${matchinfo}); }])>;
+
+def zext_of_integer : integer_of_opcode<G_ZEXT>;
+def sext_of_integer : integer_of_opcode<G_SEXT>;
+def anyext_of_integer : integer_of_opcode<G_ANYEXT>;
+def truncate_of_integer : integer_of_opcode<G_TRUNC>;
+
def cast_combines: GICombineGroup<[
truncate_of_zext,
truncate_of_sext,
truncate_of_anyext,
select_of_zext,
select_of_anyext,
- select_of_truncate
+ select_of_truncate,
+ zext_of_integer,
+ sext_of_integer,
+ anyext_of_integer,
+ truncate_of_integer
]>;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp
index 59295f7a65835..20382a8876d8d 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp
@@ -209,3 +209,48 @@ bool CombinerHelper::matchCastOfSelect(const MachineInstr &CastMI,
return true;
}
+
+bool CombinerHelper::matchCastOfInteger(const MachineInstr &CastMI,
+ APInt &MatchInfo) {
+ const GExtOrTruncOp *Cast = cast<GExtOrTruncOp>(&CastMI);
+
+ std::optional<APInt> Input = getIConstantVRegVal(Cast->getSrcReg(), MRI);
+ if (!Input)
+ return false;
+
+ LLT DstTy = MRI.getType(Cast->getReg(0));
+ LLT SrcTy = MRI.getType(Cast->getSrcReg());
+
+ if (!isConstantLegalOrBeforeLegalizer(DstTy))
+ return false;
+
+ switch (Cast->getOpcode()) {
+ case TargetOpcode::G_ZEXT: {
+ MatchInfo = Input->zext(DstTy.getScalarSizeInBits());
+ return true;
+ }
+ case TargetOpcode::G_SEXT: {
+ MatchInfo = Input->sext(DstTy.getScalarSizeInBits());
+ return true;
+ }
+ case TargetOpcode::G_TRUNC: {
+ MatchInfo = Input->trunc(DstTy.getScalarSizeInBits());
+ return true;
+ }
+ case TargetOpcode::G_ANYEXT: {
+ const auto &TLI = getTargetLowering();
+ LLVMContext &Ctx = getContext();
+ const DataLayout &DL = getDataLayout();
+
+ // Some targets like RISC-V prefer to sign extend some types.
+ if (TLI.isSExtCheaperThanZExt(SrcTy, DstTy, DL, Ctx))
+ MatchInfo = Input->sext(DstTy.getScalarSizeInBits());
+ else
+ MatchInfo = Input->zext(DstTy.getScalarSizeInBits());
+
+ return true;
+ }
+ default:
+ return false;
+ }
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir
index 0f436127ea2eb..08cdbb644f4d3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir
@@ -129,3 +129,137 @@ body: |
%res:_(<2 x s64>) = G_SELECT %cond(<2 x s32>), %bv, %bv2
%small:_(<2 x s32>) = G_TRUNC %res(<2 x s64>)
$x0 = COPY %small(<2 x s32>)
+
+...
+---
+name: zext_const
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-PRE-LABEL: name: zext_const
+ ; CHECK-PRE: liveins: $w0, $w1
+ ; CHECK-PRE-NEXT: {{ $}}
+ ; CHECK-PRE-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
+ ; CHECK-PRE-NEXT: $x0 = COPY [[C]](s64)
+ ;
+ ; CHECK-POST-LABEL: name: zext_const
+ ; CHECK-POST: liveins: $w0, $w1
+ ; CHECK-POST-NEXT: {{ $}}
+ ; CHECK-POST-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+ ; CHECK-POST-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
+ ; CHECK-POST-NEXT: $x0 = COPY [[ZEXT]](s64)
+ %2:_(s32) = G_CONSTANT i32 5
+ %3:_(s64) = G_ZEXT %2
+ $x0 = COPY %3
+...
+---
+name: sext_const
+legalized: true
+body: |
+ bb.0:
+ liveins: $q0, $q1
+ ; CHECK-PRE-LABEL: name: sext_const
+ ; CHECK-PRE: liveins: $q0, $q1
+ ; CHECK-PRE-NEXT: {{ $}}
+ ; CHECK-PRE-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -5
+ ; CHECK-PRE-NEXT: $x0 = COPY [[C]](s64)
+ ;
+ ; CHECK-POST-LABEL: name: sext_const
+ ; CHECK-POST: liveins: $q0, $q1
+ ; CHECK-POST-NEXT: {{ $}}
+ ; CHECK-POST-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -5
+ ; CHECK-POST-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C]](s32)
+ ; CHECK-POST-NEXT: $x0 = COPY [[SEXT]](s64)
+ %2:_(s32) = G_CONSTANT i32 -5
+ %3:_(s64) = G_SEXT %2
+ $x0 = COPY %3
+...
+---
+name: trunc_const
+legalized: true
+body: |
+ bb.0:
+ liveins: $q0, $q1
+ ; CHECK-PRE-LABEL: name: trunc_const
+ ; CHECK-PRE: liveins: $q0, $q1
+ ; CHECK-PRE-NEXT: {{ $}}
+ ; CHECK-PRE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 705032704
+ ; CHECK-PRE-NEXT: $w0 = COPY [[C]](s32)
+ ;
+ ; CHECK-POST-LABEL: name: trunc_const
+ ; CHECK-POST: liveins: $q0, $q1
+ ; CHECK-POST-NEXT: {{ $}}
+ ; CHECK-POST-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5000000000
+ ; CHECK-POST-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+ ; CHECK-POST-NEXT: $w0 = COPY [[TRUNC]](s32)
+ %2:_(s64) = G_CONSTANT i64 5000000000
+ %3:_(s32) = G_TRUNC %2
+ $w0 = COPY %3
+...
+---
+name: trunc_const_bv
+legalized: true
+body: |
+ bb.0:
+ liveins: $q0, $q1
+ ; CHECK-LABEL: name: trunc_const_bv
+ ; CHECK: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5000000000
+ ; CHECK-NEXT: %bv:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s32>) = G_TRUNC %bv(<2 x s64>)
+ ; CHECK-NEXT: $x0 = COPY [[TRUNC]](<2 x s32>)
+ %2:_(s64) = G_CONSTANT i64 5000000000
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR %2, %2
+ %3:_(<2 x s32>) = G_TRUNC %bv
+ $x0 = COPY %3
+...
+---
+name: anyext_const
+legalized: true
+body: |
+ bb.0:
+ liveins: $q0, $q1
+ ; CHECK-PRE-LABEL: name: anyext_const
+ ; CHECK-PRE: liveins: $q0, $q1
+ ; CHECK-PRE-NEXT: {{ $}}
+ ; CHECK-PRE-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 500
+ ; CHECK-PRE-NEXT: $x0 = COPY [[C]](s64)
+ ;
+ ; CHECK-POST-LABEL: name: anyext_const
+ ; CHECK-POST: liveins: $q0, $q1
+ ; CHECK-POST-NEXT: {{ $}}
+ ; CHECK-POST-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 500
+ ; CHECK-POST-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-POST-NEXT: $x0 = COPY [[ANYEXT]](s64)
+ %2:_(s32) = G_CONSTANT i32 500
+ %3:_(s64) = G_ANYEXT %2
+ $x0 = COPY %3
+...
+---
+name: mul_const
+legalized: true
+body: |
+ bb.0:
+ liveins: $q0, $q1
+ ; CHECK-PRE-LABEL: name: mul_const
+ ; CHECK-PRE: liveins: $q0, $q1
+ ; CHECK-PRE-NEXT: {{ $}}
+ ; CHECK-PRE-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-PRE-NEXT: $x0 = COPY [[C]](s64)
+ ;
+ ; CHECK-POST-LABEL: name: mul_const
+ ; CHECK-POST: liveins: $q0, $q1
+ ; CHECK-POST-NEXT: {{ $}}
+ ; CHECK-POST-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-POST-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB %x, %x
+ ; CHECK-POST-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+ ; CHECK-POST-NEXT: %mul:_(s32) = G_SHL [[SUB]], [[C]](s64)
+ ; CHECK-POST-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT %mul(s32)
+ ; CHECK-POST-NEXT: $x0 = COPY [[ANYEXT]](s64)
+ %x:_(s32) = COPY $w0
+ %zero:_(s32) = G_CONSTANT i32 0
+ %mul:_(s32) = G_MUL %x, %zero
+ %3:_(s64) = G_ANYEXT %mul
+ $x0 = COPY %3
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
index 00c7fc4cab3ab..9ed1e2d9eee3b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
@@ -64,13 +64,14 @@ body: |
; CHECK-LABEL: name: test_ms1
; CHECK: liveins: $w1, $w2, $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
- ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
- ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
- ; CHECK: G_MEMSET [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64), 1 :: (store (s8) into %ir.dst)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
+ ; CHECK-NEXT: G_MEMSET [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64), 1 :: (store (s8) into %ir.dst)
+ ; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s32) = COPY $w2
@@ -90,17 +91,18 @@ body: |
; CHECK-LABEL: name: test_ms2_const
; CHECK: liveins: $w1, $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
- ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s8)
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
- ; CHECK: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
- ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
- ; CHECK: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
+ ; CHECK-NEXT: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
+ ; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%3:_(s64) = G_CONSTANT i64 16
@@ -119,20 +121,21 @@ body: |
; CHECK-LABEL: name: test_zero_const
; CHECK: liveins: $w1, $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
- ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>) into %ir.dst, align 1)
- ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
- ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %ir.dst + 16, align 1)
- ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
- ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into %ir.dst + 32, align 1)
- ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
- ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into %ir.dst + 48, align 1)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+ ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>) into %ir.dst, align 1)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %ir.dst + 16, align 1)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into %ir.dst + 32, align 1)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into %ir.dst + 48, align 1)
+ ; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = G_CONSTANT i32 0
%3:_(s64) = G_CONSTANT i64 64
@@ -152,13 +155,14 @@ body: |
; CHECK-LABEL: name: test_ms3_const_both
; CHECK: liveins: $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
- ; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
- ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
- ; CHECK: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
+ ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
+ ; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s8) = G_CONSTANT i8 64
%2:_(s64) = G_CONSTANT i64 16
@@ -176,24 +180,25 @@ body: |
; CHECK-LABEL: name: test_ms_vector
; CHECK: liveins: $w1, $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
- ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s8)
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
- ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL]](s64)
- ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>) into %ir.dst, align 1)
- ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
- ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %ir.dst + 16, align 1)
- ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
- ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into %ir.dst + 32, align 1)
- ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 44
- ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
- ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into %ir.dst + 44, align 1)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL]](s64)
+ ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>) into %ir.dst, align 1)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %ir.dst + 16, align 1)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into %ir.dst + 32, align 1)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 44
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into %ir.dst + 44, align 1)
+ ; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%3:_(s64) = G_CONSTANT i64 60
@@ -212,17 +217,18 @@ body: |
; CHECK-LABEL: name: test_ms4_const_both_unaligned
; CHECK: liveins: $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
- ; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
- ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
- ; CHECK: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
- ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
- ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
- ; CHECK: G_STORE [[TRUNC]](s16), [[PTR_ADD1]](p0) :: (store (s16) into %ir.dst + 16, align 1)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
+ ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
+ ; CHECK-NEX...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/100835
More information about the llvm-commits
mailing list