[llvm] a1aa039 - [AArch64][GlobalISel] Update shuffle->ext test before patch.
Amara Emerson via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 29 09:20:45 PDT 2022
Author: Amara Emerson
Date: 2022-09-29T17:20:34+01:00
New Revision: a1aa0390cb53374cda05af8b9f780ecfa0ba16e4
URL: https://github.com/llvm/llvm-project/commit/a1aa0390cb53374cda05af8b9f780ecfa0ba16e4
DIFF: https://github.com/llvm/llvm-project/commit/a1aa0390cb53374cda05af8b9f780ecfa0ba16e4.diff
LOG: [AArch64][GlobalISel] Update shuffle->ext test before patch.
Added:
Modified:
llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir
index 25cd37aaeb99a..2dbe4e518cd7e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir
@@ -14,12 +14,13 @@ body: |
liveins: $d0, $d1
; CHECK-LABEL: name: v8s8_cst3
; CHECK: liveins: $d0, $d1
- ; CHECK: %v1:_(<8 x s8>) = COPY $d0
- ; CHECK: %v2:_(<8 x s8>) = COPY $d1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
- ; CHECK: %shuf:_(<8 x s8>) = G_EXT %v1, %v2, [[C]](s32)
- ; CHECK: $d0 = COPY %shuf(<8 x s8>)
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<8 x s8>) = COPY $d0
+ ; CHECK-NEXT: %v2:_(<8 x s8>) = COPY $d1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: %shuf:_(<8 x s8>) = G_EXT %v1, %v2, [[C]](s32)
+ ; CHECK-NEXT: $d0 = COPY %shuf(<8 x s8>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%v1:_(<8 x s8>) = COPY $d0
%v2:_(<8 x s8>) = COPY $d1
%shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(3, 4, 5, 6, 7, 8, 9, 10)
@@ -36,12 +37,13 @@ body: |
liveins: $d0, $d1
; CHECK-LABEL: name: v8s8_cst5
; CHECK: liveins: $d0, $d1
- ; CHECK: %v1:_(<8 x s8>) = COPY $d0
- ; CHECK: %v2:_(<8 x s8>) = COPY $d1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; CHECK: %shuf:_(<8 x s8>) = G_EXT %v2, %v1, [[C]](s32)
- ; CHECK: $d0 = COPY %shuf(<8 x s8>)
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<8 x s8>) = COPY $d0
+ ; CHECK-NEXT: %v2:_(<8 x s8>) = COPY $d1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+ ; CHECK-NEXT: %shuf:_(<8 x s8>) = G_EXT %v2, %v1, [[C]](s32)
+ ; CHECK-NEXT: $d0 = COPY %shuf(<8 x s8>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%v1:_(<8 x s8>) = COPY $d0
%v2:_(<8 x s8>) = COPY $d1
%shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(13, 14, 15, 0, 1, 2, 3, 4)
@@ -58,12 +60,13 @@ body: |
liveins: $q0, $q1
; CHECK-LABEL: name: v16s8_cst3
; CHECK: liveins: $q0, $q1
- ; CHECK: %v1:_(<16 x s8>) = COPY $q0
- ; CHECK: %v2:_(<16 x s8>) = COPY $q1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
- ; CHECK: %shuf:_(<16 x s8>) = G_EXT %v1, %v2, [[C]](s32)
- ; CHECK: $q0 = COPY %shuf(<16 x s8>)
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
+ ; CHECK-NEXT: %v2:_(<16 x s8>) = COPY $q1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: %shuf:_(<16 x s8>) = G_EXT %v1, %v2, [[C]](s32)
+ ; CHECK-NEXT: $q0 = COPY %shuf(<16 x s8>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%v1:_(<16 x s8>) = COPY $q0
%v2:_(<16 x s8>) = COPY $q1
%shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)
@@ -80,12 +83,13 @@ body: |
liveins: $q0, $q1
; CHECK-LABEL: name: v16s8_cst7
; CHECK: liveins: $q0, $q1
- ; CHECK: %v1:_(<16 x s8>) = COPY $q0
- ; CHECK: %v2:_(<16 x s8>) = COPY $q1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
- ; CHECK: %shuf:_(<16 x s8>) = G_EXT %v2, %v1, [[C]](s32)
- ; CHECK: $q0 = COPY %shuf(<16 x s8>)
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
+ ; CHECK-NEXT: %v2:_(<16 x s8>) = COPY $q1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+ ; CHECK-NEXT: %shuf:_(<16 x s8>) = G_EXT %v2, %v1, [[C]](s32)
+ ; CHECK-NEXT: $q0 = COPY %shuf(<16 x s8>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%v1:_(<16 x s8>) = COPY $q0
%v2:_(<16 x s8>) = COPY $q1
%shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6)
@@ -102,12 +106,13 @@ body: |
liveins: $d0, $d1
; CHECK-LABEL: name: v4s16_cst6
; CHECK: liveins: $d0, $d1
- ; CHECK: %v1:_(<4 x s16>) = COPY $d0
- ; CHECK: %v2:_(<4 x s16>) = COPY $d1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; CHECK: %shuf:_(<4 x s16>) = G_EXT %v1, %v2, [[C]](s32)
- ; CHECK: $d0 = COPY %shuf(<4 x s16>)
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<4 x s16>) = COPY $d0
+ ; CHECK-NEXT: %v2:_(<4 x s16>) = COPY $d1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+ ; CHECK-NEXT: %shuf:_(<4 x s16>) = G_EXT %v1, %v2, [[C]](s32)
+ ; CHECK-NEXT: $d0 = COPY %shuf(<4 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%v1:_(<4 x s16>) = COPY $d0
%v2:_(<4 x s16>) = COPY $d1
%shuf:_(<4 x s16>) = G_SHUFFLE_VECTOR %v1(<4 x s16>), %v2, shufflemask(3, 4, 5, 6)
@@ -124,12 +129,13 @@ body: |
liveins: $q0, $q1
; CHECK-LABEL: name: v4s32_cst12
; CHECK: liveins: $q0, $q1
- ; CHECK: %v1:_(<4 x s32>) = COPY $q0
- ; CHECK: %v2:_(<4 x s32>) = COPY $q1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CHECK: %shuf:_(<4 x s32>) = G_EXT %v1, %v2, [[C]](s32)
- ; CHECK: $q0 = COPY %shuf(<4 x s32>)
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<4 x s32>) = COPY $q0
+ ; CHECK-NEXT: %v2:_(<4 x s32>) = COPY $q1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+ ; CHECK-NEXT: %shuf:_(<4 x s32>) = G_EXT %v1, %v2, [[C]](s32)
+ ; CHECK-NEXT: $q0 = COPY %shuf(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%v1:_(<4 x s32>) = COPY $q0
%v2:_(<4 x s32>) = COPY $q1
%shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %v1(<4 x s32>), %v2, shufflemask(3, 4, 5, 6)
@@ -149,12 +155,13 @@ body: |
;
; CHECK-LABEL: name: undef_elts_should_match_1
; CHECK: liveins: $d0, $d1
- ; CHECK: %v1:_(<8 x s8>) = COPY $d0
- ; CHECK: %v2:_(<8 x s8>) = COPY $d1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
- ; CHECK: %shuf:_(<8 x s8>) = G_EXT %v1, %v2, [[C]](s32)
- ; CHECK: $d0 = COPY %shuf(<8 x s8>)
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<8 x s8>) = COPY $d0
+ ; CHECK-NEXT: %v2:_(<8 x s8>) = COPY $d1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: %shuf:_(<8 x s8>) = G_EXT %v1, %v2, [[C]](s32)
+ ; CHECK-NEXT: $d0 = COPY %shuf(<8 x s8>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%v1:_(<8 x s8>) = COPY $d0
%v2:_(<8 x s8>) = COPY $d1
%shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(3, -1, -1, 6, 7, 8, 9, 10)
@@ -174,12 +181,13 @@ body: |
;
; CHECK-LABEL: name: undef_elts_should_match_2
; CHECK: liveins: $d0, $d1
- ; CHECK: %v1:_(<8 x s8>) = COPY $d0
- ; CHECK: %v2:_(<8 x s8>) = COPY $d1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; CHECK: %shuf:_(<8 x s8>) = G_EXT %v2, %v1, [[C]](s32)
- ; CHECK: $d0 = COPY %shuf(<8 x s8>)
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<8 x s8>) = COPY $d0
+ ; CHECK-NEXT: %v2:_(<8 x s8>) = COPY $d1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+ ; CHECK-NEXT: %shuf:_(<8 x s8>) = G_EXT %v2, %v1, [[C]](s32)
+ ; CHECK-NEXT: $d0 = COPY %shuf(<8 x s8>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%v1:_(<8 x s8>) = COPY $d0
%v2:_(<8 x s8>) = COPY $d1
%shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(-1, -1, -1, -1, 2, 3, 4, 5)
@@ -198,12 +206,13 @@ body: |
; We should get a constant 7 here.
; CHECK-LABEL: name: undef_elts_should_match_3
; CHECK: liveins: $q0, $q1
- ; CHECK: %v1:_(<16 x s8>) = COPY $q0
- ; CHECK: %v2:_(<16 x s8>) = COPY $q1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
- ; CHECK: %shuf:_(<16 x s8>) = G_EXT %v2, %v1, [[C]](s32)
- ; CHECK: $q0 = COPY %shuf(<16 x s8>)
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
+ ; CHECK-NEXT: %v2:_(<16 x s8>) = COPY $q1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+ ; CHECK-NEXT: %shuf:_(<16 x s8>) = G_EXT %v2, %v1, [[C]](s32)
+ ; CHECK-NEXT: $q0 = COPY %shuf(<16 x s8>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%v1:_(<16 x s8>) = COPY $q0
%v2:_(<16 x s8>) = COPY $q1
%shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(23, 24, 25, 26, -1, -1, 29, 30, 31, 0, 1, 2, 3, 4, -1, 6)
@@ -222,12 +231,13 @@ body: |
; We should get a constant 10 here.
; CHECK-LABEL: name: undef_elts_should_match_4
; CHECK: liveins: $q0, $q1
- ; CHECK: %v1:_(<8 x s16>) = COPY $q0
- ; CHECK: %v2:_(<8 x s16>) = COPY $q1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
- ; CHECK: %shuf:_(<8 x s16>) = G_EXT %v2, %v1, [[C]](s32)
- ; CHECK: $q0 = COPY %shuf(<8 x s16>)
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<8 x s16>) = COPY $q0
+ ; CHECK-NEXT: %v2:_(<8 x s16>) = COPY $q1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+ ; CHECK-NEXT: %shuf:_(<8 x s16>) = G_EXT %v2, %v1, [[C]](s32)
+ ; CHECK-NEXT: $q0 = COPY %shuf(<8 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%v1:_(<8 x s16>) = COPY $q0
%v2:_(<8 x s16>) = COPY $q1
%shuf:_(<8 x s16>) = G_SHUFFLE_VECTOR %v1(<8 x s16>), %v2, shufflemask(-1, -1, -1, -1, 1, 2, 3, 4)
@@ -246,10 +256,11 @@ body: |
;
; CHECK-LABEL: name: all_undef
; CHECK: liveins: $q0, $q1
- ; CHECK: %v1:_(<8 x s16>) = COPY $q0
- ; CHECK: %shuf:_(<8 x s16>) = G_REV64 %v1
- ; CHECK: $q0 = COPY %shuf(<8 x s16>)
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<8 x s16>) = COPY $q0
+ ; CHECK-NEXT: %shuf:_(<8 x s16>) = G_REV64 %v1
+ ; CHECK-NEXT: $q0 = COPY %shuf(<8 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%v1:_(<8 x s16>) = COPY $q0
%v2:_(<8 x s16>) = COPY $q1
%shuf:_(<8 x s16>) = G_SHUFFLE_VECTOR %v1(<8 x s16>), %v2, shufflemask(-1, -1, -1, -1, -1, -1, -1, -1)