[llvm] 2840060 - [AArch64][GlobalISel] Add support for selection of s8:fpr = G_UNMERGE <8 x s8>

Amara Emerson via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 18 00:34:12 PDT 2021


Author: Amara Emerson
Date: 2021-08-18T00:34:06-07:00
New Revision: 284006079e0e2db986d1cacce8cb5329ffb86044

URL: https://github.com/llvm/llvm-project/commit/284006079e0e2db986d1cacce8cb5329ffb86044
DIFF: https://github.com/llvm/llvm-project/commit/284006079e0e2db986d1cacce8cb5329ffb86044.diff

LOG: [AArch64][GlobalISel] Add support for selection of s8:fpr = G_UNMERGE <8 x s8>

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/select-unmerge.mir

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index c88f0bcd70b9b..336cb7e422b36 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -3830,6 +3830,10 @@ static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
   // Choose a lane copy opcode and subregister based off of the size of the
   // vector's elements.
   switch (EltSize) {
+  case 8:
+    CopyOpc = AArch64::CPYi8;
+    ExtractSubReg = AArch64::bsub;
+    break;
   case 16:
     CopyOpc = AArch64::CPYi16;
     ExtractSubReg = AArch64::hsub;

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-unmerge.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-unmerge.mir
index 2ea2711521193..e047692f7d1e0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-unmerge.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-unmerge.mir
@@ -1,39 +1,8 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 
 # RUN: llc -O0 -mattr=-fullfp16 -mtriple=aarch64-- \
-# RUN: -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: -run-pass=instruction-select -verify-machineinstrs -global-isel-abort=1 %s -o - | FileCheck %s
 
---- |
-  define <2 x double> @test_v2s64_unmerge(<2 x double> %a) {
-    ret <2 x double> %a
-  }
-
-  define <4 x float> @test_v4s32_unmerge(<4 x float> %a) {
-    ret <4 x float> %a
-  }
-
-  define <2 x half> @test_v2s16_unmerge(<2 x half> %a) {
-    ret <2 x half> %a
-  }
-
-  define <4 x half> @test_v4s16_unmerge(<4 x half> %a) {
-    ret <4 x half> %a
-  }
-
-  define <8 x half> @test_v8s16_unmerge(<8 x half> %a) {
-    ret <8 x half> %a
-  }
-
-  define <2 x float> @test_vecsplit_2v2s32_v4s32(<4 x float> %a) {
-    ret <2 x float> undef
-  }
-
-  define <2 x half> @test_vecsplit_2v2s16_v4s16(<4 x half> %a) {
-    ret <2 x half> undef
-  }
-
-  define void @test_s128(i128 %p) { ret void }
-
-...
 ---
 name:            test_v2s64_unmerge
 alignment:       4
@@ -46,14 +15,23 @@ registers:
   - { id: 2, class: fpr }
   - { id: 3, class: fpr }
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $q0
-    ; CHECK-LABEL: name:            test_v2s64_unmerge
+    ; CHECK-LABEL: name: test_v2s64_unmerge
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]].dsub
+    ; CHECK: [[CPYi64_:%[0-9]+]]:fpr64 = CPYi64 [[COPY]], 1
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
+    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[CPYi64_]], %subreg.dsub
+    ; CHECK: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
+    ; CHECK: $q0 = COPY [[INSvi64lane]]
+    ; CHECK: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
 
     ; Since 2 * 64 = 128, we can just directly copy.
-    ; CHECK: %2:fpr64 = COPY %0.dsub
-    ; CHECK: %3:fpr64 = CPYi64 %0, 1
     %2:fpr(s64), %3:fpr(s64) = G_UNMERGE_VALUES %0(<2 x s64>)
 
     %1:fpr(<2 x s64>) = G_BUILD_VECTOR %2(s64), %3(s64)
@@ -74,16 +52,31 @@ registers:
   - { id: 4, class: fpr }
   - { id: 5, class: fpr }
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $q0
-    ; CHECK-LABEL: name:            test_v4s32_unmerge
+    ; CHECK-LABEL: name: test_v4s32_unmerge
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]].ssub
+    ; CHECK: [[CPYi32_:%[0-9]+]]:fpr32 = CPYi32 [[COPY]], 1
+    ; CHECK: [[CPYi32_1:%[0-9]+]]:fpr32 = CPYi32 [[COPY]], 2
+    ; CHECK: [[CPYi32_2:%[0-9]+]]:fpr32 = CPYi32 [[COPY]], 3
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.ssub
+    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[CPYi32_]], %subreg.ssub
+    ; CHECK: [[INSvi32lane:%[0-9]+]]:fpr128 = INSvi32lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
+    ; CHECK: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[CPYi32_1]], %subreg.ssub
+    ; CHECK: [[INSvi32lane1:%[0-9]+]]:fpr128 = INSvi32lane [[INSvi32lane]], 2, [[INSERT_SUBREG2]], 0
+    ; CHECK: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[CPYi32_2]], %subreg.ssub
+    ; CHECK: [[INSvi32lane2:%[0-9]+]]:fpr128 = INSvi32lane [[INSvi32lane1]], 3, [[INSERT_SUBREG3]], 0
+    ; CHECK: $q0 = COPY [[INSvi32lane2]]
+    ; CHECK: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
 
     ; Since 4 * 32 = 128, we can just directly copy.
-    ; CHECK: %2:fpr32 = COPY %0.ssub
-    ; CHECK: %3:fpr32 = CPYi32 %0, 1
-    ; CHECK: %4:fpr32 = CPYi32 %0, 2
-    ; CHECK: %5:fpr32 = CPYi32 %0, 3
     %2:fpr(s32), %3:fpr(s32), %4:fpr(s32), %5:fpr(s32) = G_UNMERGE_VALUES %0(<4 x s32>)
 
     %1:fpr(<4 x s32>) = G_BUILD_VECTOR %2(s32), %3(s32), %4(s32), %5(s32)
@@ -103,34 +96,35 @@ registers:
   - { id: 4, class: fpr }
   - { id: 5, class: fpr }
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $s0
-    ; CHECK-LABEL: name: test_v2s16_unmerge
 
+    ; CHECK-LABEL: name: test_v2s16_unmerge
+    ; CHECK: liveins: $s0
     ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    %0:fpr(<2 x s16>) = COPY $s0
-
-    ; Since 2 * 16 != 128, we need to widen using implicit defs.
-    ; Note that we expect to reuse one of the INSERT_SUBREG results, as CPYi16
-    ; expects a lane > 0.
     ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub
     ; CHECK: [[COPY1:%[0-9]+]]:fpr16 = COPY [[INSERT_SUBREG]].hsub
     ; CHECK: [[CPYi16_:%[0-9]+]]:fpr16 = CPYi16 [[INSERT_SUBREG]], 1
-    %2:fpr(s16), %3:fpr(s16) = G_UNMERGE_VALUES %0(<2 x s16>)
-
     ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY1]], %subreg.hsub
     ; CHECK: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[CPYi16_]], %subreg.hsub
     ; CHECK: [[INSvi16lane:%[0-9]+]]:fpr128 = INSvi16lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG2]], 0
     ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY [[INSvi16lane]].ssub
+    ; CHECK: $s0 = COPY [[COPY2]]
+    ; CHECK: RET_ReallyLR implicit $s0
+    %0:fpr(<2 x s16>) = COPY $s0
+
+    ; Since 2 * 16 != 128, we need to widen using implicit defs.
+    ; Note that we expect to reuse one of the INSERT_SUBREG results, as CPYi16
+    ; expects a lane > 0.
+    %2:fpr(s16), %3:fpr(s16) = G_UNMERGE_VALUES %0(<2 x s16>)
+
     %1:fpr(<2 x s16>) = G_BUILD_VECTOR %2(s16), %3(s16)
 
-    ; CHECK: $s0 = COPY [[COPY2]]
     $s0 = COPY %1(<2 x s16>)
 
-    ; CHECK: RET_ReallyLR implicit $s0
     RET_ReallyLR implicit $s0
 ...
 ---
@@ -147,24 +141,40 @@ registers:
   - { id: 4, class: fpr }
   - { id: 5, class: fpr }
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $d0
-    ; CHECK-LABEL: name:            test_v4s16_unmerge
+    ; CHECK-LABEL: name: test_v4s16_unmerge
+    ; CHECK: liveins: $d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
+    ; CHECK: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[COPY]], %subreg.dsub
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr16 = COPY [[INSERT_SUBREG]].hsub
+    ; CHECK: [[CPYi16_:%[0-9]+]]:fpr16 = CPYi16 [[INSERT_SUBREG]], 1
+    ; CHECK: [[CPYi16_1:%[0-9]+]]:fpr16 = CPYi16 [[INSERT_SUBREG1]], 2
+    ; CHECK: [[CPYi16_2:%[0-9]+]]:fpr16 = CPYi16 [[INSERT_SUBREG2]], 3
+    ; CHECK: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[COPY1]], %subreg.hsub
+    ; CHECK: [[DEF4:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG4:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], [[CPYi16_]], %subreg.hsub
+    ; CHECK: [[INSvi16lane:%[0-9]+]]:fpr128 = INSvi16lane [[INSERT_SUBREG3]], 1, [[INSERT_SUBREG4]], 0
+    ; CHECK: [[DEF5:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG5:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF5]], [[CPYi16_1]], %subreg.hsub
+    ; CHECK: [[INSvi16lane1:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane]], 2, [[INSERT_SUBREG5]], 0
+    ; CHECK: [[DEF6:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG6:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF6]], [[CPYi16_2]], %subreg.hsub
+    ; CHECK: [[INSvi16lane2:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane1]], 3, [[INSERT_SUBREG6]], 0
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi16lane2]].dsub
+    ; CHECK: $d0 = COPY [[COPY2]]
+    ; CHECK: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
 
     ; Since 4 * 16 != 128, we need to widen using implicit defs.
     ; Note that we expect to reuse one of the INSERT_SUBREG results, as CPYi16
     ; expects a lane > 0.
-    ; CHECK-DAG: [[IMPDEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INS_SHARED:%[0-9]+]]:fpr128 = INSERT_SUBREG [[IMPDEF1]], %0, %subreg.dsub
-    ; CHECK: [[IMPDEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INS2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[IMPDEF2]], %0, %subreg.dsub
-    ; CHECK: [[IMPDEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INS3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[IMPDEF3]], %0, %subreg.dsub
-    ; CHECK: %2:fpr16 = COPY [[INS_SHARED]].hsub
-    ; CHECK: %3:fpr16 = CPYi16 [[INS_SHARED]], 1
-    ; CHECK: %4:fpr16 = CPYi16 [[INS2]], 2
-    ; CHECK: %5:fpr16 = CPYi16 [[INS3]], 3
     %2:fpr(s16), %3:fpr(s16), %4:fpr(s16), %5:fpr(s16) = G_UNMERGE_VALUES %0(<4 x s16>)
 
     %1:fpr(<4 x s16>) = G_BUILD_VECTOR %2(s16), %3(s16), %4(s16), %5(s16)
@@ -189,20 +199,47 @@ registers:
   - { id: 8, class: fpr }
   - { id: 9, class: fpr }
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $q0
-    ; CHECK-LABEL: name:            test_v8s16_unmerge
+    ; CHECK-LABEL: name: test_v8s16_unmerge
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr16 = COPY [[COPY]].hsub
+    ; CHECK: [[CPYi16_:%[0-9]+]]:fpr16 = CPYi16 [[COPY]], 1
+    ; CHECK: [[CPYi16_1:%[0-9]+]]:fpr16 = CPYi16 [[COPY]], 2
+    ; CHECK: [[CPYi16_2:%[0-9]+]]:fpr16 = CPYi16 [[COPY]], 3
+    ; CHECK: [[CPYi16_3:%[0-9]+]]:fpr16 = CPYi16 [[COPY]], 4
+    ; CHECK: [[CPYi16_4:%[0-9]+]]:fpr16 = CPYi16 [[COPY]], 5
+    ; CHECK: [[CPYi16_5:%[0-9]+]]:fpr16 = CPYi16 [[COPY]], 6
+    ; CHECK: [[CPYi16_6:%[0-9]+]]:fpr16 = CPYi16 [[COPY]], 7
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.hsub
+    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[CPYi16_]], %subreg.hsub
+    ; CHECK: [[INSvi16lane:%[0-9]+]]:fpr128 = INSvi16lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
+    ; CHECK: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[CPYi16_1]], %subreg.hsub
+    ; CHECK: [[INSvi16lane1:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane]], 2, [[INSERT_SUBREG2]], 0
+    ; CHECK: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[CPYi16_2]], %subreg.hsub
+    ; CHECK: [[INSvi16lane2:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane1]], 3, [[INSERT_SUBREG3]], 0
+    ; CHECK: [[DEF4:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG4:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], [[CPYi16_3]], %subreg.hsub
+    ; CHECK: [[INSvi16lane3:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane2]], 4, [[INSERT_SUBREG4]], 0
+    ; CHECK: [[DEF5:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG5:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF5]], [[CPYi16_4]], %subreg.hsub
+    ; CHECK: [[INSvi16lane4:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane3]], 5, [[INSERT_SUBREG5]], 0
+    ; CHECK: [[DEF6:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG6:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF6]], [[CPYi16_5]], %subreg.hsub
+    ; CHECK: [[INSvi16lane5:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane4]], 6, [[INSERT_SUBREG6]], 0
+    ; CHECK: [[DEF7:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG7:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF7]], [[CPYi16_6]], %subreg.hsub
+    ; CHECK: [[INSvi16lane6:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane5]], 7, [[INSERT_SUBREG7]], 0
+    ; CHECK: $q0 = COPY [[INSvi16lane6]]
+    ; CHECK: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
 
     ; Since 8 * 16 = 128, we can just directly copy.
-    ; CHECK: %2:fpr16 = COPY %0.hsub
-    ; CHECK: %3:fpr16 = CPYi16 %0, 1
-    ; CHECK: %4:fpr16 = CPYi16 %0, 2
-    ; CHECK: %5:fpr16 = CPYi16 %0, 3
-    ; CHECK: %6:fpr16 = CPYi16 %0, 4
-    ; CHECK: %7:fpr16 = CPYi16 %0, 5
-    ; CHECK: %8:fpr16 = CPYi16 %0, 6
-    ; CHECK: %9:fpr16 = CPYi16 %0, 7
     %2:fpr(s16), %3:fpr(s16), %4:fpr(s16), %5:fpr(s16), %6:fpr(s16), %7:fpr(s16), %8:fpr(s16), %9:fpr(s16) = G_UNMERGE_VALUES %0(<8 x s16>)
 
     %1:fpr(<8 x s16>) = G_BUILD_VECTOR %2:fpr(s16), %3:fpr(s16), %4:fpr(s16), %5:fpr(s16), %6:fpr(s16), %7:fpr(s16), %8:fpr(s16), %9:fpr(s16)
@@ -210,13 +247,68 @@ body:             |
     RET_ReallyLR implicit $q0
 ...
 ---
+name:            test_v8s8_unmerge
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $d0
+    ; CHECK-LABEL: name: test_v8s8_unmerge
+    ; CHECK: liveins: $d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
+    ; CHECK: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[COPY]], %subreg.dsub
+    ; CHECK: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[COPY]], %subreg.dsub
+    ; CHECK: [[DEF4:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG4:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], [[COPY]], %subreg.dsub
+    ; CHECK: [[DEF5:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG5:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF5]], [[COPY]], %subreg.dsub
+    ; CHECK: [[DEF6:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG6:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF6]], [[COPY]], %subreg.dsub
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr8 = COPY [[INSERT_SUBREG]].bsub
+    ; CHECK: [[CPYi8_:%[0-9]+]]:fpr8 = CPYi8 [[INSERT_SUBREG]], 1
+    ; CHECK: [[CPYi8_1:%[0-9]+]]:fpr8 = CPYi8 [[INSERT_SUBREG1]], 2
+    ; CHECK: [[CPYi8_2:%[0-9]+]]:fpr8 = CPYi8 [[INSERT_SUBREG2]], 3
+    ; CHECK: [[CPYi8_3:%[0-9]+]]:fpr8 = CPYi8 [[INSERT_SUBREG3]], 4
+    ; CHECK: [[CPYi8_4:%[0-9]+]]:fpr8 = CPYi8 [[INSERT_SUBREG4]], 5
+    ; CHECK: [[CPYi8_5:%[0-9]+]]:fpr8 = CPYi8 [[INSERT_SUBREG5]], 6
+    ; CHECK: [[CPYi8_6:%[0-9]+]]:fpr8 = CPYi8 [[INSERT_SUBREG6]], 7
+    ; CHECK: $b0 = COPY [[COPY1]]
+    ; CHECK: $b1 = COPY [[CPYi8_]]
+    ; CHECK: $b2 = COPY [[CPYi8_1]]
+    ; CHECK: $b3 = COPY [[CPYi8_2]]
+    ; CHECK: $b4 = COPY [[CPYi8_3]]
+    ; CHECK: $b5 = COPY [[CPYi8_4]]
+    ; CHECK: $b6 = COPY [[CPYi8_5]]
+    ; CHECK: $b7 = COPY [[CPYi8_6]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:fpr(<8 x s8>) = COPY $d0
+    %2:fpr(s8), %3:fpr(s8), %4:fpr(s8), %5:fpr(s8), %6:fpr(s8), %7:fpr(s8), %8:fpr(s8), %9:fpr(s8) = G_UNMERGE_VALUES %0(<8 x s8>)
+    $b0 = COPY %2
+    $b1 = COPY %3
+    $b2 = COPY %4
+    $b3 = COPY %5
+    $b4 = COPY %6
+    $b5 = COPY %7
+    $b6 = COPY %8
+    $b7 = COPY %9
+    RET_ReallyLR implicit $d0
+...
+---
 name:            test_vecsplit_2v2s32_v4s32
 alignment:       4
 legalized:       true
 regBankSelected: true
 tracksRegLiveness: true
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $q0
     ; CHECK-LABEL: name: test_vecsplit_2v2s32_v4s32
     ; CHECK: liveins: $q0
@@ -239,7 +331,7 @@ legalized:       true
 regBankSelected: true
 tracksRegLiveness: true
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $d0
     ; CHECK-LABEL: name: test_vecsplit_2v2s16_v4s16
     ; CHECK: liveins: $d0
@@ -266,6 +358,14 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     liveins: $q0
+    ; CHECK-LABEL: name: test_s128
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]].dsub
+    ; CHECK: [[CPYi64_:%[0-9]+]]:fpr64 = CPYi64 [[COPY]], 1
+    ; CHECK: $d0 = COPY [[COPY1]]
+    ; CHECK: $d1 = COPY [[CPYi64_]]
+    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1
     %0:fpr(s128) = COPY $q0
     %1:fpr(s64), %2:fpr(s64) = G_UNMERGE_VALUES %0(s128)
     $d0 = COPY %1(s64)


        


More information about the llvm-commits mailing list