[llvm] bac995d - AMDGPU/GlobalISel: Clamp G_ZEXT source sizes

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 10 06:43:08 PST 2020


Author: Matt Arsenault
Date: 2020-01-10T09:42:49-05:00
New Revision: bac995d97896c1e785d709da24c55f0e050eb899

URL: https://github.com/llvm/llvm-project/commit/bac995d97896c1e785d709da24c55f0e050eb899
DIFF: https://github.com/llvm/llvm-project/commit/bac995d97896c1e785d709da24c55f0e050eb899.diff

LOG: AMDGPU/GlobalISel: Clamp G_ZEXT source sizes

Also clamps G_SEXT/G_ANYEXT, but the implementation there is more limited, so
fewer cases actually work.
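
For context, the change lands in the extension ruleset in AMDGPULegalizerInfo.cpp
(first hunk below). Here is a paraphrased sketch of what that ruleset looks like
after the patch; the opcode list and the elided legalFor() pairs are assumptions
based on this log, and only the entries visible in the hunk are verbatim:

  // Sketch only, as it would appear inside the AMDGPULegalizerInfo constructor;
  // not the actual file contents.
  const LLT S8 = LLT::scalar(8);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S96 = LLT::scalar(96);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
      .legalFor({/* ...other type pairs elided... */
                 {S96, S32},
                 // FIXME: Hack
                 {S64, LLT::scalar(33)},
                 {S32, S8}, {S32, LLT::scalar(24)}})
      .scalarize(0)
      // New with this patch: clamp scalar type index 0 to the s32..s64 range.
      // The {S128, S32} and {S128, S64} entries are dropped from legalFor();
      // types outside the range are now narrowed or widened by the legalizer
      // instead of being declared legal.
      .clampScalar(0, S32, S64);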

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-anyext.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ptrtoint.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zext.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 52bff2bc25f0..4d14a8572146 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -447,8 +447,9 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                {S96, S32},
                // FIXME: Hack
                {S64, LLT::scalar(33)},
-               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
-    .scalarize(0);
+               {S32, S8}, {S32, LLT::scalar(24)}})
+    .scalarize(0)
+    .clampScalar(0, S32, S64);
 
   // TODO: Split s1->s64 during regbankselect for VALU.
   auto &IToFP = getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
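
The check-line churn in the shift and ptrtoint tests below follows directly from
the clamp: a G_ZEXT to s128 or s256 is no longer declared legal, so the legalizer
narrows it into 32-bit (or 64-bit) pieces, keeping the source as the low part and
filling the high parts with zeros. A minimal sketch of that expansion, using
hypothetical names (illustrative only, not the verbatim LegalizerHelper code):

  // Expand %dst:_(s128) = G_ZEXT %src:_(s32) into s32 pieces.
  static void narrowZExtSketch(MachineIRBuilder &MIRBuilder, Register DstReg,
                               Register SrcReg, LLT DstTy) {
    LLT NarrowTy = LLT::scalar(32);
    unsigned NumParts = DstTy.getSizeInBits() / 32; // 4 parts for s128
    auto Zero = MIRBuilder.buildConstant(NarrowTy, 0);
    SmallVector<Register, 8> Parts;
    Parts.push_back(SrcReg);                  // low part: the original value
    for (unsigned I = 1; I != NumParts; ++I)
      Parts.push_back(Zero.getReg(0));        // high parts: zero
    MIRBuilder.buildMerge(DstReg, Parts);     // s128 = G_MERGE_VALUES of 4 x s32
  }

In the updated checks this shows up as a single G_CONSTANT i32 0 reused for every
high part of the G_MERGE_VALUES, replacing the old G_ZEXT/G_TRUNC pair; the
ptrtoint s64 to s128 case merges two s64 halves instead.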

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-anyext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-anyext.mir
index f33ddfa7585a..2d77dd3faff8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-anyext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-anyext.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer -global-isel-abort=0  %s -o - | FileCheck %s
 
 ---
 name: test_anyext_s32_to_s64
@@ -48,6 +48,22 @@ body: |
     $vgpr0 = COPY %2
 ...
 
+---
+name: test_anyext_s24_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s24_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: $vgpr0 = COPY [[COPY1]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s24) = G_TRUNC %0
+    %2:_(s32) = G_ANYEXT %1
+    $vgpr0 = COPY %2
+...
+
 ---
 name: test_anyext_s1_to_s32
 body: |
@@ -193,3 +209,213 @@ body: |
     %1:_(<4 x s64>) = G_ANYEXT %0
     $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
 ...
+
+---
+name: test_anyext_s8_to_s16
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s8_to_s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[TRUNC]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s16) = G_ANYEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_anyext_s8_to_s24
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s8_to_s24
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s24) = G_TRUNC [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[TRUNC]](s24)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s24) = G_ANYEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_anyext_s7_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s7_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[COPY1]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s7) = G_TRUNC %0
+    %2:_(s32) = G_ANYEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_anyext_s8_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s8_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[COPY1]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s32) = G_ANYEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_anyext_s32_to_s128
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s32_to_s128
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s128)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s128) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_anyext_s32_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s32_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s256) = G_ANYEXT [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s256)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s256) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_anyext_s32_to_s512
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s32_to_s512
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s512) = G_ANYEXT [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s512)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s512) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_anyext_s32_to_s1024
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_anyext_s32_to_s1024
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s1024) = G_ANYEXT [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s1024)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s1024) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_anyext_s64_to_s128
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_anyext_s64_to_s128
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[COPY]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s128)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s128) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_anyext_s64_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_anyext_s64_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s256) = G_ANYEXT [[COPY]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s256)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s256) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_anyext_s64_to_s512
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_anyext_s64_to_s512
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s512) = G_ANYEXT [[COPY]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s512)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s512) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_anyext_s64_to_s1024
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_anyext_s64_to_s1024
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s1024) = G_ANYEXT [[COPY]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s1024)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s1024) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+# ---
+# name: test_anyext_s96_to_s128
+# body: |
+#   bb.0:
+#     liveins: $vgpr0_vgpr1_vgpr2
+
+#     %0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+#     %1:_(s128) = G_ANYEXT %0
+#     S_ENDPGM 0, implicit %1
+# ...
+
+---
+name: test_anyext_s128_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_anyext_s128_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s256) = G_ANYEXT [[COPY]](s128)
+    ; CHECK: S_ENDPGM 0, implicit [[ANYEXT]](s256)
+    %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %1:_(s256) = G_ANYEXT %0
+    S_ENDPGM 0, implicit %1
+...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir
index bee1b122232d..2577df7debad 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir
@@ -836,15 +836,15 @@ body: |
     ; SI-LABEL: name: test_ashr_s128_s128
     ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; SI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s32)
     ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
     ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
@@ -855,20 +855,20 @@ body: |
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
     ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
-    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     ; VI-LABEL: name: test_ashr_s128_s128
     ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; VI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s32)
     ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
     ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
@@ -879,20 +879,20 @@ body: |
     ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
     ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
     ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
-    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     ; GFX9-LABEL: name: test_ashr_s128_s128
     ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; GFX9: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s32)
     ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
     ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
@@ -903,8 +903,8 @@ body: |
     ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
     ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
     ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
-    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     %1:_(s32) = COPY $vgpr4
     %2:_(s128) = G_ZEXT %1
@@ -1240,21 +1240,21 @@ body: |
     ; SI-LABEL: name: test_ashr_s256_s256
     ; SI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; SI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; SI: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; SI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[TRUNC]](s32)
     ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
     ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
@@ -1269,7 +1269,7 @@ body: |
     ; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; SI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
     ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
     ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1283,7 +1283,7 @@ body: |
     ; SI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; SI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; SI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; SI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB1]](s32)
     ; SI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; SI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
@@ -1301,7 +1301,7 @@ body: |
     ; SI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; SI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; SI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; SI: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB]](s32)
     ; SI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV10]], [[SUB]](s32)
     ; SI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV11]], [[SUB9]](s32)
@@ -1316,30 +1316,30 @@ body: |
     ; SI: [[UV12:%[0-9]+]]:_(s64), [[UV13:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; SI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV12]], [[SELECT12]]
     ; SI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV13]], [[SELECT13]]
-    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
     ; SI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[ASHR3]]
     ; SI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[ASHR4]]
-    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; SI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; SI: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; SI: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     ; VI-LABEL: name: test_ashr_s256_s256
     ; VI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; VI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; VI: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; VI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[TRUNC]](s32)
     ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
     ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
@@ -1354,7 +1354,7 @@ body: |
     ; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; VI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
     ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
     ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1368,7 +1368,7 @@ body: |
     ; VI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; VI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; VI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; VI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB1]](s32)
     ; VI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; VI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
@@ -1386,7 +1386,7 @@ body: |
     ; VI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; VI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; VI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; VI: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB]](s32)
     ; VI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV10]], [[SUB]](s32)
     ; VI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV11]], [[SUB9]](s32)
@@ -1401,30 +1401,30 @@ body: |
     ; VI: [[UV12:%[0-9]+]]:_(s64), [[UV13:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; VI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV12]], [[SELECT12]]
     ; VI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV13]], [[SELECT13]]
-    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
     ; VI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[ASHR3]]
     ; VI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[ASHR4]]
-    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; VI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; VI: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; VI: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     ; GFX9-LABEL: name: test_ashr_s256_s256
     ; GFX9: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; GFX9: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX9: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[TRUNC]](s32)
     ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
     ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
@@ -1439,7 +1439,7 @@ body: |
     ; GFX9: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; GFX9: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
     ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
     ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1453,7 +1453,7 @@ body: |
     ; GFX9: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; GFX9: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; GFX9: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB1]](s32)
     ; GFX9: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; GFX9: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
@@ -1471,7 +1471,7 @@ body: |
     ; GFX9: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; GFX9: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; GFX9: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; GFX9: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB]](s32)
     ; GFX9: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV10]], [[SUB]](s32)
     ; GFX9: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV11]], [[SUB9]](s32)
@@ -1486,12 +1486,12 @@ body: |
     ; GFX9: [[UV12:%[0-9]+]]:_(s64), [[UV13:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; GFX9: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV12]], [[SELECT12]]
     ; GFX9: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV13]], [[SELECT13]]
-    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
     ; GFX9: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[ASHR3]]
     ; GFX9: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[ASHR4]]
-    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; GFX9: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; GFX9: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; GFX9: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     %0:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     %1:_(s32) = COPY $vgpr8
     %2:_(s256) = G_ZEXT %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir
index 4b2c9eff1920..23e59270d7e5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir
@@ -818,15 +818,15 @@ body: |
     ; SI-LABEL: name: test_lshr_s128_s128
     ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; SI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s32)
     ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
     ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
@@ -836,20 +836,20 @@ body: |
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
     ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
-    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     ; VI-LABEL: name: test_lshr_s128_s128
     ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; VI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s32)
     ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
     ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
@@ -859,20 +859,20 @@ body: |
     ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
     ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
     ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
-    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     ; GFX9-LABEL: name: test_lshr_s128_s128
     ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; GFX9: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s32)
     ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
     ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
@@ -882,8 +882,8 @@ body: |
     ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
     ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
     ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
-    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     %1:_(s32) = COPY $vgpr4
     %2:_(s128) = G_ZEXT %1
@@ -1216,21 +1216,21 @@ body: |
     ; SI-LABEL: name: test_lshr_s256_s256
     ; SI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; SI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; SI: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; SI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[TRUNC]](s32)
     ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
     ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
@@ -1244,7 +1244,7 @@ body: |
     ; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; SI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
     ; SI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
     ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1257,7 +1257,7 @@ body: |
     ; SI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; SI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; SI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; SI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB1]](s32)
     ; SI: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; SI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
@@ -1272,7 +1272,7 @@ body: |
     ; SI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; SI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; SI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; SI: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB]](s32)
     ; SI: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB]](s32)
     ; SI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB9]](s32)
@@ -1286,30 +1286,30 @@ body: |
     ; SI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; SI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT12]]
     ; SI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT13]]
-    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
     ; SI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[C3]]
     ; SI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C3]]
-    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; SI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; SI: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; SI: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     ; VI-LABEL: name: test_lshr_s256_s256
     ; VI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; VI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; VI: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; VI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[TRUNC]](s32)
     ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
     ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
@@ -1323,7 +1323,7 @@ body: |
     ; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; VI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
     ; VI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
     ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1336,7 +1336,7 @@ body: |
     ; VI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; VI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; VI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; VI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB1]](s32)
     ; VI: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; VI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
@@ -1351,7 +1351,7 @@ body: |
     ; VI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; VI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; VI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; VI: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB]](s32)
     ; VI: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB]](s32)
     ; VI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB9]](s32)
@@ -1365,30 +1365,30 @@ body: |
     ; VI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; VI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT12]]
     ; VI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT13]]
-    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
     ; VI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[C3]]
     ; VI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C3]]
-    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; VI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; VI: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; VI: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     ; GFX9-LABEL: name: test_lshr_s256_s256
     ; GFX9: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; GFX9: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX9: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[TRUNC]](s32)
     ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
     ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
@@ -1402,7 +1402,7 @@ body: |
     ; GFX9: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; GFX9: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
     ; GFX9: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
     ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1415,7 +1415,7 @@ body: |
     ; GFX9: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; GFX9: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; GFX9: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB1]](s32)
     ; GFX9: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; GFX9: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
@@ -1430,7 +1430,7 @@ body: |
     ; GFX9: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; GFX9: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; GFX9: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; GFX9: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB]](s32)
     ; GFX9: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB]](s32)
     ; GFX9: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB9]](s32)
@@ -1444,12 +1444,12 @@ body: |
     ; GFX9: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; GFX9: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT12]]
     ; GFX9: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT13]]
-    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
     ; GFX9: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[C3]]
     ; GFX9: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C3]]
-    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; GFX9: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; GFX9: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; GFX9: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     %0:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     %1:_(s32) = COPY $vgpr8
     %2:_(s256) = G_ZEXT %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ptrtoint.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ptrtoint.mir
index 37ad65b6e2cb..732d6b71a3c4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ptrtoint.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ptrtoint.mir
@@ -116,8 +116,9 @@ body: |
     ; CHECK-LABEL: name: test_ptrtoint_p0_to_s128
     ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
     ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY]](p0)
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[PTRTOINT]](s64)
-    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[ZEXT]](s128)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[PTRTOINT]](s64), [[C]](s64)
+    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
     %0:_(p0) = COPY $vgpr0_vgpr1
     %1:_(s128) = G_PTRTOINT %0
     $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext.mir
index 48740e6537b2..cbff2b8f109c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck %s
 
 ---
 name: test_sext_s32_to_s64
@@ -55,6 +55,25 @@ body: |
     $vgpr0 = COPY %2
 ...
 
+---
+name: test_sext_s24_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s24_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: $vgpr0 = COPY [[ASHR]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s24) = G_TRUNC %0
+    %2:_(s32) = G_SEXT %1
+    $vgpr0 = COPY %2
+...
+
 ---
 name: test_sext_i1_to_s32
 body: |
@@ -222,3 +241,237 @@ body: |
     %1:_(<4 x s64>) = G_SEXT %0
     $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
 ...
+
+---
+name: test_sext_s8_to_s16
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s8_to_s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+    ; CHECK: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s16) = G_SEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_sext_s8_to_s24
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s8_to_s24
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[AND]](s32)
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C2]](s32)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND1]](s32)
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s24) = G_TRUNC [[ASHR1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[TRUNC]](s24)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s24) = G_SEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_sext_s7_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s7_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s7) = G_TRUNC %0
+    %2:_(s32) = G_SEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_sext_s8_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s8_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s32) = G_SEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_sext_s32_to_s128
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s32_to_s128
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s128) = G_SEXT [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[SEXT]](s128)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s128) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_sext_s32_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s32_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s256) = G_SEXT [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[SEXT]](s256)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s256) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_sext_s32_to_s512
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s32_to_s512
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s512) = G_SEXT [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[SEXT]](s512)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s512) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_sext_s32_to_s1024
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_sext_s32_to_s1024
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s1024) = G_SEXT [[COPY]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[SEXT]](s1024)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s1024) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_sext_s64_to_s128
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_sext_s64_to_s128
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32)
+    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[ASHR]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s128)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s128) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_sext_s64_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_sext_s64_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s256) = G_SEXT [[COPY]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[SEXT]](s256)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s256) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_sext_s64_to_s512
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_sext_s64_to_s512
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s512) = G_SEXT [[COPY]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[SEXT]](s512)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s512) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_sext_s64_to_s1024
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_sext_s64_to_s1024
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s1024) = G_SEXT [[COPY]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[SEXT]](s1024)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s1024) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+# ---
+# name: test_sext_s96_to_s128
+# body: |
+#   bb.0:
+#     liveins: $vgpr0_vgpr1_vgpr2
+
+#     %0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+#     %1:_(s128) = G_SEXT %0
+#     S_ENDPGM 0, implicit %1
+# ...
+
+---
+name: test_sext_s128_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_sext_s128_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s256) = G_SEXT [[COPY]](s128)
+    ; CHECK: S_ENDPGM 0, implicit [[SEXT]](s256)
+    %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %1:_(s256) = G_SEXT %0
+    S_ENDPGM 0, implicit %1
+...

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
index 3986de79befe..3599a8f891ca 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
@@ -877,15 +877,15 @@ body: |
     ; SI-LABEL: name: test_shl_s128_s128
     ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; SI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32)
     ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
     ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
@@ -895,20 +895,20 @@ body: |
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
     ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
-    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     ; VI-LABEL: name: test_shl_s128_s128
     ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; VI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32)
     ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
     ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
@@ -918,20 +918,20 @@ body: |
     ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
     ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
     ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
-    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     ; GFX9-LABEL: name: test_shl_s128_s128
     ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
-    ; GFX9: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
-    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
+    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32)
     ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
     ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
@@ -941,8 +941,8 @@ body: |
     ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
     ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
     ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
-    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV1]](s128)
     %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     %1:_(s32) = COPY $vgpr4
     %2:_(s128) = G_ZEXT %1
@@ -1275,21 +1275,21 @@ body: |
     ; SI-LABEL: name: test_shl_s256_s256
     ; SI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; SI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; SI: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; SI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[TRUNC]](s32)
     ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB3]](s32)
     ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
@@ -1303,7 +1303,7 @@ body: |
     ; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; SI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB1]](s32)
     ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB1]](s32)
     ; SI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1316,7 +1316,7 @@ body: |
     ; SI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; SI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; SI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; SI: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[TRUNC]](s32)
     ; SI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; SI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[TRUNC]](s32)
@@ -1331,7 +1331,7 @@ body: |
     ; SI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; SI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; SI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; SI: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[UV8]], [[SUB]](s32)
     ; SI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB9]](s32)
     ; SI: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
@@ -1342,33 +1342,33 @@ body: |
     ; SI: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV9]], [[SELECT10]]
     ; SI: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT]], [[C3]]
     ; SI: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C3]]
-    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
+    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
     ; SI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT9]]
     ; SI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
     ; SI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; SI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT14]]
     ; SI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT15]]
-    ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; SI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; SI: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; SI: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     ; VI-LABEL: name: test_shl_s256_s256
     ; VI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; VI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; VI: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; VI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[TRUNC]](s32)
     ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB3]](s32)
     ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
@@ -1382,7 +1382,7 @@ body: |
     ; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; VI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB1]](s32)
     ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB1]](s32)
     ; VI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1395,7 +1395,7 @@ body: |
     ; VI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; VI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; VI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; VI: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[TRUNC]](s32)
     ; VI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; VI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[TRUNC]](s32)
@@ -1410,7 +1410,7 @@ body: |
     ; VI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; VI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; VI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; VI: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[UV8]], [[SUB]](s32)
     ; VI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB9]](s32)
     ; VI: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
@@ -1421,33 +1421,33 @@ body: |
     ; VI: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV9]], [[SELECT10]]
     ; VI: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT]], [[C3]]
     ; VI: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C3]]
-    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
+    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
     ; VI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT9]]
     ; VI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
     ; VI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; VI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT14]]
     ; VI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT15]]
-    ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; VI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; VI: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; VI: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     ; GFX9-LABEL: name: test_shl_s256_s256
     ; GFX9: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
-    ; GFX9: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
-    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX9: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY1]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s256)
+    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
     ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
-    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
-    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
-    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C1]]
+    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[TRUNC]]
+    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
     ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
     ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[TRUNC]](s32)
     ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB3]](s32)
     ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
@@ -1461,7 +1461,7 @@ body: |
     ; GFX9: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C2]]
     ; GFX9: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB1]]
     ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C2]]
-    ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C1]]
+    ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C]]
     ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB1]](s32)
     ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB1]](s32)
     ; GFX9: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
@@ -1474,7 +1474,7 @@ body: |
     ; GFX9: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
     ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
     ; GFX9: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+    ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C]]
     ; GFX9: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[TRUNC]](s32)
     ; GFX9: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
     ; GFX9: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[TRUNC]](s32)
@@ -1489,7 +1489,7 @@ body: |
     ; GFX9: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C2]]
     ; GFX9: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SUB]]
     ; GFX9: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C2]]
-    ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C1]]
+    ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C]]
     ; GFX9: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[UV8]], [[SUB]](s32)
     ; GFX9: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB9]](s32)
     ; GFX9: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
@@ -1500,15 +1500,15 @@ body: |
     ; GFX9: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV9]], [[SELECT10]]
     ; GFX9: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT]], [[C3]]
     ; GFX9: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C3]]
-    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
+    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
     ; GFX9: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT9]]
     ; GFX9: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
     ; GFX9: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
     ; GFX9: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT14]]
     ; GFX9: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT15]]
-    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
-    ; GFX9: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+    ; GFX9: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+    ; GFX9: [[MV3:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV1]](s128), [[MV2]](s128)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV3]](s256)
     %0:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     %1:_(s32) = COPY $vgpr8
     %2:_(s256) = G_ZEXT %1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zext.mir
index 6f0568d157be..c289a6b87094 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zext.mir
@@ -52,6 +52,24 @@ body: |
     $vgpr0 = COPY %2
 ...
 
+---
+name: test_zext_s24_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s24_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+    ; CHECK: $vgpr0 = COPY [[AND]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s24) = G_TRUNC %0
+    %2:_(s32) = G_ZEXT %1
+    $vgpr0 = COPY %2
+...
+
 ---
 name: test_zext_i1_to_s32
 body: |
@@ -220,3 +238,234 @@ body: |
     %1:_(<4 x s64>) = G_ZEXT %0
     $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
 ...
+
+---
+name: test_zext_s8_to_s16
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s8_to_s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
+    ; CHECK: S_ENDPGM 0, implicit [[AND]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s16) = G_ZEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_zext_s8_to_s24
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s8_to_s24
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[COPY2]]
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s24) = G_TRUNC [[AND]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[TRUNC]](s24)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s24) = G_ZEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+
+---
+name: test_zext_s7_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s7_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+    ; CHECK: S_ENDPGM 0, implicit [[AND]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s7) = G_TRUNC %0
+    %2:_(s32) = G_ZEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_zext_s8_to_s32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s8_to_s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+    ; CHECK: S_ENDPGM 0, implicit [[AND]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s8) = G_TRUNC %0
+    %2:_(s32) = G_ZEXT %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: test_zext_s32_to_s128
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s32_to_s128
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s128)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s128) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_zext_s32_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s32_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s256)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s256) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_zext_s32_to_s512
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s32_to_s512
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s512) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s512)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s512) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_zext_s32_to_s1024
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_zext_s32_to_s1024
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s1024) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s1024)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s1024) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_zext_s64_to_s128
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_zext_s64_to_s128
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[C]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s128)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s128) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_zext_s64_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_zext_s64_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY]](s64), [[C]](s64), [[C]](s64), [[C]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s256)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s256) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_zext_s64_to_s512
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_zext_s64_to_s512
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s512) = G_MERGE_VALUES [[COPY]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s512)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s512) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_zext_s64_to_s1024
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_zext_s64_to_s1024
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s1024) = G_MERGE_VALUES [[COPY]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64), [[C]](s64)
+    ; CHECK: S_ENDPGM 0, implicit [[MV]](s1024)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s1024) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...
+
+# ---
+# name: test_zext_s96_to_s128
+# body: |
+#   bb.0:
+#     liveins: $vgpr0_vgpr1_vgpr2
+
+#     %0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+#     %1:_(s128) = G_ZEXT %0
+#     S_ENDPGM 0, implicit %1
+# ...
+
+---
+name: test_zext_s128_to_s256
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_zext_s128_to_s256
+    ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](s64), [[C]](s64)
+    ; CHECK: [[MV1:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY]](s128), [[MV]](s128)
+    ; CHECK: S_ENDPGM 0, implicit [[MV1]](s256)
+    %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %1:_(s256) = G_ZEXT %0
+    S_ENDPGM 0, implicit %1
+...