[llvm] 0da937b - [GlobalISel][IRTranslator] Follow convention and put constant offset of getelementptr arithmetic on RHS.

Amara Emerson via llvm-commits llvm-commits@lists.llvm.org
Wed Jan 29 11:37:30 PST 2020


Author: Amara Emerson
Date: 2020-01-29T11:37:19-08:00
New Revision: 0da937bb5c2bf60a539515975c72a06e59d10c4b

URL: https://github.com/llvm/llvm-project/commit/0da937bb5c2bf60a539515975c72a06e59d10c4b
DIFF: https://github.com/llvm/llvm-project/commit/0da937bb5c2bf60a539515975c72a06e59d10c4b.diff

LOG: [GlobalISel][IRTranslator] Follow convention and put constant offset of getelementptr arithmetic on RHS.

We were needlessly putting known constant values on the LHS of a G_MUL, which
is suboptimal.

Differential Revision: https://reviews.llvm.org/D73650

Added: 
    

Modified: 
    llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
    llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/lds-relocs.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.dec.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.inc.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.update.dpp.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll
    llvm/test/CodeGen/X86/GlobalISel/add-ext.ll
    llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index a456c2a6a63e..e29eb15510fd 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1105,7 +1105,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
         auto ElementSizeMIB = MIRBuilder.buildConstant(
             getLLTForType(*OffsetIRTy, *DL), ElementSize);
         GepOffsetReg =
-            MIRBuilder.buildMul(OffsetTy, ElementSizeMIB, IdxReg).getReg(0);
+            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
       } else
         GepOffsetReg = IdxReg;
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
index 457a5337d1c4..80629f2f9b1a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
@@ -12,15 +12,15 @@ define i32 @cse_gep([4 x i32]* %ptr, i32 %idx) {
   ; O0:   [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
   ; O0:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
   ; O0:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-  ; O0:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
-  ; O0:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
-  ; O0:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
+  ; O0:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
+  ; O0:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
+  ; O0:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; O0:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.gep1)
-  ; O0:   [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
-  ; O0:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64)
+  ; O0:   [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
+  ; O0:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64)
   ; O0:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-  ; O0:   [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP1]], [[C1]](s64)
-  ; O0:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.gep2)
+  ; O0:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+  ; O0:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 4 from %ir.gep2)
   ; O0:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[LOAD1]]
   ; O0:   $w0 = COPY [[ADD]](s32)
   ; O0:   RET_ReallyLR implicit $w0
@@ -31,13 +31,13 @@ define i32 @cse_gep([4 x i32]* %ptr, i32 %idx) {
   ; O3:   [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
   ; O3:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
   ; O3:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-  ; O3:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
-  ; O3:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
-  ; O3:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
+  ; O3:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
+  ; O3:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
+  ; O3:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; O3:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.gep1)
   ; O3:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-  ; O3:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP]], [[C1]](s64)
-  ; O3:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.gep2)
+  ; O3:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
+  ; O3:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 4 from %ir.gep2)
   ; O3:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[LOAD1]]
   ; O3:   $w0 = COPY [[ADD]](s32)
   ; O3:   RET_ReallyLR implicit $w0

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
index 55bb11b75172..bc973b34d6fd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
@@ -182,13 +182,13 @@ define i32 @jt_test(i32 %x) {
   ; CHECK:   G_BRJT [[JUMP_TABLE]](p0), %jump-table.0, [[ZEXT]](s64)
   ; CHECK: bb.2.sw.bb:
   ; CHECK:   successors: %bb.4(0x80000000)
-  ; CHECK:   %11:_(s32) = nsw G_ADD [[COPY]], [[C2]]
+  ; CHECK:   [[ADD:%[0-9]+]]:_(s32) = nsw G_ADD [[COPY]], [[C2]]
   ; CHECK:   G_BR %bb.4
   ; CHECK: bb.3.sw.bb1:
   ; CHECK:   successors: %bb.4(0x80000000)
-  ; CHECK:   %9:_(s32) = nsw G_MUL [[COPY]], [[C1]]
+  ; CHECK:   [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[COPY]], [[C1]]
   ; CHECK: bb.4.return:
-  ; CHECK:   [[PHI:%[0-9]+]]:_(s32) = G_PHI %9(s32), %bb.3, %11(s32), %bb.2, [[C3]](s32), %bb.1, [[C3]](s32), %bb.5
+  ; CHECK:   [[PHI:%[0-9]+]]:_(s32) = G_PHI [[MUL]](s32), %bb.3, [[ADD]](s32), %bb.2, [[C3]](s32), %bb.1, [[C3]](s32), %bb.5
   ; CHECK:   $w0 = COPY [[PHI]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
 entry:
@@ -780,11 +780,11 @@ define void @jt_multiple_jump_tables(%1* %arg, i32 %arg1, i32* %arg2) {
   ; CHECK:   successors: %bb.59(0x80000000)
   ; CHECK:   [[PHI:%[0-9]+]]:_(s64) = G_PHI [[C55]](s64), %bb.1, [[C56]](s64), %bb.2, [[C57]](s64), %bb.3, [[C58]](s64), %bb.4, [[C59]](s64), %bb.5, [[C60]](s64), %bb.6, [[C61]](s64), %bb.7, [[C62]](s64), %bb.8, [[C63]](s64), %bb.9, [[C64]](s64), %bb.10, [[C65]](s64), %bb.11, [[C66]](s64), %bb.12, [[C67]](s64), %bb.13, [[C68]](s64), %bb.14, [[C69]](s64), %bb.15, [[C70]](s64), %bb.16, [[C71]](s64), %bb.17, [[C72]](s64), %bb.18, [[C73]](s64), %bb.19, [[C74]](s64), %bb.20, [[C75]](s64), %bb.21, [[C76]](s64), %bb.22, [[C77]](s64), %bb.23, [[C78]](s64), %bb.24, [[C79]](s64), %bb.25, [[C80]](s64), %bb.26, [[C81]](s64), %bb.27, [[C82]](s64), %bb.28, [[C83]](s64), %bb.29, [[C84]](s64), %bb.30, [[C85]](s64), %bb.31, [[C86]](s64), %bb.32, [[C87]](s64), %bb.33, [[C88]](s64), %bb.34, [[C89]](s64), %bb.35, [[C90]](s64), %bb.36, [[C91]](s64), %bb.37, [[C92]](s64), %bb.38, [[C93]](s64), %bb.39, [[C94]](s64), %bb.40, [[C95]](s64), %bb.41, [[C96]](s64), %bb.42, [[C97]](s64), %bb.43, [[C98]](s64), %bb.44, [[C99]](s64), %bb.45, [[C100]](s64), %bb.46, [[C101]](s64), %bb.47, [[C102]](s64), %bb.48, [[C103]](s64), %bb.49, [[C104]](s64), %bb.50, [[C105]](s64), %bb.51, [[C106]](s64), %bb.52, [[C107]](s64), %bb.53, [[C108]](s64), %bb.54, [[C109]](s64), %bb.55
   ; CHECK:   [[C110:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C110]], [[PHI]]
-  ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[MUL]](s64)
+  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[PHI]], [[C110]]
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[MUL]](s64)
   ; CHECK:   [[C111:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-  ; CHECK:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP]], [[C111]](s64)
-  ; CHECK:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.tmp59)
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C111]](s64)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD1]](p0) :: (load 8 from %ir.tmp59)
   ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
   ; CHECK:   $x0 = COPY [[COPY]](p0)
   ; CHECK:   $x1 = COPY [[LOAD]](p0)
@@ -1319,13 +1319,13 @@ define i32 @range_test(i32 %x) {
   ; CHECK:   G_BR %bb.2
   ; CHECK: bb.2.sw.bb:
   ; CHECK:   successors: %bb.4(0x80000000)
-  ; CHECK:   %12:_(s32) = nsw G_ADD [[COPY]], [[C3]]
+  ; CHECK:   [[ADD:%[0-9]+]]:_(s32) = nsw G_ADD [[COPY]], [[C3]]
   ; CHECK:   G_BR %bb.4
   ; CHECK: bb.3.sw.bb1:
   ; CHECK:   successors: %bb.4(0x80000000)
-  ; CHECK:   %10:_(s32) = nsw G_MUL [[COPY]], [[C2]]
+  ; CHECK:   [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[COPY]], [[C2]]
   ; CHECK: bb.4.return:
-  ; CHECK:   [[PHI:%[0-9]+]]:_(s32) = G_PHI %10(s32), %bb.3, %12(s32), %bb.2, [[C4]](s32), %bb.5
+  ; CHECK:   [[PHI:%[0-9]+]]:_(s32) = G_PHI [[MUL]](s32), %bb.3, [[ADD]](s32), %bb.2, [[C4]](s32), %bb.5
   ; CHECK:   $w0 = COPY [[PHI]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
 entry:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
index aefc2068307f..b1cf6b2a0724 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
@@ -10,8 +10,8 @@ define i8*  @translate_element_size1(i64 %arg) {
   ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
   ; CHECK:   [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[C]](s64)
-  ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[COPY]](s64)
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[COPY]](s64)
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; CHECK:   $x0 = COPY [[COPY1]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %tmp = getelementptr i8, i8* null, i64 %arg
@@ -25,8 +25,8 @@ define %type* @first_offset_const(%type* %addr) {
   ; CHECK:   liveins: $x0
   ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-  ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-  ; CHECK:   $x0 = COPY [[GEP]](p0)
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+  ; CHECK:   $x0 = COPY [[PTR_ADD]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type, %type* %addr, i32 1
   ret %type* %res
@@ -53,9 +53,9 @@ define %type* @first_offset_variable(%type* %addr, i64 %idx) {
   ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[COPY1]]
-  ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
-  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
+  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[C]]
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
+  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; CHECK:   $x0 = COPY [[COPY2]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type, %type* %addr, i64 %idx
@@ -71,9 +71,9 @@ define %type* @first_offset_ext(%type* %addr, i32 %idx) {
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
   ; CHECK:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
-  ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
-  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
+  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
+  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; CHECK:   $x0 = COPY [[COPY2]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type, %type* %addr, i32 %idx
@@ -89,11 +89,11 @@ define i32* @const_then_var(%type1* %addr, i64 %idx) {
   ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 272
-  ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
   ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C1]], [[COPY1]]
-  ; CHECK:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP]], [[MUL]](s64)
-  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP1]](p0)
+  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[C1]]
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[MUL]](s64)
+  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD1]](p0)
   ; CHECK:   $x0 = COPY [[COPY2]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type1, %type1* %addr, i32 4, i32 1, i64 %idx
@@ -108,11 +108,11 @@ define i32* @var_then_const(%type1* %addr, i64 %idx) {
   ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
-  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[COPY1]]
-  ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
+  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[C]]
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
   ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
-  ; CHECK:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP]], [[C1]](s64)
-  ; CHECK:   $x0 = COPY [[GEP1]](p0)
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
+  ; CHECK:   $x0 = COPY [[PTR_ADD1]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type1, %type1* %addr, i64 %idx, i32 2, i32 2
   ret i32* %res

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll
index e3246cf58722..7a11f1b363f0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll
@@ -23,7 +23,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i64(<2 x i32 addrspace(1
   ; CHECK:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV2]](s64), [[MV3]](s64)
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[BUILD_VECTOR2]], [[BUILD_VECTOR1]]
+  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
   ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
   ; CHECK:   [[COPY9:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
   ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY9]](<2 x p1>)
@@ -51,7 +51,7 @@ define <2 x i32 addrspace(3)*> @vector_gep_v2p3_index_v2i32(<2 x i32 addrspace(3
   ; CHECK:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
-  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s32>) = G_MUL [[BUILD_VECTOR2]], [[BUILD_VECTOR1]]
+  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s32>) = G_MUL [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
   ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(<2 x p3>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s32>)
   ; CHECK:   [[COPY5:%[0-9]+]]:_(<2 x p3>) = COPY [[PTR_ADD]](<2 x p3>)
   ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY5]](<2 x p3>)
@@ -82,7 +82,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i32(<2 x i32 addrspace(1
   ; CHECK:   [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[BUILD_VECTOR1]](<2 x s32>)
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[BUILD_VECTOR2]], [[SEXT]]
+  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[SEXT]], [[BUILD_VECTOR2]]
   ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
   ; CHECK:   [[COPY7:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
   ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY7]](<2 x p1>)
@@ -116,7 +116,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_i64(<2 x i32 addrspace(1)*
   ; CHECK:   [[COPY7:%[0-9]+]]:_(<2 x s64>) = COPY [[BUILD_VECTOR1]](<2 x s64>)
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[BUILD_VECTOR2]], [[COPY7]]
+  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[COPY7]], [[BUILD_VECTOR2]]
   ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
   ; CHECK:   [[COPY8:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
   ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY8]](<2 x p1>)
@@ -148,7 +148,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_i32(<2 x i32 addrspace(1)*
   ; CHECK:   [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[BUILD_VECTOR1]](<2 x s32>)
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[BUILD_VECTOR2]], [[SEXT]]
+  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[SEXT]], [[BUILD_VECTOR2]]
   ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
   ; CHECK:   [[COPY6:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
   ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY6]](<2 x p1>)
@@ -187,7 +187,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i64_constant(<2 x i32 ad
   ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C1]](s64)
   ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK:   [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C2]](s64), [[C2]](s64)
-  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[BUILD_VECTOR3]], [[BUILD_VECTOR2]]
+  ; CHECK:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[BUILD_VECTOR2]], [[BUILD_VECTOR3]]
   ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
   ; CHECK:   [[COPY9:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
   ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY9]](<2 x p1>)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/lds-relocs.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/lds-relocs.ll
index 704cf594a861..8a0e5e64df87 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/lds-relocs.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/lds-relocs.ll
@@ -5,7 +5,7 @@
 @lds.defined = unnamed_addr addrspace(3) global [8 x i32] undef, align 8
 
 ; GCN-LABEL: {{^}}test_basic:
-; GCN: s_add_u32 s0, lds.defined@abs32@lo, s0 ; encoding: [0xff,0x00,0x00,0x80,A,A,A,A]
+; GCN: s_add_u32 s0, lds.defined@abs32@lo, s2 ; encoding: [0xff,0x02,0x00,0x80,A,A,A,A]
 ; GCN: v_add_u32_e32 v0, lds.external@abs32@lo, v0 ; encoding: [0xff,0x00,0x00,0x68,A,A,A,A]
 
 ; GCN: .globl lds.external

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.dec.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.dec.ll
index eb82ca55b5d9..45f78ebfc695 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.dec.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.dec.ll
@@ -369,12 +369,12 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(i32 addrspace
 ; CI-LABEL: global_atomic_dec_ret_i32_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; CI-NEXT:    v_mul_lo_u32 v4, 4, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; CI-NEXT:    v_mul_lo_u32 v4, v0, 4
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s3
@@ -394,12 +394,12 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(i32 addrspace
 ; VI-LABEL: global_atomic_dec_ret_i32_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; VI-NEXT:    v_mul_lo_u32 v4, 4, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; VI-NEXT:    v_mul_lo_u32 v4, v0, 4
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
@@ -452,12 +452,12 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_addr64(i32 addrspa
 ; CI-LABEL: global_atomic_dec_noret_i32_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; CI-NEXT:    v_mul_lo_u32 v0, 4, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; CI-NEXT:    v_mul_lo_u32 v0, v0, 4
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_mov_b32_e32 v2, 42
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -472,12 +472,12 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_addr64(i32 addrspa
 ; VI-LABEL: global_atomic_dec_noret_i32_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; VI-NEXT:    v_mul_lo_u32 v0, 4, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; VI-NEXT:    v_mul_lo_u32 v0, v0, 4
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_mov_b32_e32 v2, 42
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -690,12 +690,12 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(i32* %out, i32*
 ; CI-LABEL: flat_atomic_dec_ret_i32_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; CI-NEXT:    v_mul_lo_u32 v4, 4, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; CI-NEXT:    v_mul_lo_u32 v4, v0, 4
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s3
@@ -715,12 +715,12 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(i32* %out, i32*
 ; VI-LABEL: flat_atomic_dec_ret_i32_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; VI-NEXT:    v_mul_lo_u32 v4, 4, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; VI-NEXT:    v_mul_lo_u32 v4, v0, 4
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
@@ -773,12 +773,12 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_addr64(i32* %ptr) #0
 ; CI-LABEL: flat_atomic_dec_noret_i32_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; CI-NEXT:    v_mul_lo_u32 v0, 4, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; CI-NEXT:    v_mul_lo_u32 v0, v0, 4
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_mov_b32_e32 v2, 42
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -793,12 +793,12 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_addr64(i32* %ptr) #0
 ; VI-LABEL: flat_atomic_dec_noret_i32_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; VI-NEXT:    v_mul_lo_u32 v0, 4, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; VI-NEXT:    v_mul_lo_u32 v0, v0, 4
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_mov_b32_e32 v2, 42
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1023,12 +1023,12 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(i64* %out, i64*
 ; CI-LABEL: flat_atomic_dec_ret_i64_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; CI-NEXT:    v_mul_lo_u32 v4, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; CI-NEXT:    v_mul_lo_u32 v4, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1049,12 +1049,12 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(i64* %out, i64*
 ; VI-LABEL: flat_atomic_dec_ret_i64_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; VI-NEXT:    v_mul_lo_u32 v4, 8, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; VI-NEXT:    v_mul_lo_u32 v4, v0, 8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1109,12 +1109,12 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(i64* %ptr) #0
 ; CI-LABEL: flat_atomic_dec_noret_i64_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; CI-NEXT:    v_mul_lo_u32 v0, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; CI-NEXT:    v_mul_lo_u32 v0, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v4, s1
@@ -1130,12 +1130,12 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(i64* %ptr) #0
 ; VI-LABEL: flat_atomic_dec_noret_i64_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; VI-NEXT:    v_mul_lo_u32 v0, 8, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; VI-NEXT:    v_mul_lo_u32 v0, v0, 8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v4, s1
@@ -1179,7 +1179,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(i64* %ptr) #0
 define amdgpu_kernel void @atomic_dec_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
 ; CI-LABEL: atomic_dec_shl_base_lds_0:
 ; CI:       ; %bb.0:
-; CI-NEXT:    v_mul_lo_u32 v5, 4, v0
+; CI-NEXT:    v_mul_lo_u32 v5, v0, 4
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; CI-NEXT:    v_mov_b32_e32 v6, 9
 ; CI-NEXT:    s_mov_b32 m0, -1
@@ -1198,7 +1198,7 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0(i32 addrspace(1)* %out, i32
 ;
 ; VI-LABEL: atomic_dec_shl_base_lds_0:
 ; VI:       ; %bb.0:
-; VI-NEXT:    v_mul_lo_u32 v5, 4, v0
+; VI-NEXT:    v_mul_lo_u32 v5, v0, 4
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; VI-NEXT:    v_mov_b32_e32 v6, 9
 ; VI-NEXT:    s_mov_b32 m0, -1
@@ -1608,12 +1608,12 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(i64 addrspace
 ; CI-LABEL: global_atomic_dec_ret_i64_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; CI-NEXT:    v_mul_lo_u32 v4, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; CI-NEXT:    v_mul_lo_u32 v4, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1634,12 +1634,12 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(i64 addrspace
 ; VI-LABEL: global_atomic_dec_ret_i64_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; VI-NEXT:    v_mul_lo_u32 v4, 8, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; VI-NEXT:    v_mul_lo_u32 v4, v0, 8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1694,12 +1694,12 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(i64 addrspa
 ; CI-LABEL: global_atomic_dec_noret_i64_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; CI-NEXT:    v_mul_lo_u32 v0, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; CI-NEXT:    v_mul_lo_u32 v0, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v4, s1
@@ -1715,12 +1715,12 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(i64 addrspa
 ; VI-LABEL: global_atomic_dec_noret_i64_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; VI-NEXT:    v_mul_lo_u32 v0, 8, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; VI-NEXT:    v_mul_lo_u32 v0, v0, 8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v4, s1
@@ -1764,7 +1764,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(i64 addrspa
 define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
 ; CI-LABEL: atomic_dec_shl_base_lds_0_i64:
 ; CI:       ; %bb.0:
-; CI-NEXT:    v_mul_lo_u32 v7, 8, v0
+; CI-NEXT:    v_mul_lo_u32 v7, v0, 8
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; CI-NEXT:    v_add_i32_e32 v6, vcc, 2, v0
 ; CI-NEXT:    v_mov_b32_e32 v0, 9
@@ -1784,7 +1784,7 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(i64 addrspace(1)* %out,
 ;
 ; VI-LABEL: atomic_dec_shl_base_lds_0_i64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    v_mul_lo_u32 v7, 8, v0
+; VI-NEXT:    v_mul_lo_u32 v7, v0, 8
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; VI-NEXT:    v_add_u32_e32 v6, vcc, 2, v0
 ; VI-NEXT:    v_mov_b32_e32 v0, 9

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.inc.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.inc.ll
index 916f4e7fc664..067a34db7027 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.inc.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.atomic.inc.ll
@@ -371,12 +371,12 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(i32 addrspace
 ; CI-LABEL: global_atomic_inc_ret_i32_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; CI-NEXT:    v_mul_lo_u32 v4, 4, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; CI-NEXT:    v_mul_lo_u32 v4, v0, 4
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s3
@@ -396,12 +396,12 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(i32 addrspace
 ; VI-LABEL: global_atomic_inc_ret_i32_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; VI-NEXT:    v_mul_lo_u32 v4, 4, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; VI-NEXT:    v_mul_lo_u32 v4, v0, 4
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
@@ -422,24 +422,24 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(i32 addrspace
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_lo_u32 v1, 4, v1
-; GFX9-NEXT:    v_mul_hi_u32 v3, 4, v0
-; GFX9-NEXT:    v_mul_lo_u32 v4, 4, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 4
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 4
+; GFX9-NEXT:    v_mul_lo_u32 v4, v0, 4
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, 42
-; GFX9-NEXT:    v_add3_u32 v2, v2, v1, v3
+; GFX9-NEXT:    v_mov_b32_e32 v5, s1
+; GFX9-NEXT:    v_add3_u32 v3, v1, v2, v3
 ; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s2, v4
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v0, v2, vcc
+; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v0, v3, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, 20, v1
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v3, vcc
-; GFX9-NEXT:    global_atomic_inc v3, v[0:1], v5, off glc
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v4
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v4, 42
+; GFX9-NEXT:    global_atomic_inc v0, v[0:1], v4, off glc
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    global_store_dword v[0:1], v3, off
+; GFX9-NEXT:    global_store_dword v[2:3], v0, off
 ; GFX9-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
@@ -454,12 +454,12 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(i32 addrspa
 ; CI-LABEL: global_atomic_inc_noret_i32_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; CI-NEXT:    v_mul_lo_u32 v0, 4, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; CI-NEXT:    v_mul_lo_u32 v0, v0, 4
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_mov_b32_e32 v2, 42
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -474,12 +474,12 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(i32 addrspa
 ; VI-LABEL: global_atomic_inc_noret_i32_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; VI-NEXT:    v_mul_lo_u32 v0, 4, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; VI-NEXT:    v_mul_lo_u32 v0, v0, 4
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_mov_b32_e32 v2, 42
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -495,19 +495,19 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(i32 addrspa
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_hi_u32 v3, 4, v0
-; GFX9-NEXT:    v_mul_lo_u32 v1, 4, v1
-; GFX9-NEXT:    v_mul_lo_u32 v0, 4, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 4
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 4
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, 4
+; GFX9-NEXT:    v_add3_u32 v1, v1, v2, v3
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v4, s1
-; GFX9-NEXT:    v_mov_b32_e32 v5, 42
-; GFX9-NEXT:    v_add3_u32 v1, v2, v1, v3
+; GFX9-NEXT:    v_mov_b32_e32 v2, s1
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, 20, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT:    global_atomic_inc v0, v[0:1], v5, off glc
+; GFX9-NEXT:    v_mov_b32_e32 v2, 42
+; GFX9-NEXT:    global_atomic_inc v0, v[0:1], v2, off glc
 ; GFX9-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
@@ -521,7 +521,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(i32 addrspa
 define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
 ; CI-LABEL: atomic_inc_shl_base_lds_0_i32:
 ; CI:       ; %bb.0:
-; CI-NEXT:    v_mul_lo_u32 v5, 4, v0
+; CI-NEXT:    v_mul_lo_u32 v5, v0, 4
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; CI-NEXT:    v_mov_b32_e32 v6, 9
 ; CI-NEXT:    s_mov_b32 m0, -1
@@ -540,7 +540,7 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out,
 ;
 ; VI-LABEL: atomic_inc_shl_base_lds_0_i32:
 ; VI:       ; %bb.0:
-; VI-NEXT:    v_mul_lo_u32 v5, 4, v0
+; VI-NEXT:    v_mul_lo_u32 v5, v0, 4
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; VI-NEXT:    v_mov_b32_e32 v6, 9
 ; VI-NEXT:    s_mov_b32 m0, -1
@@ -559,7 +559,7 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out,
 ;
 ; GFX9-LABEL: atomic_inc_shl_base_lds_0_i32:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    v_mul_lo_u32 v1, 4, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v0, 4
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; GFX9-NEXT:    v_add_u32_e32 v3, 2, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 9
@@ -959,12 +959,12 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(i64 addrspace
 ; CI-LABEL: global_atomic_inc_ret_i64_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; CI-NEXT:    v_mul_lo_u32 v4, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; CI-NEXT:    v_mul_lo_u32 v4, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s3
@@ -985,12 +985,12 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(i64 addrspace
 ; VI-LABEL: global_atomic_inc_ret_i64_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; VI-NEXT:    v_mul_lo_u32 v4, 8, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; VI-NEXT:    v_mul_lo_u32 v4, v0, 8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1010,25 +1010,25 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(i64 addrspace
 ;
 ; GFX9-LABEL: global_atomic_inc_ret_i64_offset_addr64:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_lo_u32 v3, 8, v1
-; GFX9-NEXT:    v_mul_hi_u32 v4, 8, v0
-; GFX9-NEXT:    v_mul_lo_u32 v5, 8, v0
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 8
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 8
+; GFX9-NEXT:    v_mul_lo_u32 v4, v0, 8
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v6, s3
-; GFX9-NEXT:    v_mov_b32_e32 v0, 42
-; GFX9-NEXT:    v_add3_u32 v4, v2, v3, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s2, v5
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v6, v4, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 40, v2
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
-; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v[2:3], v[0:1], off glc
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v5
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v4, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:    v_mov_b32_e32 v5, s1
+; GFX9-NEXT:    v_add3_u32 v3, v1, v2, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s2, v4
+; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v0, v3, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, 40, v1
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v4
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v4, 42
+; GFX9-NEXT:    v_mov_b32_e32 v5, 0
+; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v[0:1], v[4:5], off glc
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
 ; GFX9-NEXT:    s_endpgm
@@ -1045,12 +1045,12 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(i64 addrspa
 ; CI-LABEL: global_atomic_inc_noret_i64_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; CI-NEXT:    v_mul_lo_u32 v0, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; CI-NEXT:    v_mul_lo_u32 v0, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v4, s1
@@ -1066,12 +1066,12 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(i64 addrspa
 ; VI-LABEL: global_atomic_inc_noret_i64_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; VI-NEXT:    v_mul_lo_u32 v0, 8, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; VI-NEXT:    v_mul_lo_u32 v0, v0, 8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v4, s1
@@ -1086,22 +1086,22 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(i64 addrspa
 ;
 ; GFX9-LABEL: global_atomic_inc_noret_i64_offset_addr64:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_lo_u32 v3, 8, v1
-; GFX9-NEXT:    v_mul_hi_u32 v4, 8, v0
-; GFX9-NEXT:    v_mul_lo_u32 v5, 8, v0
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 8
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 8
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, 8
+; GFX9-NEXT:    v_add3_u32 v1, v1, v2, v3
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v6, s1
-; GFX9-NEXT:    v_mov_b32_e32 v0, 42
-; GFX9-NEXT:    v_add3_u32 v2, v2, v3, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, s0, v5
-; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v6, v2, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 40, v3
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
-; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v[2:3], v[0:1], off glc
+; GFX9-NEXT:    v_mov_b32_e32 v2, s1
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, 40, v0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 42
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v3, 0
+; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off glc
 ; GFX9-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
@@ -1186,12 +1186,12 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(i32* %out, i32*
 ; CI-LABEL: flat_atomic_inc_ret_i32_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; CI-NEXT:    v_mul_lo_u32 v4, 4, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; CI-NEXT:    v_mul_lo_u32 v4, v0, 4
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1211,12 +1211,12 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(i32* %out, i32*
 ; VI-LABEL: flat_atomic_inc_ret_i32_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; VI-NEXT:    v_mul_lo_u32 v4, 4, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; VI-NEXT:    v_mul_lo_u32 v4, v0, 4
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1237,24 +1237,24 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(i32* %out, i32*
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_lo_u32 v1, 4, v1
-; GFX9-NEXT:    v_mul_hi_u32 v3, 4, v0
-; GFX9-NEXT:    v_mul_lo_u32 v4, 4, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 4
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 4
+; GFX9-NEXT:    v_mul_lo_u32 v4, v0, 4
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, 42
-; GFX9-NEXT:    v_add3_u32 v2, v2, v1, v3
+; GFX9-NEXT:    v_mov_b32_e32 v5, s1
+; GFX9-NEXT:    v_add3_u32 v3, v1, v2, v3
 ; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s2, v4
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v0, v2, vcc
+; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v0, v3, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, 20, v1
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v3, vcc
-; GFX9-NEXT:    flat_atomic_inc v3, v[0:1], v5 glc
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v4
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v4, 42
+; GFX9-NEXT:    flat_atomic_inc v0, v[0:1], v4 glc
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    flat_store_dword v[0:1], v3
+; GFX9-NEXT:    flat_store_dword v[2:3], v0
 ; GFX9-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.tid = getelementptr i32, i32* %ptr, i32 %id
@@ -1269,12 +1269,12 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(i32* %ptr) #0
 ; CI-LABEL: flat_atomic_inc_noret_i32_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; CI-NEXT:    v_mul_lo_u32 v0, 4, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; CI-NEXT:    v_mul_lo_u32 v0, v0, 4
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_mov_b32_e32 v2, 42
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1289,12 +1289,12 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(i32* %ptr) #0
 ; VI-LABEL: flat_atomic_inc_noret_i32_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 4, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 4
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 4, v0
-; VI-NEXT:    v_mul_lo_u32 v0, 4, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 4
+; VI-NEXT:    v_mul_lo_u32 v0, v0, 4
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_mov_b32_e32 v2, 42
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1310,19 +1310,19 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(i32* %ptr) #0
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_hi_u32 v3, 4, v0
-; GFX9-NEXT:    v_mul_lo_u32 v1, 4, v1
-; GFX9-NEXT:    v_mul_lo_u32 v0, 4, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 4
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 4
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, 4
+; GFX9-NEXT:    v_add3_u32 v1, v1, v2, v3
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v4, s1
-; GFX9-NEXT:    v_mov_b32_e32 v5, 42
-; GFX9-NEXT:    v_add3_u32 v1, v2, v1, v3
+; GFX9-NEXT:    v_mov_b32_e32 v2, s1
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, 20, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT:    flat_atomic_inc v0, v[0:1], v5 glc
+; GFX9-NEXT:    v_mov_b32_e32 v2, 42
+; GFX9-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
 ; GFX9-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.tid = getelementptr i32, i32* %ptr, i32 %id
@@ -1336,7 +1336,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(i32* %ptr) #0
 define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
 ; CI-LABEL: atomic_inc_shl_base_lds_0_i64:
 ; CI:       ; %bb.0:
-; CI-NEXT:    v_mul_lo_u32 v7, 8, v0
+; CI-NEXT:    v_mul_lo_u32 v7, v0, 8
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; CI-NEXT:    v_add_i32_e32 v6, vcc, 2, v0
 ; CI-NEXT:    v_mov_b32_e32 v0, 9
@@ -1356,7 +1356,7 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out,
 ;
 ; VI-LABEL: atomic_inc_shl_base_lds_0_i64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    v_mul_lo_u32 v7, 8, v0
+; VI-NEXT:    v_mul_lo_u32 v7, v0, 8
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; VI-NEXT:    v_add_u32_e32 v6, vcc, 2, v0
 ; VI-NEXT:    v_mov_b32_e32 v0, 9
@@ -1376,7 +1376,7 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out,
 ;
 ; GFX9-LABEL: atomic_inc_shl_base_lds_0_i64:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    v_mul_lo_u32 v3, 8, v0
+; GFX9-NEXT:    v_mul_lo_u32 v3, v0, 8
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 9
 ; GFX9-NEXT:    v_add_u32_e32 v4, 2, v0
@@ -1481,12 +1481,12 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(i64* %out, i64*
 ; CI-LABEL: flat_atomic_inc_ret_i64_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; CI-NEXT:    v_mul_lo_u32 v4, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; CI-NEXT:    v_mul_lo_u32 v4, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1507,12 +1507,12 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(i64* %out, i64*
 ; VI-LABEL: flat_atomic_inc_ret_i64_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; VI-NEXT:    v_mul_lo_u32 v4, 8, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; VI-NEXT:    v_mul_lo_u32 v4, v0, 8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
@@ -1532,25 +1532,25 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(i64* %out, i64*
 ;
 ; GFX9-LABEL: flat_atomic_inc_ret_i64_offset_addr64:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_lo_u32 v3, 8, v1
-; GFX9-NEXT:    v_mul_hi_u32 v4, 8, v0
-; GFX9-NEXT:    v_mul_lo_u32 v5, 8, v0
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 8
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 8
+; GFX9-NEXT:    v_mul_lo_u32 v4, v0, 8
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v6, s3
-; GFX9-NEXT:    v_mov_b32_e32 v0, 42
-; GFX9-NEXT:    v_add3_u32 v4, v2, v3, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s2, v5
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v6, v4, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 40, v2
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
-; GFX9-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v5
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v4, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:    v_mov_b32_e32 v5, s1
+; GFX9-NEXT:    v_add3_u32 v3, v1, v2, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s2, v4
+; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v0, v3, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, 40, v1
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v4
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v4, 42
+; GFX9-NEXT:    v_mov_b32_e32 v5, 0
+; GFX9-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[4:5] glc
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; GFX9-NEXT:    s_endpgm
@@ -1567,12 +1567,12 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(i64* %ptr) #0
 ; CI-LABEL: flat_atomic_inc_noret_i64_offset_addr64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; CI-NEXT:    v_mul_lo_u32 v0, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; CI-NEXT:    v_mul_lo_u32 v0, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v4, s1
@@ -1588,12 +1588,12 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(i64* %ptr) #0
 ; VI-LABEL: flat_atomic_inc_noret_i64_offset_addr64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; VI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; VI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; VI-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT:    v_mul_hi_u32 v3, 8, v0
-; VI-NEXT:    v_mul_lo_u32 v0, 8, v0
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT:    v_mul_hi_u32 v3, v0, 8
+; VI-NEXT:    v_mul_lo_u32 v0, v0, 8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v4, s1
@@ -1608,22 +1608,22 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(i64* %ptr) #0
 ;
 ; GFX9-LABEL: flat_atomic_inc_noret_i64_offset_addr64:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_lo_u32 v3, 8, v1
-; GFX9-NEXT:    v_mul_hi_u32 v4, 8, v0
-; GFX9-NEXT:    v_mul_lo_u32 v5, 8, v0
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 8
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 8
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, 8
+; GFX9-NEXT:    v_add3_u32 v1, v1, v2, v3
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v6, s1
-; GFX9-NEXT:    v_mov_b32_e32 v0, 42
-; GFX9-NEXT:    v_add3_u32 v2, v2, v3, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, s0, v5
-; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v6, v2, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 40, v3
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
-; GFX9-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX9-NEXT:    v_mov_b32_e32 v2, s1
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, 40, v0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 42
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v3, 0
+; GFX9-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
 ; GFX9-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.tid = getelementptr i64, i64* %ptr, i32 %id

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll
index 5609867bb782..7ba85c03f539 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll
@@ -8,12 +8,12 @@ define amdgpu_kernel void @is_private_vgpr(i8* addrspace(1)* %ptr.ptr) {
 ; CI-LABEL: is_private_vgpr:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
-; CI-NEXT:    v_mul_lo_u32 v3, 8, v0
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
+; CI-NEXT:    v_mul_lo_u32 v3, v0, 8
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
-; CI-NEXT:    v_mul_hi_u32 v0, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v0, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v0
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_add_i32_e32 v0, vcc, s0, v3
@@ -31,11 +31,11 @@ define amdgpu_kernel void @is_private_vgpr(i8* addrspace(1)* %ptr.ptr) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_hi_u32 v3, 8, v0
-; GFX9-NEXT:    v_mul_lo_u32 v1, 8, v1
-; GFX9-NEXT:    v_mul_lo_u32 v0, 8, v0
-; GFX9-NEXT:    v_add3_u32 v1, v2, v1, v3
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 8
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 8
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, 8
+; GFX9-NEXT:    v_add3_u32 v1, v1, v2, v3
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s1
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll
index 9a24715d5aee..006c0b417b7b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll
@@ -8,12 +8,12 @@ define amdgpu_kernel void @is_local_vgpr(i8* addrspace(1)* %ptr.ptr) {
 ; CI-LABEL: is_local_vgpr:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
-; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
-; CI-NEXT:    v_mul_lo_u32 v3, 8, v0
+; CI-NEXT:    v_mul_lo_u32 v1, v1, 8
+; CI-NEXT:    v_mul_lo_u32 v2, v0, 0
+; CI-NEXT:    v_mul_lo_u32 v3, v0, 8
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
-; CI-NEXT:    v_mul_hi_u32 v0, 8, v0
-; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_mul_hi_u32 v0, v0, 8
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v0
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_add_i32_e32 v0, vcc, s0, v3
@@ -31,11 +31,11 @@ define amdgpu_kernel void @is_local_vgpr(i8* addrspace(1)* %ptr.ptr) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
-; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX9-NEXT:    v_mul_hi_u32 v3, 8, v0
-; GFX9-NEXT:    v_mul_lo_u32 v1, 8, v1
-; GFX9-NEXT:    v_mul_lo_u32 v0, 8, v0
-; GFX9-NEXT:    v_add3_u32 v1, v2, v1, v3
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, 8
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, 8
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, 8
+; GFX9-NEXT:    v_add3_u32 v1, v1, v2, v3
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s1
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.update.dpp.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.update.dpp.ll
index b9efacd72e4e..bcc8b5c32016 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.update.dpp.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.update.dpp.ll
@@ -40,12 +40,12 @@ define amdgpu_kernel void @update_dpp64_test(i64 addrspace(1)* %arg, i64 %in1, i
 ; GFX8-LABEL: update_dpp64_test:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX8-NEXT:    v_mul_lo_u32 v1, 8, v1
+; GFX8-NEXT:    v_mul_lo_u32 v1, v1, 8
+; GFX8-NEXT:    v_mul_lo_u32 v2, v0, 0
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX8-NEXT:    v_mul_hi_u32 v3, 8, v0
-; GFX8-NEXT:    v_mul_lo_u32 v0, 8, v0
-; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; GFX8-NEXT:    v_mul_hi_u32 v3, v0, 8
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, 8
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v4, s1
@@ -65,14 +65,14 @@ define amdgpu_kernel void @update_dpp64_test(i64 addrspace(1)* %arg, i64 %in1, i
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX10-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX10-NEXT:    v_mul_hi_u32 v3, 8, v0
-; GFX10-NEXT:    v_mul_lo_u32 v0, 8, v0
-; GFX10-NEXT:    v_mul_lo_u32 v1, 8, v1
+; GFX10-NEXT:    v_mul_lo_u32 v2, v0, 0
+; GFX10-NEXT:    v_mul_hi_u32 v3, v0, 8
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, 8
+; GFX10-NEXT:    v_mul_lo_u32 v1, v1, 8
 ; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    v_add_co_u32_e64 v6, vcc_lo, s0, v0
-; GFX10-NEXT:    v_add3_u32 v1, v2, v1, v3
+; GFX10-NEXT:    v_add3_u32 v1, v1, v2, v3
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s2
 ; GFX10-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, s1, v1, vcc_lo

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll
index 7a7fd41e43dd..6b88922c9a2f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll
@@ -253,14 +253,14 @@ define amdgpu_ps void @mubuf_store_sgpr_ptr_sgpr_offset(i32 addrspace(1)* inreg
 ; GFX6-LABEL: mubuf_store_sgpr_ptr_sgpr_offset:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x200000
-; GFX6-NEXT:    v_mul_hi_u32 v0, 4, s4
+; GFX6-NEXT:    v_mul_hi_u32 v0, s4, 4
 ; GFX6-NEXT:    s_mov_b32 s1, s3
 ; GFX6-NEXT:    s_mul_i32 s3, s4, 4
-; GFX6-NEXT:    s_mul_i32 s6, s4, 0
-; GFX6-NEXT:    s_mul_i32 s4, s5, 4
-; GFX6-NEXT:    s_add_i32 s6, s6, s4
+; GFX6-NEXT:    s_mul_i32 s5, s5, 4
+; GFX6-NEXT:    s_mul_i32 s4, s4, 0
+; GFX6-NEXT:    s_add_i32 s5, s5, s4
 ; GFX6-NEXT:    s_mov_b32 s0, s2
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, s6, v0
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, s5, v0
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX6-NEXT:    s_mov_b32 s2, 0
 ; GFX6-NEXT:    v_mov_b32_e32 v2, 0
@@ -271,14 +271,14 @@ define amdgpu_ps void @mubuf_store_sgpr_ptr_sgpr_offset(i32 addrspace(1)* inreg
 ; GFX7-LABEL: mubuf_store_sgpr_ptr_sgpr_offset:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x200000
-; GFX7-NEXT:    v_mul_hi_u32 v0, 4, s4
+; GFX7-NEXT:    v_mul_hi_u32 v0, s4, 4
 ; GFX7-NEXT:    s_mov_b32 s1, s3
 ; GFX7-NEXT:    s_mul_i32 s3, s4, 4
-; GFX7-NEXT:    s_mul_i32 s6, s4, 0
-; GFX7-NEXT:    s_mul_i32 s4, s5, 4
-; GFX7-NEXT:    s_add_i32 s6, s6, s4
+; GFX7-NEXT:    s_mul_i32 s5, s5, 4
+; GFX7-NEXT:    s_mul_i32 s4, s4, 0
+; GFX7-NEXT:    s_add_i32 s5, s5, s4
 ; GFX7-NEXT:    s_mov_b32 s0, s2
-; GFX7-NEXT:    v_add_i32_e32 v1, vcc, s6, v0
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, s5, v0
 ; GFX7-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX7-NEXT:    s_mov_b32 s2, 0
 ; GFX7-NEXT:    v_mov_b32_e32 v2, 0
@@ -294,12 +294,12 @@ define amdgpu_ps void @mubuf_store_vgpr_ptr_sgpr_offset(i32 addrspace(1)* %ptr,
 ; GFX6-LABEL: mubuf_store_vgpr_ptr_sgpr_offset:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX6-NEXT:    v_mul_hi_u32 v2, 4, s0
+; GFX6-NEXT:    v_mul_hi_u32 v2, s0, 4
 ; GFX6-NEXT:    s_mul_i32 s2, s0, 4
-; GFX6-NEXT:    s_mul_i32 s3, s0, 0
-; GFX6-NEXT:    s_mul_i32 s0, s1, 4
-; GFX6-NEXT:    s_add_i32 s3, s3, s0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s3, v2
+; GFX6-NEXT:    s_mul_i32 s1, s1, 4
+; GFX6-NEXT:    s_mul_i32 s0, s0, 0
+; GFX6-NEXT:    s_add_i32 s1, s1, s0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s1, v2
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
 ; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX6-NEXT:    s_mov_b32 s6, 0
@@ -312,12 +312,12 @@ define amdgpu_ps void @mubuf_store_vgpr_ptr_sgpr_offset(i32 addrspace(1)* %ptr,
 ; GFX7-LABEL: mubuf_store_vgpr_ptr_sgpr_offset:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX7-NEXT:    v_mul_hi_u32 v2, 4, s0
+; GFX7-NEXT:    v_mul_hi_u32 v2, s0, 4
 ; GFX7-NEXT:    s_mul_i32 s2, s0, 4
-; GFX7-NEXT:    s_mul_i32 s3, s0, 0
-; GFX7-NEXT:    s_mul_i32 s0, s1, 4
-; GFX7-NEXT:    s_add_i32 s3, s3, s0
-; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s3, v2
+; GFX7-NEXT:    s_mul_i32 s1, s1, 4
+; GFX7-NEXT:    s_mul_i32 s0, s0, 0
+; GFX7-NEXT:    s_add_i32 s1, s1, s0
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s1, v2
 ; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
 ; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX7-NEXT:    s_mov_b32 s6, 0
@@ -335,12 +335,12 @@ define amdgpu_ps void @mubuf_store_vgpr_ptr_sgpr_offset_offset256(i32 addrspace(
 ; GFX6-LABEL: mubuf_store_vgpr_ptr_sgpr_offset_offset256:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX6-NEXT:    v_mul_hi_u32 v2, 4, s0
+; GFX6-NEXT:    v_mul_hi_u32 v2, s0, 4
 ; GFX6-NEXT:    s_mul_i32 s2, s0, 4
-; GFX6-NEXT:    s_mul_i32 s3, s0, 0
-; GFX6-NEXT:    s_mul_i32 s0, s1, 4
-; GFX6-NEXT:    s_add_i32 s3, s3, s0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s3, v2
+; GFX6-NEXT:    s_mul_i32 s1, s1, 4
+; GFX6-NEXT:    s_mul_i32 s0, s0, 0
+; GFX6-NEXT:    s_add_i32 s1, s1, s0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s1, v2
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
 ; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX6-NEXT:    s_mov_b32 s6, 0
@@ -353,12 +353,12 @@ define amdgpu_ps void @mubuf_store_vgpr_ptr_sgpr_offset_offset256(i32 addrspace(
 ; GFX7-LABEL: mubuf_store_vgpr_ptr_sgpr_offset_offset256:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX7-NEXT:    v_mul_hi_u32 v2, 4, s0
+; GFX7-NEXT:    v_mul_hi_u32 v2, s0, 4
 ; GFX7-NEXT:    s_mul_i32 s2, s0, 4
-; GFX7-NEXT:    s_mul_i32 s3, s0, 0
-; GFX7-NEXT:    s_mul_i32 s0, s1, 4
-; GFX7-NEXT:    s_add_i32 s3, s3, s0
-; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s3, v2
+; GFX7-NEXT:    s_mul_i32 s1, s1, 4
+; GFX7-NEXT:    s_mul_i32 s0, s0, 0
+; GFX7-NEXT:    s_add_i32 s1, s1, s0
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s1, v2
 ; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
 ; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX7-NEXT:    s_mov_b32 s6, 0
@@ -379,16 +379,16 @@ define amdgpu_ps void @mubuf_store_vgpr_ptr_sgpr_offset256_offset(i32 addrspace(
 ; GFX6-NEXT:    s_mov_b32 s1, 0
 ; GFX6-NEXT:    s_movk_i32 s0, 0x400
 ; GFX6-NEXT:    v_mov_b32_e32 v3, s1
-; GFX6-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX6-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x200000
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, 4, s2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s2, 4
 ; GFX6-NEXT:    s_mul_i32 s0, s2, 4
-; GFX6-NEXT:    s_mul_i32 s4, s2, 0
-; GFX6-NEXT:    s_mul_i32 s2, s3, 4
-; GFX6-NEXT:    s_add_i32 s4, s4, s2
+; GFX6-NEXT:    s_mul_i32 s3, s3, 4
+; GFX6-NEXT:    s_mul_i32 s2, s2, 0
+; GFX6-NEXT:    s_add_i32 s3, s3, s2
 ; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s3, v2
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
 ; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX6-NEXT:    s_mov_b32 s2, s1
@@ -403,16 +403,16 @@ define amdgpu_ps void @mubuf_store_vgpr_ptr_sgpr_offset256_offset(i32 addrspace(
 ; GFX7-NEXT:    s_mov_b32 s1, 0
 ; GFX7-NEXT:    s_movk_i32 s0, 0x400
 ; GFX7-NEXT:    v_mov_b32_e32 v3, s1
-; GFX7-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX7-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x200000
+; GFX7-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX7-NEXT:    v_mul_hi_u32 v2, 4, s2
+; GFX7-NEXT:    v_mul_hi_u32 v2, s2, 4
 ; GFX7-NEXT:    s_mul_i32 s0, s2, 4
-; GFX7-NEXT:    s_mul_i32 s4, s2, 0
-; GFX7-NEXT:    s_mul_i32 s2, s3, 4
-; GFX7-NEXT:    s_add_i32 s4, s4, s2
+; GFX7-NEXT:    s_mul_i32 s3, s3, 4
+; GFX7-NEXT:    s_mul_i32 s2, s2, 0
+; GFX7-NEXT:    s_add_i32 s3, s3, s2
 ; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s3, v2
 ; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
 ; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX7-NEXT:    s_mov_b32 s2, s1
@@ -431,10 +431,10 @@ define amdgpu_ps void @mubuf_store_sgpr_ptr_vgpr_offset(i32 addrspace(1)* inreg
 ; GFX6-LABEL: mubuf_store_sgpr_ptr_vgpr_offset:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX6-NEXT:    v_mul_lo_u32 v3, 4, v1
-; GFX6-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v1, 4, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, 4, v0
+; GFX6-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX6-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX6-NEXT:    s_mov_b32 s0, s2
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX6-NEXT:    s_mov_b32 s1, s3
@@ -448,10 +448,10 @@ define amdgpu_ps void @mubuf_store_sgpr_ptr_vgpr_offset(i32 addrspace(1)* inreg
 ; GFX7-LABEL: mubuf_store_sgpr_ptr_vgpr_offset:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX7-NEXT:    v_mul_lo_u32 v3, 4, v1
-; GFX7-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX7-NEXT:    v_mul_lo_u32 v1, 4, v0
-; GFX7-NEXT:    v_mul_hi_u32 v0, 4, v0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX7-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX7-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX7-NEXT:    s_mov_b32 s0, s2
 ; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX7-NEXT:    s_mov_b32 s1, s3
@@ -470,10 +470,10 @@ define amdgpu_ps void @mubuf_store_sgpr_ptr_vgpr_offset_offset4095(i32 addrspace
 ; GFX6-LABEL: mubuf_store_sgpr_ptr_vgpr_offset_offset4095:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX6-NEXT:    v_mul_lo_u32 v3, 4, v1
-; GFX6-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v1, 4, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, 4, v0
+; GFX6-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX6-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX6-NEXT:    s_mov_b32 s0, s2
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX6-NEXT:    s_mov_b32 s1, s3
@@ -488,10 +488,10 @@ define amdgpu_ps void @mubuf_store_sgpr_ptr_vgpr_offset_offset4095(i32 addrspace
 ; GFX7-LABEL: mubuf_store_sgpr_ptr_vgpr_offset_offset4095:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX7-NEXT:    v_mul_lo_u32 v3, 4, v1
-; GFX7-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX7-NEXT:    v_mul_lo_u32 v1, 4, v0
-; GFX7-NEXT:    v_mul_hi_u32 v0, 4, v0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX7-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX7-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX7-NEXT:    s_mov_b32 s0, s2
 ; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX7-NEXT:    s_mov_b32 s1, s3
@@ -511,10 +511,10 @@ define amdgpu_ps void @mubuf_store_sgpr_ptr_offset4095_vgpr_offset(i32 addrspace
 ; GFX6-LABEL: mubuf_store_sgpr_ptr_offset4095_vgpr_offset:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX6-NEXT:    v_mul_lo_u32 v3, 4, v1
-; GFX6-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v1, 4, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, 4, v0
+; GFX6-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX6-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX6-NEXT:    s_add_u32 s4, s2, 0x3ffc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX6-NEXT:    s_mov_b32 s6, 0
@@ -528,10 +528,10 @@ define amdgpu_ps void @mubuf_store_sgpr_ptr_offset4095_vgpr_offset(i32 addrspace
 ; GFX7-LABEL: mubuf_store_sgpr_ptr_offset4095_vgpr_offset:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX7-NEXT:    v_mul_lo_u32 v3, 4, v1
-; GFX7-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX7-NEXT:    v_mul_lo_u32 v1, 4, v0
-; GFX7-NEXT:    v_mul_hi_u32 v0, 4, v0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX7-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX7-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX7-NEXT:    s_add_u32 s4, s2, 0x3ffc
 ; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX7-NEXT:    s_mov_b32 s6, 0
@@ -797,12 +797,12 @@ define amdgpu_ps float @mubuf_load_sgpr_ptr_sgpr_offset(float addrspace(1)* inre
 ; GFX6-NEXT:    s_mov_b32 s0, s2
 ; GFX6-NEXT:    s_mov_b32 s1, s3
 ; GFX6-NEXT:    s_bfe_i64 s[2:3], s[4:5], 0x200000
-; GFX6-NEXT:    v_mul_hi_u32 v0, 4, s2
+; GFX6-NEXT:    v_mul_hi_u32 v0, s2, 4
 ; GFX6-NEXT:    s_mul_i32 s4, s2, 0
 ; GFX6-NEXT:    s_mul_i32 s3, s3, 4
-; GFX6-NEXT:    s_add_i32 s4, s4, s3
+; GFX6-NEXT:    s_add_i32 s3, s3, s4
 ; GFX6-NEXT:    s_mul_i32 s2, s2, 4
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, s4, v0
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, s3, v0
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX6-NEXT:    s_mov_b32 s2, 0
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
@@ -815,12 +815,12 @@ define amdgpu_ps float @mubuf_load_sgpr_ptr_sgpr_offset(float addrspace(1)* inre
 ; GFX7-NEXT:    s_mov_b32 s0, s2
 ; GFX7-NEXT:    s_mov_b32 s1, s3
 ; GFX7-NEXT:    s_bfe_i64 s[2:3], s[4:5], 0x200000
-; GFX7-NEXT:    v_mul_hi_u32 v0, 4, s2
+; GFX7-NEXT:    v_mul_hi_u32 v0, s2, 4
 ; GFX7-NEXT:    s_mul_i32 s4, s2, 0
 ; GFX7-NEXT:    s_mul_i32 s3, s3, 4
-; GFX7-NEXT:    s_add_i32 s4, s4, s3
+; GFX7-NEXT:    s_add_i32 s3, s3, s4
 ; GFX7-NEXT:    s_mul_i32 s2, s2, 4
-; GFX7-NEXT:    v_add_i32_e32 v1, vcc, s4, v0
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, s3, v0
 ; GFX7-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX7-NEXT:    s_mov_b32 s2, 0
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
@@ -836,12 +836,12 @@ define amdgpu_ps float @mubuf_load_vgpr_ptr_sgpr_offset(float addrspace(1)* %ptr
 ; GFX6-LABEL: mubuf_load_vgpr_ptr_sgpr_offset:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX6-NEXT:    v_mul_hi_u32 v2, 4, s0
+; GFX6-NEXT:    v_mul_hi_u32 v2, s0, 4
 ; GFX6-NEXT:    s_mul_i32 s2, s0, 0
 ; GFX6-NEXT:    s_mul_i32 s1, s1, 4
-; GFX6-NEXT:    s_add_i32 s2, s2, s1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s2, v2
+; GFX6-NEXT:    s_add_i32 s1, s1, s2
 ; GFX6-NEXT:    s_mul_i32 s0, s0, 4
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s1, v2
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
 ; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX6-NEXT:    s_mov_b32 s2, 0
@@ -854,12 +854,12 @@ define amdgpu_ps float @mubuf_load_vgpr_ptr_sgpr_offset(float addrspace(1)* %ptr
 ; GFX7-LABEL: mubuf_load_vgpr_ptr_sgpr_offset:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX7-NEXT:    v_mul_hi_u32 v2, 4, s0
+; GFX7-NEXT:    v_mul_hi_u32 v2, s0, 4
 ; GFX7-NEXT:    s_mul_i32 s2, s0, 0
 ; GFX7-NEXT:    s_mul_i32 s1, s1, 4
-; GFX7-NEXT:    s_add_i32 s2, s2, s1
-; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s2, v2
+; GFX7-NEXT:    s_add_i32 s1, s1, s2
 ; GFX7-NEXT:    s_mul_i32 s0, s0, 4
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s1, v2
 ; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
 ; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX7-NEXT:    s_mov_b32 s2, 0
@@ -877,12 +877,12 @@ define amdgpu_ps float @mubuf_load_vgpr_ptr_sgpr_offset_offset256(float addrspac
 ; GFX6-LABEL: mubuf_load_vgpr_ptr_sgpr_offset_offset256:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX6-NEXT:    v_mul_hi_u32 v2, 4, s0
+; GFX6-NEXT:    v_mul_hi_u32 v2, s0, 4
 ; GFX6-NEXT:    s_mul_i32 s2, s0, 0
 ; GFX6-NEXT:    s_mul_i32 s1, s1, 4
-; GFX6-NEXT:    s_add_i32 s2, s2, s1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s2, v2
+; GFX6-NEXT:    s_add_i32 s1, s1, s2
 ; GFX6-NEXT:    s_mul_i32 s0, s0, 4
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s1, v2
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
 ; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX6-NEXT:    s_mov_b32 s2, 0
@@ -895,12 +895,12 @@ define amdgpu_ps float @mubuf_load_vgpr_ptr_sgpr_offset_offset256(float addrspac
 ; GFX7-LABEL: mubuf_load_vgpr_ptr_sgpr_offset_offset256:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX7-NEXT:    v_mul_hi_u32 v2, 4, s0
+; GFX7-NEXT:    v_mul_hi_u32 v2, s0, 4
 ; GFX7-NEXT:    s_mul_i32 s2, s0, 0
 ; GFX7-NEXT:    s_mul_i32 s1, s1, 4
-; GFX7-NEXT:    s_add_i32 s2, s2, s1
-; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s2, v2
+; GFX7-NEXT:    s_add_i32 s1, s1, s2
 ; GFX7-NEXT:    s_mul_i32 s0, s0, 4
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s1, v2
 ; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
 ; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX7-NEXT:    s_mov_b32 s2, 0
@@ -919,18 +919,18 @@ define amdgpu_ps float @mubuf_load_vgpr_ptr_sgpr_offset256_offset(float addrspac
 ; GFX6-LABEL: mubuf_load_vgpr_ptr_sgpr_offset256_offset:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX6-NEXT:    v_mul_hi_u32 v4, 4, s0
+; GFX6-NEXT:    v_mul_hi_u32 v4, s0, 4
 ; GFX6-NEXT:    s_movk_i32 s4, 0x400
 ; GFX6-NEXT:    s_mov_b32 s5, 0
 ; GFX6-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX6-NEXT:    s_mul_i32 s2, s0, 0
 ; GFX6-NEXT:    s_mul_i32 s1, s1, 4
+; GFX6-NEXT:    s_add_i32 s1, s1, s2
 ; GFX6-NEXT:    v_mov_b32_e32 v3, s5
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    s_add_i32 s2, s2, s1
 ; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s2, v4
 ; GFX6-NEXT:    s_mul_i32 s0, s0, 4
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, s1, v4
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
 ; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
@@ -943,18 +943,18 @@ define amdgpu_ps float @mubuf_load_vgpr_ptr_sgpr_offset256_offset(float addrspac
 ; GFX7-LABEL: mubuf_load_vgpr_ptr_sgpr_offset256_offset:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_bfe_i64 s[0:1], s[2:3], 0x200000
-; GFX7-NEXT:    v_mul_hi_u32 v4, 4, s0
+; GFX7-NEXT:    v_mul_hi_u32 v4, s0, 4
 ; GFX7-NEXT:    s_movk_i32 s4, 0x400
 ; GFX7-NEXT:    s_mov_b32 s5, 0
 ; GFX7-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX7-NEXT:    s_mul_i32 s2, s0, 0
 ; GFX7-NEXT:    s_mul_i32 s1, s1, 4
+; GFX7-NEXT:    s_add_i32 s1, s1, s2
 ; GFX7-NEXT:    v_mov_b32_e32 v3, s5
 ; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX7-NEXT:    s_add_i32 s2, s2, s1
 ; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s2, v4
 ; GFX7-NEXT:    s_mul_i32 s0, s0, 4
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, s1, v4
 ; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
 ; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
@@ -973,10 +973,10 @@ define amdgpu_ps float @mubuf_load_sgpr_ptr_vgpr_offset(float addrspace(1)* inre
 ; GFX6-LABEL: mubuf_load_sgpr_ptr_vgpr_offset:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX6-NEXT:    v_mul_lo_u32 v3, 4, v1
-; GFX6-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v1, 4, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, 4, v0
+; GFX6-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX6-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX6-NEXT:    s_mov_b32 s0, s2
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX6-NEXT:    s_mov_b32 s1, s3
@@ -990,10 +990,10 @@ define amdgpu_ps float @mubuf_load_sgpr_ptr_vgpr_offset(float addrspace(1)* inre
 ; GFX7-LABEL: mubuf_load_sgpr_ptr_vgpr_offset:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX7-NEXT:    v_mul_lo_u32 v3, 4, v1
-; GFX7-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX7-NEXT:    v_mul_lo_u32 v1, 4, v0
-; GFX7-NEXT:    v_mul_hi_u32 v0, 4, v0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX7-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX7-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX7-NEXT:    s_mov_b32 s0, s2
 ; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX7-NEXT:    s_mov_b32 s1, s3
@@ -1012,36 +1012,36 @@ define amdgpu_ps float @mubuf_load_sgpr_ptr_vgpr_offset_offset4095(float addrspa
 ; GFX6-LABEL: mubuf_load_sgpr_ptr_vgpr_offset_offset4095:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX6-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v1, 4, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, 4, v0
-; GFX6-NEXT:    v_mul_lo_u32 v0, 4, v0
+; GFX6-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX6-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX6-NEXT:    s_mov_b32 s0, s2
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX6-NEXT:    s_mov_b32 s1, s3
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v0
 ; GFX6-NEXT:    s_mov_b32 s2, 0
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_movk_i32 s4, 0x3ffc
-; GFX6-NEXT:    buffer_load_dword v0, v[0:1], s[0:3], s4 addr64
+; GFX6-NEXT:    buffer_load_dword v0, v[1:2], s[0:3], s4 addr64
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    ; return to shader part epilog
 ;
 ; GFX7-LABEL: mubuf_load_sgpr_ptr_vgpr_offset_offset4095:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX7-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX7-NEXT:    v_mul_lo_u32 v1, 4, v1
-; GFX7-NEXT:    v_mul_hi_u32 v3, 4, v0
-; GFX7-NEXT:    v_mul_lo_u32 v0, 4, v0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX7-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX7-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX7-NEXT:    s_mov_b32 s0, s2
-; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX7-NEXT:    s_mov_b32 s1, s3
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v2, v0
 ; GFX7-NEXT:    s_mov_b32 s2, 0
-; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX7-NEXT:    s_movk_i32 s4, 0x3ffc
-; GFX7-NEXT:    buffer_load_dword v0, v[0:1], s[0:3], s4 addr64
+; GFX7-NEXT:    buffer_load_dword v0, v[1:2], s[0:3], s4 addr64
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    ; return to shader part epilog
   %gep0 = getelementptr float, float addrspace(1)* %ptr, i32 %voffset
@@ -1053,34 +1053,34 @@ define amdgpu_ps float @mubuf_load_sgpr_ptr_offset4095_vgpr_offset(float addrspa
 ; GFX6-LABEL: mubuf_load_sgpr_ptr_offset4095_vgpr_offset:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX6-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v1, 4, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, 4, v0
-; GFX6-NEXT:    v_mul_lo_u32 v0, 4, v0
+; GFX6-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX6-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX6-NEXT:    s_add_u32 s0, s2, 0x3ffc
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX6-NEXT:    s_addc_u32 s1, s3, 0
 ; GFX6-NEXT:    s_mov_b32 s2, 0
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v0
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
-; GFX6-NEXT:    buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT:    buffer_load_dword v0, v[1:2], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    ; return to shader part epilog
 ;
 ; GFX7-LABEL: mubuf_load_sgpr_ptr_offset4095_vgpr_offset:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX7-NEXT:    v_mul_lo_u32 v2, 0, v0
-; GFX7-NEXT:    v_mul_lo_u32 v1, 4, v1
-; GFX7-NEXT:    v_mul_hi_u32 v3, 4, v0
-; GFX7-NEXT:    v_mul_lo_u32 v0, 4, v0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, 4
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, 0
+; GFX7-NEXT:    v_mul_lo_u32 v1, v0, 4
+; GFX7-NEXT:    v_mul_hi_u32 v0, v0, 4
 ; GFX7-NEXT:    s_add_u32 s0, s2, 0x3ffc
-; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GFX7-NEXT:    s_addc_u32 s1, s3, 0
 ; GFX7-NEXT:    s_mov_b32 s2, 0
-; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v2, v0
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
-; GFX7-NEXT:    buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT:    buffer_load_dword v0, v[1:2], s[0:3], 0 addr64
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    ; return to shader part epilog
   %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 4095

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/add-ext.ll b/llvm/test/CodeGen/X86/GlobalISel/add-ext.ll
index d0db8fdc0909..fc21e6e641b0 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/add-ext.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/add-ext.ll
@@ -92,9 +92,8 @@ define i16* @gep16(i32 %i, i16* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addl $-5, %edi
 ; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    movq $2, %rcx
-; CHECK-NEXT:    imulq %rax, %rcx
-; CHECK-NEXT:    leaq (%rsi,%rcx), %rax
+; CHECK-NEXT:    imulq $2, %rax, %rax
+; CHECK-NEXT:    addq %rsi, %rax
 ; CHECK-NEXT:    retq
 
   %add = add nsw i32 %i, -5
@@ -108,9 +107,8 @@ define i32* @gep32(i32 %i, i32* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addl $5, %edi
 ; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    movq $4, %rcx
-; CHECK-NEXT:    imulq %rax, %rcx
-; CHECK-NEXT:    leaq (%rsi,%rcx), %rax
+; CHECK-NEXT:    imulq $4, %rax, %rax
+; CHECK-NEXT:    addq %rsi, %rax
 ; CHECK-NEXT:    retq
 
   %add = add nsw i32 %i, 5
@@ -124,9 +122,8 @@ define i64* @gep64(i32 %i, i64* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addl $-5, %edi
 ; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    movq $8, %rcx
-; CHECK-NEXT:    imulq %rax, %rcx
-; CHECK-NEXT:    leaq (%rsi,%rcx), %rax
+; CHECK-NEXT:    imulq $8, %rax, %rax
+; CHECK-NEXT:    addq %rsi, %rax
 ; CHECK-NEXT:    retq
 
   %add = add nsw i32 %i, -5
@@ -142,9 +139,8 @@ define i128* @gep128(i32 %i, i128* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addl $5, %edi
 ; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    movq $16, %rcx
-; CHECK-NEXT:    imulq %rax, %rcx
-; CHECK-NEXT:    leaq (%rsi,%rcx), %rax
+; CHECK-NEXT:    imulq $16, %rax, %rax
+; CHECK-NEXT:    addq %rsi, %rax
 ; CHECK-NEXT:    retq
 
   %add = add nsw i32 %i, 5
@@ -163,19 +159,18 @@ define void @PR20134(i32* %a, i32 %i) {
 ; CHECK-NEXT:    # kill: def $esi killed $esi def $rsi
 ; CHECK-NEXT:    leal 1(%rsi), %eax
 ; CHECK-NEXT:    cltq
-; CHECK-NEXT:    movq $4, %rcx
-; CHECK-NEXT:    imulq %rcx, %rax
+; CHECK-NEXT:    imulq $4, %rax, %rax
 ; CHECK-NEXT:    addq %rdi, %rax
-; CHECK-NEXT:    leal 2(%rsi), %edx
-; CHECK-NEXT:    movslq %edx, %rdx
-; CHECK-NEXT:    imulq %rcx, %rdx
-; CHECK-NEXT:    addq %rdi, %rdx
-; CHECK-NEXT:    movl (%rdx), %edx
-; CHECK-NEXT:    addl (%rax), %edx
+; CHECK-NEXT:    leal 2(%rsi), %ecx
+; CHECK-NEXT:    movslq %ecx, %rcx
+; CHECK-NEXT:    imulq $4, %rcx, %rcx
+; CHECK-NEXT:    addq %rdi, %rcx
+; CHECK-NEXT:    movl (%rcx), %ecx
+; CHECK-NEXT:    addl (%rax), %ecx
 ; CHECK-NEXT:    movslq %esi, %rax
-; CHECK-NEXT:    imulq %rcx, %rax
+; CHECK-NEXT:    imulq $4, %rax, %rax
 ; CHECK-NEXT:    addq %rdi, %rax
-; CHECK-NEXT:    movl %edx, (%rax)
+; CHECK-NEXT:    movl %ecx, (%rax)
 ; CHECK-NEXT:    retq
 
   %add1 = add nsw i32 %i, 1
@@ -202,19 +197,18 @@ define void @PR20134_zext(i32* %a, i32 %i) {
 ; CHECK-NEXT:    # kill: def $esi killed $esi def $rsi
 ; CHECK-NEXT:    leal 1(%rsi), %eax
 ; CHECK-NEXT:    movl %eax, %eax
-; CHECK-NEXT:    movq $4, %rcx
-; CHECK-NEXT:    imulq %rcx, %rax
+; CHECK-NEXT:    imulq $4, %rax, %rax
 ; CHECK-NEXT:    addq %rdi, %rax
-; CHECK-NEXT:    leal 2(%rsi), %edx
-; CHECK-NEXT:    movl %edx, %edx
-; CHECK-NEXT:    imulq %rcx, %rdx
-; CHECK-NEXT:    addq %rdi, %rdx
-; CHECK-NEXT:    movl (%rdx), %edx
-; CHECK-NEXT:    addl (%rax), %edx
+; CHECK-NEXT:    leal 2(%rsi), %ecx
+; CHECK-NEXT:    movl %ecx, %ecx
+; CHECK-NEXT:    imulq $4, %rcx, %rcx
+; CHECK-NEXT:    addq %rdi, %rcx
+; CHECK-NEXT:    movl (%rcx), %ecx
+; CHECK-NEXT:    addl (%rax), %ecx
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    imulq %rcx, %rax
+; CHECK-NEXT:    imulq $4, %rax, %rax
 ; CHECK-NEXT:    addq %rdi, %rax
-; CHECK-NEXT:    movl %edx, (%rax)
+; CHECK-NEXT:    movl %ecx, (%rax)
 ; CHECK-NEXT:    retq
 
   %add1 = add nuw i32 %i, 1

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll b/llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll
index 94e8f5877353..db757a51ebe8 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll
@@ -8,8 +8,7 @@ define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
 ; X64_GISEL-NEXT:    # kill: def $esi killed $esi def $rsi
 ; X64_GISEL-NEXT:    shlq $56, %rsi
 ; X64_GISEL-NEXT:    sarq $56, %rsi
-; X64_GISEL-NEXT:    movq $4, %rax
-; X64_GISEL-NEXT:    imulq %rsi, %rax
+; X64_GISEL-NEXT:    imulq $4, %rsi, %rax
 ; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
@@ -44,8 +43,7 @@ define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
 ; X64_GISEL-NEXT:    # kill: def $esi killed $esi def $rsi
 ; X64_GISEL-NEXT:    shlq $48, %rsi
 ; X64_GISEL-NEXT:    sarq $48, %rsi
-; X64_GISEL-NEXT:    movq $4, %rax
-; X64_GISEL-NEXT:    imulq %rsi, %rax
+; X64_GISEL-NEXT:    imulq $4, %rsi, %rax
 ; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
@@ -78,9 +76,8 @@ define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
 ; X64_GISEL-LABEL: test_gep_i32:
 ; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movslq %esi, %rax
-; X64_GISEL-NEXT:    movq $4, %rcx
-; X64_GISEL-NEXT:    imulq %rax, %rcx
-; X64_GISEL-NEXT:    leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT:    imulq $4, %rax, %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i32:
@@ -110,8 +107,7 @@ define i32* @test_gep_i32_const(i32 *%arr) {
 define i32* @test_gep_i64(i32 *%arr, i64 %ind) {
 ; X64_GISEL-LABEL: test_gep_i64:
 ; X64_GISEL:       # %bb.0:
-; X64_GISEL-NEXT:    movq $4, %rax
-; X64_GISEL-NEXT:    imulq %rsi, %rax
+; X64_GISEL-NEXT:    imulq $4, %rsi, %rax
 ; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;


        


More information about the llvm-commits mailing list