[llvm] 19b3b88 - AMDGPU/GlobalISel: Fix porting error in 32-bit division

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 10 18:49:54 PDT 2020


Author: Matt Arsenault
Date: 2020-06-10T21:48:58-04:00
New Revision: 19b3b886b7645bcfacbcf4f4e57af944484ffb37

URL: https://github.com/llvm/llvm-project/commit/19b3b886b7645bcfacbcf4f4e57af944484ffb37
DIFF: https://github.com/llvm/llvm-project/commit/19b3b886b7645bcfacbcf4f4e57af944484ffb37.diff

LOG: AMDGPU/GlobalISel: Fix porting error in 32-bit division

The baffling thing is that this passed the OpenCL conformance test for
32-bit integer division, but only failed in the 32-bit path of
BypassSlowDivision for the 64-bit tests.
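
As a rough illustration of the refinement step involved (a standalone
sketch only; the function name refineUDiv32 is made up, and the variable
names loosely mirror the comments in the patched function), the
reciprocal-based estimate Q is within one of the exact quotient, and two
unsigned compares decide whether to nudge it up or down:

    #include <cstdint>

    // Refine a reciprocal-based estimate Q of Num / Den, assuming Q is
    // within one of the exact unsigned quotient. Sketch only, not the
    // actual legalizer code.
    static uint32_t refineUDiv32(uint32_t Num, uint32_t Den, uint32_t Q) {
      uint32_t NumSRem = Q * Den;
      uint32_t Rem = Num - NumSRem;      // wraps if Q was one too high
      bool RemGEDen = Rem >= Den;
      bool RemGEZero = Num >= NumSRem;   // false means Q overshot
      bool Tmp1 = RemGEDen && RemGEZero; // Q is one too low

      // The fix selects Q + 1 when Tmp1 holds; the ported code had the
      // true/false operands of this select swapped.
      uint32_t Div = Tmp1 ? Q + 1 : Q;
      return RemGEZero ? Div : Q - 1;    // undo a one-too-high estimate
    }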

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sdiv.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-udiv.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 67f889f90e99..83ebb0452474 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2561,8 +2561,8 @@ void AMDGPULegalizerInfo::legalizeUDIV_UREM32Impl(MachineIRBuilder &B,
   // Quotient_S_One = Quotient - 1
   auto Quotient_S_One = B.buildSub(S32, Quotient, One);
 
-  // Div = (Tmp1 ? Quotient : Quotient_A_One)
-  auto Div = B.buildSelect(S32, Tmp1, Quotient, Quotient_A_One);
+  // Div = (Tmp1 ? Quotient_A_One : Quotient)
+  auto Div = B.buildSelect(S32, Tmp1, Quotient_A_One, Quotient);
 
   // Div = (Remainder_GE_Zero ? Div : Quotient_S_One)
   if (IsRem) {

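For reference when reading the test updates below: G_SELECT's operands
are (condition, value-if-true, value-if-false), and
MachineIRBuilder::buildSelect(Res, Tst, Op0, Op1) emits Op0 as the value
taken when Tst is true. The corrected call

    auto Div = B.buildSelect(S32, Tmp1, Quotient_A_One, Quotient);

therefore legalizes to MIR of the form

    [[SELECT2]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]

which is the operand swap every CHECK line below picks up.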
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sdiv.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sdiv.mir
index 8690d04ef023..9e69ea9f89ae 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sdiv.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sdiv.mir
@@ -43,7 +43,7 @@ body: |
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX6: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -83,7 +83,7 @@ body: |
     ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX8: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -123,7 +123,7 @@ body: |
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX9: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -177,7 +177,7 @@ body: |
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX6: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -210,7 +210,7 @@ body: |
     ; GFX6: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX6: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C3]]
     ; GFX6: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C3]]
-    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD7]]
+    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD7]], [[UMULH5]]
     ; GFX6: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB8]]
     ; GFX6: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[ASHR2]], [[ASHR3]]
     ; GFX6: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[SELECT7]], [[XOR6]]
@@ -253,7 +253,7 @@ body: |
     ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX8: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -286,7 +286,7 @@ body: |
     ; GFX8: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX8: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C3]]
     ; GFX8: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C3]]
-    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD7]]
+    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD7]], [[UMULH5]]
     ; GFX8: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB8]]
     ; GFX8: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[ASHR2]], [[ASHR3]]
     ; GFX8: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[SELECT7]], [[XOR6]]
@@ -329,7 +329,7 @@ body: |
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX9: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -362,7 +362,7 @@ body: |
     ; GFX9: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX9: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C3]]
     ; GFX9: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C3]]
-    ; GFX9: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD7]]
+    ; GFX9: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD7]], [[UMULH5]]
     ; GFX9: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB8]]
     ; GFX9: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[ASHR2]], [[ASHR3]]
     ; GFX9: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[SELECT7]], [[XOR6]]
@@ -485,7 +485,7 @@ body: |
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX6: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -530,7 +530,7 @@ body: |
     ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX8: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -575,7 +575,7 @@ body: |
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX9: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -640,7 +640,7 @@ body: |
     ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C4]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C4]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX6: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -677,7 +677,7 @@ body: |
     ; GFX6: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX6: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C4]]
     ; GFX6: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C4]]
-    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD7]]
+    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD7]], [[UMULH5]]
     ; GFX6: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB8]]
     ; GFX6: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[ASHR2]], [[ASHR3]]
     ; GFX6: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[SELECT7]], [[XOR6]]
@@ -734,7 +734,7 @@ body: |
     ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C4]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C4]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX8: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -771,7 +771,7 @@ body: |
     ; GFX8: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX8: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C4]]
     ; GFX8: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C4]]
-    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD7]]
+    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD7]], [[UMULH5]]
     ; GFX8: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB8]]
     ; GFX8: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[ASHR2]], [[ASHR3]]
     ; GFX8: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[SELECT7]], [[XOR6]]
@@ -828,7 +828,7 @@ body: |
     ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C4]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C4]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX9: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -865,7 +865,7 @@ body: |
     ; GFX9: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX9: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C4]]
     ; GFX9: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C4]]
-    ; GFX9: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD7]]
+    ; GFX9: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD7]], [[UMULH5]]
     ; GFX9: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB8]]
     ; GFX9: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[ASHR2]], [[ASHR3]]
     ; GFX9: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[SELECT7]], [[XOR6]]
@@ -924,7 +924,7 @@ body: |
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX6: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -969,7 +969,7 @@ body: |
     ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX8: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -1014,7 +1014,7 @@ body: |
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX9: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -1074,7 +1074,7 @@ body: |
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX6: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -1119,7 +1119,7 @@ body: |
     ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX8: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]
@@ -1164,7 +1164,7 @@ body: |
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD3]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD3]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]]
     ; GFX9: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[XOR2]]

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-udiv.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-udiv.mir
index d9204693d518..b0615686427b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-udiv.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-udiv.mir
@@ -36,7 +36,7 @@ body: |
     ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C2]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C2]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD1]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: $vgpr0 = COPY [[SELECT3]](s32)
     ; GFX8-LABEL: name: test_udiv_s32
@@ -66,7 +66,7 @@ body: |
     ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C2]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C2]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD1]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: $vgpr0 = COPY [[SELECT3]](s32)
     ; GFX9-LABEL: name: test_udiv_s32
@@ -96,7 +96,7 @@ body: |
     ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C2]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C2]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD1]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: $vgpr0 = COPY [[SELECT3]](s32)
     %0:_(s32) = COPY $vgpr0
@@ -140,7 +140,7 @@ body: |
     ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C2]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C2]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD1]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[UITOFP1:%[0-9]+]]:_(s32) = G_UITOFP [[UV3]](s32)
     ; GFX6: [[AMDGPU_RCP_IFLAG1:%[0-9]+]]:_(s32) = G_AMDGPU_RCP_IFLAG [[UITOFP1]](s32)
@@ -164,7 +164,7 @@ body: |
     ; GFX6: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C2]]
     ; GFX6: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C2]]
-    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD3]]
+    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD3]], [[UMULH5]]
     ; GFX6: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB7]]
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT3]](s32), [[SELECT7]](s32)
     ; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -197,7 +197,7 @@ body: |
     ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C2]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C2]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD1]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[UITOFP1:%[0-9]+]]:_(s32) = G_UITOFP [[UV3]](s32)
     ; GFX8: [[AMDGPU_RCP_IFLAG1:%[0-9]+]]:_(s32) = G_AMDGPU_RCP_IFLAG [[UITOFP1]](s32)
@@ -221,7 +221,7 @@ body: |
     ; GFX8: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C2]]
     ; GFX8: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C2]]
-    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD3]]
+    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD3]], [[UMULH5]]
     ; GFX8: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB7]]
     ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT3]](s32), [[SELECT7]](s32)
     ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -254,7 +254,7 @@ body: |
     ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C2]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C2]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s1), [[ADD1]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[UITOFP1:%[0-9]+]]:_(s32) = G_UITOFP [[UV3]](s32)
     ; GFX9: [[AMDGPU_RCP_IFLAG1:%[0-9]+]]:_(s32) = G_AMDGPU_RCP_IFLAG [[UITOFP1]](s32)
@@ -278,7 +278,7 @@ body: |
     ; GFX9: [[AND1:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C2]]
     ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C2]]
-    ; GFX9: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[UMULH5]], [[ADD3]]
+    ; GFX9: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s1), [[ADD3]], [[UMULH5]]
     ; GFX9: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB7]]
     ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT3]](s32), [[SELECT7]](s32)
     ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -1715,7 +1715,7 @@ body: |
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
@@ -1752,7 +1752,7 @@ body: |
     ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
@@ -1789,7 +1789,7 @@ body: |
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
@@ -1846,7 +1846,7 @@ body: |
     ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C4]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C4]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
     ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
@@ -1874,7 +1874,7 @@ body: |
     ; GFX6: [[AND5:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C4]]
     ; GFX6: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C4]]
-    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND5]](s1), [[UMULH5]], [[ADD3]]
+    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND5]](s1), [[ADD3]], [[UMULH5]]
     ; GFX6: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB7]]
     ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX6: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C1]]
@@ -1921,7 +1921,7 @@ body: |
     ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C4]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C4]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
     ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
@@ -1949,7 +1949,7 @@ body: |
     ; GFX8: [[AND5:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C4]]
     ; GFX8: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C4]]
-    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND5]](s1), [[UMULH5]], [[ADD3]]
+    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND5]](s1), [[ADD3]], [[UMULH5]]
     ; GFX8: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB7]]
     ; GFX8: [[COPY6:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX8: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C1]]
@@ -1996,7 +1996,7 @@ body: |
     ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C4]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C4]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
     ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
@@ -2024,7 +2024,7 @@ body: |
     ; GFX9: [[AND5:%[0-9]+]]:_(s1) = G_AND [[ICMP4]], [[ICMP5]]
     ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH5]], [[C4]]
     ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[UMULH5]], [[C4]]
-    ; GFX9: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND5]](s1), [[UMULH5]], [[ADD3]]
+    ; GFX9: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[AND5]](s1), [[ADD3]], [[UMULH5]]
     ; GFX9: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT6]], [[SUB7]]
     ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY [[SELECT7]](s32)
@@ -2074,7 +2074,7 @@ body: |
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
@@ -2110,7 +2110,7 @@ body: |
     ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX8: $vgpr0 = COPY [[COPY4]](s32)
@@ -2146,7 +2146,7 @@ body: |
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX9: $vgpr0 = COPY [[COPY4]](s32)
@@ -2197,7 +2197,7 @@ body: |
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
@@ -2233,7 +2233,7 @@ body: |
     ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX8: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX8: $vgpr0 = COPY [[COPY4]](s32)
@@ -2269,7 +2269,7 @@ body: |
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH2]], [[C3]]
     ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[UMULH2]], [[C3]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[UMULH2]], [[ADD1]]
+    ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s1), [[ADD1]], [[UMULH2]]
     ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[SELECT2]], [[SUB3]]
     ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT3]](s32)
     ; GFX9: $vgpr0 = COPY [[COPY4]](s32)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
index ecd9cd5c547d..d10c99e09f7b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
@@ -34,7 +34,7 @@ define i32 @v_sdiv_i32(i32 %num, i32 %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v4
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v1
 ; GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v5, v3, s[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v3, v5, s[4:5]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
 ; GISEL-NEXT:    v_xor_b32_e32 v1, v2, v2
 ; GISEL-NEXT:    v_xor_b32_e32 v0, v0, v1
@@ -124,7 +124,7 @@ define amdgpu_ps i32 @s_sdiv_i32(i32 inreg %num, i32 inreg %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e32 vcc, s3, v1
 ; GISEL-NEXT:    v_cmp_le_u32_e64 s[0:1], s4, v4
 ; GISEL-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
 ; GISEL-NEXT:    s_xor_b32 s0, s2, s2
 ; GISEL-NEXT:    v_xor_b32_e32 v0, s0, v0
@@ -244,9 +244,9 @@ define <2 x i32> @v_sdiv_v2i32(<2 x i32> %num, <2 x i32> %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[6:7], v14, v2
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v3
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v9, v4, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v9, s[6:7]
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e64 v1, v12, v5, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v1, v5, v12, s[6:7]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v10, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e64 v1, v13, v1, s[4:5]
 ; GISEL-NEXT:    v_xor_b32_e32 v0, v0, v6
@@ -379,7 +379,7 @@ define i32 @v_sdiv_i32_pow2k_denom(i32 %num) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v4
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v2
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v5, v3, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v3, v5, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
 ; CHECK-NEXT:    v_xor_b32_e32 v1, v1, v1
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v1
@@ -448,9 +448,9 @@ define <2 x i32> @v_sdiv_v2i32_pow2k_denom(<2 x i32> %num) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[6:7], v14, v2
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v3
 ; CHECK-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v9, v4, s[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v4, v9, s[6:7]
 ; CHECK-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v1, v12, v5, s[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v1, v5, v12, s[6:7]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v10, v0, vcc
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, v13, v1, s[4:5]
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v6
@@ -492,7 +492,7 @@ define i32 @v_sdiv_i32_oddk_denom(i32 %num) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v4
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v2
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v5, v3, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v3, v5, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
 ; CHECK-NEXT:    v_xor_b32_e32 v1, v1, v1
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v1
@@ -561,9 +561,9 @@ define <2 x i32> @v_sdiv_v2i32_oddk_denom(<2 x i32> %num) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[6:7], v14, v2
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v3
 ; CHECK-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v9, v4, s[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v4, v9, s[6:7]
 ; CHECK-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v1, v12, v5, s[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v1, v5, v12, s[6:7]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v10, v0, vcc
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, v13, v1, s[4:5]
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v6
@@ -606,7 +606,7 @@ define i32 @v_sdiv_i32_pow2_shl_denom(i32 %x, i32 %y) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v4
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v1
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v5, v3, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v3, v5, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
 ; CHECK-NEXT:    v_xor_b32_e32 v1, v2, v2
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v1
@@ -677,9 +677,9 @@ define <2 x i32> @v_sdiv_v2i32_pow2_shl_denom(<2 x i32> %x, <2 x i32> %y) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[6:7], v14, v2
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v3
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v9, v4, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v9, s[6:7]
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e64 v1, v12, v5, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v1, v5, v12, s[6:7]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v10, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e64 v1, v13, v1, s[4:5]
 ; GISEL-NEXT:    v_xor_b32_e32 v0, v0, v6
@@ -819,7 +819,7 @@ define i32 @v_sdiv_i32_24bit(i32 %num, i32 %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v4
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v1
 ; GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v5, v3, s[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v3, v5, s[4:5]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
 ; GISEL-NEXT:    v_xor_b32_e32 v1, v2, v2
 ; GISEL-NEXT:    v_xor_b32_e32 v0, v0, v1
@@ -935,9 +935,9 @@ define <2 x i32> @v_sdiv_v2i32_24bit(<2 x i32> %num, <2 x i32> %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[6:7], v14, v2
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v3
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v9, v4, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v9, s[6:7]
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e64 v1, v12, v5, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v1, v5, v12, s[6:7]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v10, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e64 v1, v13, v1, s[4:5]
 ; GISEL-NEXT:    v_xor_b32_e32 v0, v0, v6

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll
index b968982585f0..31ce2d033eea 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll
@@ -29,7 +29,7 @@ define i32 @v_udiv_i32(i32 %num, i32 %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v1
 ; GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v2, s[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -102,7 +102,7 @@ define amdgpu_ps i32 @s_udiv_i32(i32 inreg %num, i32 inreg %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e32 vcc, s0, v1
 ; GISEL-NEXT:    v_cmp_le_u32_e64 s[0:1], s1, v4
 ; GISEL-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
 ; GISEL-NEXT:    v_readfirstlane_b32 s0, v0
 ; GISEL-NEXT:    ; return to shader part epilog
@@ -199,9 +199,9 @@ define <2 x i32> @v_udiv_v2i32(<2 x i32> %num, <2 x i32> %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[6:7], v12, v2
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v3
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v7, v4, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v7, s[6:7]
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e64 v1, v10, v5, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v1, v5, v10, s[6:7]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e64 v1, v11, v1, s[4:5]
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
@@ -309,7 +309,7 @@ define i32 @v_udiv_i32_pow2k_denom(i32 %num) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v1
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v4, v2, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %result = udiv i32 %num, 4096
@@ -363,9 +363,9 @@ define <2 x i32> @v_udiv_v2i32_pow2k_denom(<2 x i32> %num) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[6:7], v11, v2
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v2
 ; CHECK-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v6, v3, s[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v3, v6, s[6:7]
 ; CHECK-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v1, v9, v4, s[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v1, v4, v9, s[6:7]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v7, v0, vcc
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, v10, v1, s[4:5]
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
@@ -399,7 +399,7 @@ define i32 @v_udiv_i32_oddk_denom(i32 %num) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
 ; CHECK-NEXT:    v_cmp_le_u32_e64 s[4:5], s6, v5
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v3, v1, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v1, v3, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %result = udiv i32 %num, 1235195
@@ -453,9 +453,9 @@ define <2 x i32> @v_udiv_v2i32_oddk_denom(<2 x i32> %num) {
 ; CHECK-NEXT:    v_cmp_le_u32_e64 s[6:7], s8, v11
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v2
 ; CHECK-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v6, v3, s[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v3, v6, s[6:7]
 ; CHECK-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v1, v9, v4, s[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v1, v4, v9, s[6:7]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v7, v0, vcc
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, v10, v1, s[4:5]
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
@@ -489,7 +489,7 @@ define i32 @v_udiv_i32_pow2_shl_denom(i32 %x, i32 %y) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v1
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v4, v2, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shl.y = shl i32 4096, %y
@@ -545,9 +545,9 @@ define <2 x i32> @v_udiv_v2i32_pow2_shl_denom(<2 x i32> %x, <2 x i32> %y) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[6:7], v12, v2
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v3
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v7, v4, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v7, s[6:7]
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e64 v1, v10, v5, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v1, v5, v10, s[6:7]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e64 v1, v11, v1, s[4:5]
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
@@ -660,7 +660,7 @@ define i32 @v_udiv_i32_24bit(i32 %num, i32 %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v1
 ; GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v2, s[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -761,9 +761,9 @@ define <2 x i32> @v_udiv_v2i32_24bit(<2 x i32> %num, <2 x i32> %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[6:7], v12, v2
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[8:9], v0, v3
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v7, v4, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v7, s[6:7]
 ; GISEL-NEXT:    s_and_b64 s[6:7], s[8:9], s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e64 v1, v10, v5, s[6:7]
+; GISEL-NEXT:    v_cndmask_b32_e64 v1, v5, v10, s[6:7]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e64 v1, v11, v1, s[4:5]
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
index fa3ffea3bd59..10bdf82b4e16 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
@@ -171,7 +171,7 @@ define i64 @v_udiv_i64(i64 %num, i64 %den) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v2
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v4, v1, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v1, v4, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v4, v5, v0, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v5, 0
 ; CHECK-NEXT:  BB0_4:
@@ -355,7 +355,7 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, s0, v1
 ; CHECK-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v4
 ; CHECK-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
 ; CHECK-NEXT:  BB1_4:
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
@@ -801,7 +801,7 @@ define <2 x i64> @v_udiv_v2i64(<2 x i64> %num, <2 x i64> %den) {
 ; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v1
 ; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v10, v4
 ; CGP-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CGP-NEXT:    v_cndmask_b32_e64 v0, v5, v0, s[4:5]
+; CGP-NEXT:    v_cndmask_b32_e64 v0, v0, v5, s[4:5]
 ; CGP-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc
 ; CGP-NEXT:    v_mov_b32_e32 v1, 0
 ; CGP-NEXT:  BB2_4:
@@ -969,7 +969,7 @@ define <2 x i64> @v_udiv_v2i64(<2 x i64> %num, <2 x i64> %den) {
 ; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v4
 ; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v8, v6
 ; CGP-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CGP-NEXT:    v_cndmask_b32_e64 v2, v5, v3, s[4:5]
+; CGP-NEXT:    v_cndmask_b32_e64 v2, v3, v5, s[4:5]
 ; CGP-NEXT:    v_cndmask_b32_e32 v4, v7, v2, vcc
 ; CGP-NEXT:    v_mov_b32_e32 v5, 0
 ; CGP-NEXT:  BB2_8:
@@ -2473,7 +2473,7 @@ define i64 @v_udiv_i64_pow2_shl_denom(i64 %x, i64 %y) {
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v4
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v3, v1, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v1, v3, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e32 v2, v5, v0, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v3, 0
 ; CHECK-NEXT:  BB7_4:
@@ -2922,7 +2922,7 @@ define <2 x i64> @v_udiv_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) {
 ; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v1
 ; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v10
 ; CGP-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CGP-NEXT:    v_cndmask_b32_e64 v0, v4, v0, s[4:5]
+; CGP-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[4:5]
 ; CGP-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
 ; CGP-NEXT:    v_mov_b32_e32 v1, 0
 ; CGP-NEXT:  BB8_4:
@@ -3090,7 +3090,7 @@ define <2 x i64> @v_udiv_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) {
 ; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v4
 ; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v8
 ; CGP-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; CGP-NEXT:    v_cndmask_b32_e64 v2, v5, v3, s[4:5]
+; CGP-NEXT:    v_cndmask_b32_e64 v2, v3, v5, s[4:5]
 ; CGP-NEXT:    v_cndmask_b32_e32 v4, v6, v2, vcc
 ; CGP-NEXT:    v_mov_b32_e32 v5, 0
 ; CGP-NEXT:  BB8_8:
@@ -3131,7 +3131,7 @@ define i64 @v_udiv_i64_24bit(i64 %num, i64 %den) {
 ; GISEL-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
 ; GISEL-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v1
 ; GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v4, v2, s[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
 ; GISEL-NEXT:    v_mov_b32_e32 v1, 0
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]

More information about the llvm-commits mailing list