[llvm] 93311a9 - AMDGPU/GlobalISel: Fix custom lowering of llvm.trunc.f64 for SI

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 20 07:06:29 PDT 2020


Author: Matt Arsenault
Date: 2020-07-20T10:06:18-04:00
New Revision: 93311a981283775031113c5a6db0591f03357b7b

URL: https://github.com/llvm/llvm-project/commit/93311a981283775031113c5a6db0591f03357b7b
DIFF: https://github.com/llvm/llvm-project/commit/93311a981283775031113c5a6db0591f03357b7b.diff

LOG: AMDGPU/GlobalISel: Fix custom lowering of llvm.trunc.f64 for SI

The custom lowering was missing the source operand on the BFE
(llvm.amdgcn.ubfe) intrinsic and was not erasing the original
G_INTRINSIC_TRUNC instruction.
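
For reference, a minimal sketch of the corrected helper as it reads after this
patch, reconstructed from the hunks and test expectations below; the local
declarations and the exponent-bias tail are paraphrased from context, so treat
them as assumptions rather than a verbatim quote of the upstream function:

  // Lives inside AMDGPULegalizerInfo.cpp, where the MachineIRBuilder and
  // AMDGPU intrinsic headers are already included.
  static MachineInstrBuilder extractF64Exponent(Register Hi,
                                                MachineIRBuilder &B) {
    const unsigned FractBits = 52;
    const unsigned ExpBits = 11;
    const LLT S32 = LLT::scalar(32);

    auto Const0 = B.buildConstant(S32, FractBits - 32); // bit offset 20
    auto Const1 = B.buildConstant(S32, ExpBits);        // bit width 11

    // The fix: the high 32 bits of the f64 (Hi) are passed as the first
    // operand of llvm.amdgcn.ubfe, ahead of the offset and width constants.
    auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
      .addUse(Hi)
      .addUse(Const0.getReg(0))
      .addUse(Const1.getReg(0));

    // Subtract the f64 exponent bias, matching the G_SUB ..., 1023 in the
    // updated test checks (assumed tail, not shown in the hunks).
    return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
  }

The second half of the fix is the MI.eraseFromParent() call in
legalizeIntrinsicTrunc, which removes the original G_INTRINSIC_TRUNC once the
expanded select sequence has been emitted in its place.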

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fceil.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptosi.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptoui.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-trunc.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 415b707e01cf..f0a4cb7496cb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1754,7 +1754,7 @@ bool AMDGPULegalizerInfo::legalizeFceil(
   return true;
 }
 
-static MachineInstrBuilder extractF64Exponent(unsigned Hi,
+static MachineInstrBuilder extractF64Exponent(Register Hi,
                                               MachineIRBuilder &B) {
   const unsigned FractBits = 52;
   const unsigned ExpBits = 11;
@@ -1764,6 +1764,7 @@ static MachineInstrBuilder extractF64Exponent(unsigned Hi,
   auto Const1 = B.buildConstant(S32, ExpBits);
 
   auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
+    .addUse(Hi)
     .addUse(Const0.getReg(0))
     .addUse(Const1.getReg(0));
 
@@ -1811,6 +1812,7 @@ bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
 
   auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
   B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
+  MI.eraseFromParent();
   return true;
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fceil.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fceil.mir
index 39537fef6471..0ab5583f20c2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fceil.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fceil.mir
@@ -79,7 +79,7 @@ body: |
     ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV1]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -96,14 +96,13 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[COPY]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[COPY]]
     ; SI: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
     ; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY]](s64), [[C8]]
-    ; SI: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(one), [[COPY]](s64), [[INTRINSIC_TRUNC]]
+    ; SI: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(one), [[COPY]](s64), [[SELECT1]]
     ; SI: [[AND2:%[0-9]+]]:_(s1) = G_AND [[FCMP]], [[FCMP1]]
-    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[AND2]](s1), [[C9]], [[C8]]
-    ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT1]]
+    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[AND2]](s1), [[C9]], [[C8]]
+    ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[SELECT1]], [[SELECT2]]
     ; SI: [[FCEIL:%[0-9]+]]:_(s64) = G_FCEIL [[COPY]]
     ; SI: $vgpr0_vgpr1 = COPY [[FCEIL]](s64)
     ; CI-LABEL: name: test_fceil_s64
@@ -197,7 +196,7 @@ body: |
     ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV3]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -214,17 +213,16 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV]]
     ; SI: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
     ; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UV]](s64), [[C8]]
-    ; SI: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(one), [[UV]](s64), [[INTRINSIC_TRUNC]]
+    ; SI: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(one), [[UV]](s64), [[SELECT1]]
     ; SI: [[AND2:%[0-9]+]]:_(s1) = G_AND [[FCMP]], [[FCMP1]]
-    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[AND2]](s1), [[C9]], [[C8]]
-    ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT1]]
+    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[AND2]](s1), [[C9]], [[C8]]
+    ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[SELECT1]], [[SELECT2]]
     ; SI: [[FCEIL:%[0-9]+]]:_(s64) = G_FCEIL [[UV]]
     ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
-    ; SI: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV5]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[INT1]], [[C2]]
     ; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV5]], [[C3]]
     ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C5]](s32), [[AND3]](s32)
@@ -233,14 +231,13 @@ body: |
     ; SI: [[AND4:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[XOR1]]
     ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB1]](s32), [[C5]]
     ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB1]](s32), [[C7]]
-    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV1]], [[AND4]]
-    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT2]]
-    ; SI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV1]]
+    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV1]], [[AND4]]
+    ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT3]]
     ; SI: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UV1]](s64), [[C8]]
-    ; SI: [[FCMP3:%[0-9]+]]:_(s1) = G_FCMP floatpred(one), [[UV1]](s64), [[INTRINSIC_TRUNC1]]
+    ; SI: [[FCMP3:%[0-9]+]]:_(s1) = G_FCMP floatpred(one), [[UV1]](s64), [[SELECT4]]
     ; SI: [[AND5:%[0-9]+]]:_(s1) = G_AND [[FCMP2]], [[FCMP3]]
-    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[AND5]](s1), [[C9]], [[C8]]
-    ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT3]]
+    ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[AND5]](s1), [[C9]], [[C8]]
+    ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[SELECT4]], [[SELECT5]]
     ; SI: [[FCEIL1:%[0-9]+]]:_(s64) = G_FCEIL [[UV1]]
     ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FCEIL]](s64), [[FCEIL1]](s64)
     ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptosi.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptosi.mir
index c88ef0daad5b..74f6f01a4548 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptosi.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptosi.mir
@@ -171,7 +171,7 @@ body: |
     ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV1]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -188,18 +188,17 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[COPY]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[COPY]]
     ; SI: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3DF0000000000000
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 0xC1F0000000000000
-    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC]], [[C8]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[SELECT1]], [[C8]]
     ; SI: [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
     ; SI: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3FEFFFFFFFFFFFFF
     ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s64) = G_FMINNUM_IEEE [[INT1]], [[C10]]
     ; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[FMUL]](s64), [[FMUL]]
-    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMUL]], [[FMINNUM_IEEE]]
-    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT1]]
+    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMUL]], [[FMINNUM_IEEE]]
+    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT2]]
     ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[FNEG]]
-    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FADD]], [[C9]], [[INTRINSIC_TRUNC]]
+    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FADD]], [[C9]], [[SELECT1]]
     ; SI: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[FADD]](s64)
     ; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s64)
     ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI]](s32), [[FPTOSI]](s32)
@@ -232,7 +231,7 @@ body: |
     ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV1]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -249,16 +248,15 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[COPY]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = nnan G_INTRINSIC_TRUNC [[COPY]]
     ; SI: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3DF0000000000000
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 0xC1F0000000000000
-    ; SI: [[FMUL:%[0-9]+]]:_(s64) = nnan G_FMUL [[INTRINSIC_TRUNC]], [[C8]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s64) = nnan G_FMUL [[SELECT1]], [[C8]]
     ; SI: [[INT1:%[0-9]+]]:_(s64) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
     ; SI: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3FEFFFFFFFFFFFFF
     ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s64) = nnan G_FMINNUM_IEEE [[INT1]], [[C10]]
     ; SI: [[FNEG:%[0-9]+]]:_(s64) = nnan G_FNEG [[FMINNUM_IEEE]]
     ; SI: [[FADD:%[0-9]+]]:_(s64) = nnan G_FADD [[FMUL]], [[FNEG]]
-    ; SI: [[FMA:%[0-9]+]]:_(s64) = nnan G_FMA [[FADD]], [[C9]], [[INTRINSIC_TRUNC]]
+    ; SI: [[FMA:%[0-9]+]]:_(s64) = nnan G_FMA [[FADD]], [[C9]], [[SELECT1]]
     ; SI: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[FADD]](s64)
     ; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s64)
     ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI]](s32), [[FPTOSI]](s32)
@@ -292,7 +290,7 @@ body: |
     ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV3]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -309,23 +307,22 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV]]
     ; SI: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3DF0000000000000
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 0xC1F0000000000000
-    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC]], [[C8]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[SELECT1]], [[C8]]
     ; SI: [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
     ; SI: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3FEFFFFFFFFFFFFF
     ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s64) = G_FMINNUM_IEEE [[INT1]], [[C10]]
     ; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[FMUL]](s64), [[FMUL]]
-    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMUL]], [[FMINNUM_IEEE]]
-    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT1]]
+    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMUL]], [[FMINNUM_IEEE]]
+    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT2]]
     ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[FNEG]]
-    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FADD]], [[C9]], [[INTRINSIC_TRUNC]]
+    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FADD]], [[C9]], [[SELECT1]]
     ; SI: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[FADD]](s64)
     ; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s64)
     ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI]](s32), [[FPTOSI]](s32)
     ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
-    ; SI: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV5]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[INT2]], [[C2]]
     ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV5]], [[C3]]
     ; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C5]](s32), [[AND2]](s32)
@@ -334,17 +331,16 @@ body: |
     ; SI: [[AND3:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[XOR1]]
     ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB1]](s32), [[C5]]
     ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB1]](s32), [[C7]]
-    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV2]], [[AND3]]
-    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT2]]
-    ; SI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV1]]
-    ; SI: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC1]], [[C8]]
+    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV2]], [[AND3]]
+    ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT3]]
+    ; SI: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[SELECT4]], [[C8]]
     ; SI: [[INT3:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL1]](s64)
     ; SI: [[FMINNUM_IEEE1:%[0-9]+]]:_(s64) = G_FMINNUM_IEEE [[INT3]], [[C10]]
     ; SI: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[FMUL1]](s64), [[FMUL1]]
-    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[FMUL1]], [[FMINNUM_IEEE1]]
-    ; SI: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[SELECT3]]
+    ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[FMUL1]], [[FMINNUM_IEEE1]]
+    ; SI: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[SELECT5]]
     ; SI: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[FMUL1]], [[FNEG1]]
-    ; SI: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FADD1]], [[C9]], [[INTRINSIC_TRUNC1]]
+    ; SI: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FADD1]], [[C9]], [[SELECT4]]
     ; SI: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FADD1]](s64)
     ; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA1]](s64)
     ; SI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOSI1]](s32)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptoui.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptoui.mir
index 97bf44836ea1..87482c0b48d8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptoui.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fptoui.mir
@@ -171,7 +171,7 @@ body: |
     ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV1]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -188,18 +188,17 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[COPY]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[COPY]]
     ; SI: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3DF0000000000000
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 0xC1F0000000000000
-    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC]], [[C8]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[SELECT1]], [[C8]]
     ; SI: [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
     ; SI: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3FEFFFFFFFFFFFFF
     ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s64) = G_FMINNUM_IEEE [[INT1]], [[C10]]
     ; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[FMUL]](s64), [[FMUL]]
-    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMUL]], [[FMINNUM_IEEE]]
-    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT1]]
+    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMUL]], [[FMINNUM_IEEE]]
+    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT2]]
     ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[FNEG]]
-    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FADD]], [[C9]], [[INTRINSIC_TRUNC]]
+    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FADD]], [[C9]], [[SELECT1]]
     ; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FADD]](s64)
     ; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s64)
     ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
@@ -232,7 +231,7 @@ body: |
     ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV1]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -249,16 +248,15 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[COPY]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = nnan G_INTRINSIC_TRUNC [[COPY]]
     ; SI: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3DF0000000000000
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 0xC1F0000000000000
-    ; SI: [[FMUL:%[0-9]+]]:_(s64) = nnan G_FMUL [[INTRINSIC_TRUNC]], [[C8]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s64) = nnan G_FMUL [[SELECT1]], [[C8]]
     ; SI: [[INT1:%[0-9]+]]:_(s64) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
     ; SI: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3FEFFFFFFFFFFFFF
     ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s64) = nnan G_FMINNUM_IEEE [[INT1]], [[C10]]
     ; SI: [[FNEG:%[0-9]+]]:_(s64) = nnan G_FNEG [[FMINNUM_IEEE]]
     ; SI: [[FADD:%[0-9]+]]:_(s64) = nnan G_FADD [[FMUL]], [[FNEG]]
-    ; SI: [[FMA:%[0-9]+]]:_(s64) = nnan G_FMA [[FADD]], [[C9]], [[INTRINSIC_TRUNC]]
+    ; SI: [[FMA:%[0-9]+]]:_(s64) = nnan G_FMA [[FADD]], [[C9]], [[SELECT1]]
     ; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FADD]](s64)
     ; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s64)
     ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
@@ -292,7 +290,7 @@ body: |
     ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV3]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -309,23 +307,22 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV]]
     ; SI: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3DF0000000000000
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 0xC1F0000000000000
-    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC]], [[C8]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[SELECT1]], [[C8]]
     ; SI: [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
     ; SI: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x3FEFFFFFFFFFFFFF
     ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s64) = G_FMINNUM_IEEE [[INT1]], [[C10]]
     ; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[FMUL]](s64), [[FMUL]]
-    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMUL]], [[FMINNUM_IEEE]]
-    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT1]]
+    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMUL]], [[FMINNUM_IEEE]]
+    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT2]]
     ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[FNEG]]
-    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FADD]], [[C9]], [[INTRINSIC_TRUNC]]
+    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FADD]], [[C9]], [[SELECT1]]
     ; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FADD]](s64)
     ; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s64)
     ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
     ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
-    ; SI: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV5]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[INT2]], [[C2]]
     ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV5]], [[C3]]
     ; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C5]](s32), [[AND2]](s32)
@@ -334,17 +331,16 @@ body: |
     ; SI: [[AND3:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[XOR1]]
     ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB1]](s32), [[C5]]
     ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB1]](s32), [[C7]]
-    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV2]], [[AND3]]
-    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT2]]
-    ; SI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV1]]
-    ; SI: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC1]], [[C8]]
+    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV2]], [[AND3]]
+    ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT3]]
+    ; SI: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[SELECT4]], [[C8]]
     ; SI: [[INT3:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL1]](s64)
     ; SI: [[FMINNUM_IEEE1:%[0-9]+]]:_(s64) = G_FMINNUM_IEEE [[INT3]], [[C10]]
     ; SI: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[FMUL1]](s64), [[FMUL1]]
-    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[FMUL1]], [[FMINNUM_IEEE1]]
-    ; SI: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[SELECT3]]
+    ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[FMUL1]], [[FMINNUM_IEEE1]]
+    ; SI: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[SELECT5]]
     ; SI: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[FMUL1]], [[FNEG1]]
-    ; SI: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FADD1]], [[C9]], [[INTRINSIC_TRUNC1]]
+    ; SI: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FADD1]], [[C9]], [[SELECT4]]
     ; SI: [[FPTOUI2:%[0-9]+]]:_(s32) = G_FPTOUI [[FADD1]](s64)
     ; SI: [[FPTOUI3:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA1]](s64)
     ; SI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI3]](s32), [[FPTOUI2]](s32)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
index 005025bca503..463deb7a565d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
@@ -126,7 +126,7 @@ body: |
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV1]](s32), [[C]](s32), [[C1]](s32)
     ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -143,8 +143,7 @@ body: |
     ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[COPY]], [[SELECT]]
-    ; GFX6: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[COPY]]
-    ; GFX6: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC]]
+    ; GFX6: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT1]]
     ; GFX6: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[FNEG]]
     ; GFX6: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[FADD]]
     ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
@@ -154,8 +153,8 @@ body: |
     ; GFX6: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C10]]
     ; GFX6: [[OR:%[0-9]+]]:_(s64) = G_OR [[C11]], [[AND2]]
     ; GFX6: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C9]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C8]]
-    ; GFX6: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C8]]
+    ; GFX6: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[SELECT1]], [[SELECT2]]
     ; GFX6: $vgpr0_vgpr1 = COPY [[FADD1]](s64)
     ; GFX8-LABEL: name: test_intrinsic_round_s64
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
@@ -292,7 +291,7 @@ body: |
     ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
     ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV3]](s32), [[C]](s32), [[C1]](s32)
     ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -309,8 +308,7 @@ body: |
     ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
-    ; GFX6: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV]]
-    ; GFX6: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC]]
+    ; GFX6: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT1]]
     ; GFX6: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[UV]], [[FNEG]]
     ; GFX6: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[FADD]]
     ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
@@ -320,10 +318,10 @@ body: |
     ; GFX6: [[AND2:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C10]]
     ; GFX6: [[OR:%[0-9]+]]:_(s64) = G_OR [[C11]], [[AND2]]
     ; GFX6: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C9]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C8]]
-    ; GFX6: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C8]]
+    ; GFX6: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[SELECT1]], [[SELECT2]]
     ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
-    ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV5]](s32), [[C]](s32), [[C1]](s32)
     ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[INT1]], [[C2]]
     ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV5]], [[C3]]
     ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C5]](s32), [[AND3]](s32)
@@ -332,17 +330,16 @@ body: |
     ; GFX6: [[AND4:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[XOR1]]
     ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB1]](s32), [[C5]]
     ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB1]](s32), [[C7]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV1]], [[AND4]]
-    ; GFX6: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT2]]
-    ; GFX6: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV1]]
-    ; GFX6: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC1]]
+    ; GFX6: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV1]], [[AND4]]
+    ; GFX6: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT3]]
+    ; GFX6: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[SELECT4]]
     ; GFX6: [[FADD2:%[0-9]+]]:_(s64) = G_FADD [[UV1]], [[FNEG1]]
     ; GFX6: [[FABS1:%[0-9]+]]:_(s64) = G_FABS [[FADD2]]
     ; GFX6: [[AND5:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C10]]
     ; GFX6: [[OR1:%[0-9]+]]:_(s64) = G_OR [[C11]], [[AND5]]
     ; GFX6: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s64), [[C9]]
-    ; GFX6: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C8]]
-    ; GFX6: [[FADD3:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT3]]
+    ; GFX6: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C8]]
+    ; GFX6: [[FADD3:%[0-9]+]]:_(s64) = G_FADD [[SELECT4]], [[SELECT5]]
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD1]](s64), [[FADD3]](s64)
     ; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
     ; GFX8-LABEL: name: test_intrinsic_round_v2s64

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-trunc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-trunc.mir
index ad3d02eea0ef..ed38f54ddd62 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-trunc.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-trunc.mir
@@ -79,7 +79,7 @@ body: |
     ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV1]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -96,8 +96,7 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[COPY]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[COPY]]
-    ; SI: $vgpr0_vgpr1 = COPY [[INTRINSIC_TRUNC]](s64)
+    ; SI: $vgpr0_vgpr1 = COPY [[SELECT1]](s64)
     ; CI-LABEL: name: test_intrinsic_trunc_s64
     ; CI: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
     ; CI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[COPY]]
@@ -189,7 +188,7 @@ body: |
     ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV3]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
     ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C2]]
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
@@ -206,9 +205,8 @@ body: |
     ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C7]]
     ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[MV]], [[AND1]]
     ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
-    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV]]
     ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
-    ; SI: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[C]](s32), [[C1]](s32)
+    ; SI: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV5]](s32), [[C]](s32), [[C1]](s32)
     ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[INT1]], [[C2]]
     ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV5]], [[C3]]
     ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C5]](s32), [[AND2]](s32)
@@ -217,10 +215,9 @@ body: |
     ; SI: [[AND3:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[XOR1]]
     ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB1]](s32), [[C5]]
     ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB1]](s32), [[C7]]
-    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV1]], [[AND3]]
-    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT1]]
-    ; SI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV1]]
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INTRINSIC_TRUNC]](s64), [[INTRINSIC_TRUNC1]](s64)
+    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV1]], [[AND3]]
+    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT2]]
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT1]](s64), [[SELECT3]](s64)
     ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
     ; CI-LABEL: name: test_intrinsic_trunc_v2s64
     ; CI: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3


        

