[llvm] cda2b01 - [X86] combineUIntToFP - fold vXiY -> vXf16 using SINT_TO_FP(ZEXT())

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Nov 15 04:03:22 PST 2023


Author: Simon Pilgrim
Date: 2023-11-15T11:51:38Z
New Revision: cda2b01df708cc4b5448fa1bdb63ca5e15251545

URL: https://github.com/llvm/llvm-project/commit/cda2b01df708cc4b5448fa1bdb63ca5e15251545
DIFF: https://github.com/llvm/llvm-project/commit/cda2b01df708cc4b5448fa1bdb63ca5e15251545.diff

LOG: [X86] combineUIntToFP - fold vXiY -> vXf16 using SINT_TO_FP(ZEXT())

AVX512 targets can use UINT_TO_FP/SINT_TO_FP equally well, but pre-AVX512 targets only have SINT_TO_FP instructions.
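
The fold is sound because a zero-extended value always has its sign bit
clear, so the signed and unsigned conversions agree on every input. As a
minimal IR-level sketch (hand-written for illustration, not taken from
the patch), the first rule effectively rewrites a uitofp such as
"uitofp <8 x i8> %x to <8 x half>" into:

    %z = zext <8 x i8> %x to <8 x i16>      ; values in [0,255], sign bit clear
    %f = sitofp <8 x i16> %z to <8 x half>  ; same result as uitofp on %z

Canonicalizing on the signed form lets later combines pick the cheaper
conversions seen in the updated tests below (e.g. vcvtw2ph, or a ymm
vcvtdq2ps instead of a zmm vcvtudq2ps).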

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll
    llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
    llvm/test/CodeGen/X86/select-narrow-int-to-fp.ll
    llvm/test/CodeGen/X86/vec-strict-inttofp-128-fp16.ll
    llvm/test/CodeGen/X86/vec-strict-inttofp-256-fp16.ll
    llvm/test/CodeGen/X86/vec-strict-inttofp-512-fp16.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e25cbd49b3cf681..c9381218eee7840 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -53323,11 +53323,11 @@ static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
   // Using i16 as an intermediate type is a bad idea, unless we have HW support
   // for it. Therefore for type sizes equal or smaller than 32 just go with i32.
   // if hasFP16 support:
-  //   UINT_TO_FP(vXi1~15)  -> UINT_TO_FP(ZEXT(vXi1~15  to vXi16))
-  //   UINT_TO_FP(vXi17~31) -> UINT_TO_FP(ZEXT(vXi17~31 to vXi32))
+  //   UINT_TO_FP(vXi1~15)  -> SINT_TO_FP(ZEXT(vXi1~15  to vXi16))
+  //   UINT_TO_FP(vXi17~31) -> SINT_TO_FP(ZEXT(vXi17~31 to vXi32))
   // else
-  //   UINT_TO_FP(vXi1~31) -> UINT_TO_FP(ZEXT(vXi1~31 to vXi32))
-  // UINT_TO_FP(vXi33~63) -> UINT_TO_FP(ZEXT(vXi33~63 to vXi64))
+  //   UINT_TO_FP(vXi1~31) -> SINT_TO_FP(ZEXT(vXi1~31 to vXi32))
+  // UINT_TO_FP(vXi33~63) -> SINT_TO_FP(ZEXT(vXi33~63 to vXi64))
   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
     unsigned ScalarSize = InVT.getScalarSizeInBits();
     if ((ScalarSize == 16 && Subtarget.hasFP16()) || ScalarSize == 32 ||
@@ -53342,9 +53342,9 @@ static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
                          InVT.getVectorNumElements());
     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
     if (IsStrict)
-      return DAG.getNode(ISD::STRICT_UINT_TO_FP, dl, {VT, MVT::Other},
+      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                          {N->getOperand(0), P});
-    return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P);
+    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
   }
 
   // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))

diff --git a/llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll
index f3f4223b6e87770..1318f607ea93190 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll
@@ -720,7 +720,7 @@ define <4 x half> @test_u8tofp4(<4 x i8> %arg0) {
 ; CHECK-LABEL: test_u8tofp4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-NEXT:    vcvtuw2ph %xmm0, %xmm0
+; CHECK-NEXT:    vcvtw2ph %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = uitofp <4 x i8> %arg0 to <4 x half>
   ret <4 x half> %res
@@ -741,7 +741,7 @@ define <2 x half> @test_u1tofp2(<2 x i1> %arg0) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovqw %xmm0, %xmm0
 ; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
-; CHECK-NEXT:    vcvtuw2ph %xmm0, %xmm0
+; CHECK-NEXT:    vcvtw2ph %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = uitofp <2 x i1> %arg0 to <2 x half>
   ret <2 x half> %res
@@ -762,7 +762,7 @@ define <2 x half> @test_u33tofp2(<2 x i33> %arg0) {
 ; CHECK-LABEL: test_u33tofp2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vandpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; CHECK-NEXT:    vcvtuqq2ph %xmm0, %xmm0
+; CHECK-NEXT:    vcvtqq2ph %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = uitofp <2 x i33> %arg0 to <2 x half>
   ret <2 x half> %res

diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
index 080ad3a7b0b463e..f4c43ff0cf9c875 100644
--- a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
+++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
@@ -266,13 +266,8 @@ define <8 x half> @fmul_pow2_8xhalf(<8 x i16> %i) {
 ; CHECK-AVX2-NEXT:    vpsllvd %ymm0, %ymm1, %ymm0
 ; CHECK-AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
 ; CHECK-AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; CHECK-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
-; CHECK-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [0,21248,0,21248,0,21248,0,21248,0,21248,0,21248,0,21248,0,21248]
-; CHECK-AVX2-NEXT:    vsubps %ymm1, %ymm2, %ymm1
 ; CHECK-AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
-; CHECK-AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; CHECK-AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; CHECK-AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; CHECK-AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; CHECK-AVX2-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; CHECK-AVX2-NEXT:    vzeroupper
@@ -378,7 +373,7 @@ define <8 x half> @fmul_pow2_8xhalf(<8 x i16> %i) {
 ; CHECK-NO-FASTFMA-NEXT:    vpsllvd %ymm0, %ymm1, %ymm0
 ; CHECK-NO-FASTFMA-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-NO-FASTFMA-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; CHECK-NO-FASTFMA-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; CHECK-NO-FASTFMA-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; CHECK-NO-FASTFMA-NEXT:    vcvtps2ph $4, %ymm0, %xmm0
 ; CHECK-NO-FASTFMA-NEXT:    vcvtph2ps %xmm0, %ymm0
 ; CHECK-NO-FASTFMA-NEXT:    vbroadcastss {{.*#+}} ymm1 = [8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3]
@@ -1108,7 +1103,7 @@ define <2 x half> @fmul_pow_shl_cnt_vec_fail_to_large(<2 x i16> %cnt) nounwind {
 ; CHECK-NO-FASTFMA-NEXT:    vpsllvd %ymm0, %ymm1, %ymm0
 ; CHECK-NO-FASTFMA-NEXT:    vpmovdw %zmm0, %ymm0
 ; CHECK-NO-FASTFMA-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; CHECK-NO-FASTFMA-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; CHECK-NO-FASTFMA-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; CHECK-NO-FASTFMA-NEXT:    vcvtps2ph $4, %ymm0, %xmm0
 ; CHECK-NO-FASTFMA-NEXT:    vcvtph2ps %xmm0, %ymm0
 ; CHECK-NO-FASTFMA-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1,1.5E+1,1.5E+1,1.5E+1,1.5E+1]
@@ -1122,7 +1117,7 @@ define <2 x half> @fmul_pow_shl_cnt_vec_fail_to_large(<2 x i16> %cnt) nounwind {
 ; CHECK-FMA-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2]
 ; CHECK-FMA-NEXT:    vpsllvw %xmm0, %xmm1, %xmm0
 ; CHECK-FMA-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; CHECK-FMA-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; CHECK-FMA-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; CHECK-FMA-NEXT:    vcvtps2ph $4, %ymm0, %xmm0
 ; CHECK-FMA-NEXT:    vcvtph2ps %xmm0, %ymm0
 ; CHECK-FMA-NEXT:    vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0

diff --git a/llvm/test/CodeGen/X86/select-narrow-int-to-fp.ll b/llvm/test/CodeGen/X86/select-narrow-int-to-fp.ll
index 15bea0dd4a46950..9bc0dbf5dcb5866 100644
--- a/llvm/test/CodeGen/X86/select-narrow-int-to-fp.ll
+++ b/llvm/test/CodeGen/X86/select-narrow-int-to-fp.ll
@@ -101,7 +101,7 @@ define <16 x half> @vector_uint8ToHalf(<16 x i8> %int8) {
 ; CHECK-WITH_FP16-LABEL: vector_uint8ToHalf:
 ; CHECK-WITH_FP16:       # %bb.0:
 ; CHECK-WITH_FP16-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; CHECK-WITH_FP16-NEXT:    vcvtuw2ph %ymm0, %ymm0
+; CHECK-WITH_FP16-NEXT:    vcvtw2ph %ymm0, %ymm0
 ; CHECK-WITH_FP16-NEXT:    retq
     %fp32 = uitofp <16 x i8> %int8 to <16 x half>
     ret <16 x half> %fp32

diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-128-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-128-fp16.ll
index ebdf12321b22f0a..4437969bfa9a9dc 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-128-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-128-fp16.ll
@@ -74,13 +74,13 @@ define <8 x half> @uitofp_v8i1_v8f16(<8 x i1> %x) #0 {
 ; X86-LABEL: uitofp_v8i1_v8f16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
-; X86-NEXT:    vcvtuw2ph %xmm0, %xmm0
+; X86-NEXT:    vcvtw2ph %xmm0, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: uitofp_v8i1_v8f16:
 ; X64:       # %bb.0:
 ; X64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
-; X64-NEXT:    vcvtuw2ph %xmm0, %xmm0
+; X64-NEXT:    vcvtw2ph %xmm0, %xmm0
 ; X64-NEXT:    retq
  %result = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1> %x,
                                                               metadata !"round.dynamic",
@@ -104,7 +104,7 @@ define <8 x half> @uitofp_v8i8_v8f16(<8 x i8> %x) #0 {
 ; CHECK-LABEL: uitofp_v8i8_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-NEXT:    vcvtuw2ph %xmm0, %xmm0
+; CHECK-NEXT:    vcvtw2ph %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8> %x,
                                                               metadata !"round.dynamic",

diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256-fp16.ll
index 93c340ebde76e97..5a2f4465c7f411e 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256-fp16.ll
@@ -34,14 +34,14 @@ define <16 x half> @uitofp_v16i1_v16f16(<16 x i1> %x) #0 {
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
 ; X86-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; X86-NEXT:    vcvtuw2ph %ymm0, %ymm0
+; X86-NEXT:    vcvtw2ph %ymm0, %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: uitofp_v16i1_v16f16:
 ; X64:       # %bb.0:
 ; X64-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; X64-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; X64-NEXT:    vcvtuw2ph %ymm0, %ymm0
+; X64-NEXT:    vcvtw2ph %ymm0, %ymm0
 ; X64-NEXT:    retq
  %result = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1> %x,
                                                               metadata !"round.dynamic",
@@ -65,7 +65,7 @@ define <16 x half> @uitofp_v16i8_v16f16(<16 x i8> %x) #0 {
 ; CHECK-LABEL: uitofp_v16i8_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; CHECK-NEXT:    vcvtuw2ph %ymm0, %ymm0
+; CHECK-NEXT:    vcvtw2ph %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8> %x,
                                                               metadata !"round.dynamic",

diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-512-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-512-fp16.ll
index 5067a2e9c42123d..654d767a7549c71 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-512-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-512-fp16.ll
@@ -32,14 +32,14 @@ define <32 x half> @uitofp_v32i1_v32f16(<32 x i1> %x) #0 {
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
 ; X86-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; X86-NEXT:    vcvtuw2ph %zmm0, %zmm0
+; X86-NEXT:    vcvtw2ph %zmm0, %zmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: uitofp_v32i1_v32f16:
 ; X64:       # %bb.0:
 ; X64-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
 ; X64-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; X64-NEXT:    vcvtuw2ph %zmm0, %zmm0
+; X64-NEXT:    vcvtw2ph %zmm0, %zmm0
 ; X64-NEXT:    retq
  %result = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1> %x,
                                                               metadata !"round.dynamic",
@@ -63,7 +63,7 @@ define <32 x half> @uitofp_v32i8_v32f16(<32 x i8> %x) #0 {
 ; CHECK-LABEL: uitofp_v32i8_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; CHECK-NEXT:    vcvtuw2ph %zmm0, %zmm0
+; CHECK-NEXT:    vcvtw2ph %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8> %x,
                                                               metadata !"round.dynamic",
