[llvm-branch-commits] [llvm] 6e0577f - [X86] getScalarMaskingNode - FIXUPIMM scalar ops take upper elements from second operand (#179101)
Cullen Rhodes via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Feb 3 01:15:39 PST 2026
Author: Simon Pilgrim
Date: 2026-02-03T09:15:27Z
New Revision: 6e0577f5cf893b9d948b8332b9baeed352ba669c
URL: https://github.com/llvm/llvm-project/commit/6e0577f5cf893b9d948b8332b9baeed352ba669c
DIFF: https://github.com/llvm/llvm-project/commit/6e0577f5cf893b9d948b8332b9baeed352ba669c.diff
LOG: [X86] getScalarMaskingNode - FIXUPIMM scalar ops take upper elements from second operand (#179101)
FIXUPIMMSS/SD instructions pass through the SECOND operand's upper elements, not the first like most (2-op) instructions
Fixes #179057
(cherry picked from commit 49d2323447aec77c3d1ae8c941f3f8a126ff1480)
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/avx512-intrinsics.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a354704c5958b..5935f2eb344e1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26574,7 +26574,8 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const X86Subtarget &Subtarget,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG,
+ unsigned UpperEltOpSrc = 0) {
auto *MaskConst = dyn_cast<ConstantSDNode>(Mask);
if (MaskConst && (MaskConst->getZExtValue() & 0x1))
return Op;
@@ -26600,8 +26601,8 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
SmallVector<int, 16> ShuffleMask(VT.getVectorNumElements());
std::iota(ShuffleMask.begin(), ShuffleMask.end(), 0);
ShuffleMask[0] = VT.getVectorNumElements();
- return DAG.getVectorShuffle(VT, dl, Op.getOperand(0), PreservedSrc,
- ShuffleMask);
+ return DAG.getVectorShuffle(VT, dl, Op.getOperand(UpperEltOpSrc),
+ PreservedSrc, ShuffleMask);
}
return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
@@ -27262,7 +27263,8 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
- return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
+ return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG,
+ /*UpperEltOpSrc=*/1);
}
case ROUNDP: {
assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index 21bac9e7bb04d..b979f7531cd36 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -5521,6 +5521,7 @@ define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x fl
define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss_passthrough_zero_mask(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss_passthrough_zero_mask:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -2, i32 4)
ret <4 x float> %res
@@ -5566,8 +5567,8 @@ define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss_passthrough_zero_mask(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss_passthrough_zero_mask:
; CHECK: # %bb.0:
-; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -2, i32 4)
ret <4 x float> %res
@@ -5701,6 +5702,7 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x
define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd_passthrough_zero_mask(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd_passthrough_zero_mask:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 -2, i32 4)
ret <2 x double> %res
@@ -5746,8 +5748,8 @@ define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x
define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd_passthrough_zero_mask(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd_passthrough_zero_mask:
; CHECK: # %bb.0:
-; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 -2, i32 4)
ret <2 x double> %res
More information about the llvm-branch-commits
mailing list