[llvm] [X86] Truncate i64 sub to i32 when upper 33 bits are zeros (PR #145850)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 21 08:36:08 PDT 2025
https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/145850
From 506bc0cfe476bf055162e5f99b3d76eaa609c859 Mon Sep 17 00:00:00 2001
From: omkar-mohanty <franzohouser at gmail.com>
Date: Mon, 23 Jun 2025 18:17:50 +0530
Subject: [PATCH] [X86] Truncate i64 sub to i32 when upper 33 bits are zeros
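
A 64-bit subtraction whose operands both have their upper 33 bits known to be
zero (per DAG.MaskedValueIsZero) is now performed in i32 and the result
zero-extended back to i64, so x86-64 can use a 32-bit subl. A minimal sketch of
the kind of pattern this targets, mirroring test1 in the new reduce-i64-sub.ll
(the function name here is only illustrative):

    define i64 @narrow_sub(i16 %a, i16 %b) nounwind {
      %zext_a = zext i16 %a to i64
      %zext_b = zext i16 %b to i64
      %sub = sub i64 %zext_a, %zext_b
      ret i64 %sub
    }

Operands that may have bit 32 set (test2) or that are sign-extended (test3)
fail the mask check and keep the 64-bit subtraction.
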
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 20 ++++
.../test/CodeGen/X86/reduce-i64-sub-vector.ll | 98 +++++++++++++++++++
llvm/test/CodeGen/X86/reduce-i64-sub.ll | 96 ++++++++++++++++++
3 files changed, 214 insertions(+)
create mode 100644 llvm/test/CodeGen/X86/reduce-i64-sub-vector.ll
create mode 100644 llvm/test/CodeGen/X86/reduce-i64-sub.ll
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 347ba1262b66b..506eb095fe581 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -58064,8 +58064,28 @@ static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
EVT VT = N->getValueType(0);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
+  unsigned Opcode = N->getOpcode();
SDLoc DL(N);
+  // Use a 32-bit sub+zext if the upper 33 bits are known to be zero.
+  if (VT == MVT::i64 && Subtarget.is64Bit()) {
+    APInt HiMask = APInt::getHighBitsSet(64, 33);
+    if (DAG.MaskedValueIsZero(Op0, HiMask) &&
+        DAG.MaskedValueIsZero(Op1, HiMask)) {
+      SDValue LHS = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op0);
+      SDValue RHS = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
+      bool NUW = Op0->getFlags().hasNoUnsignedWrap();
+      NUW = NUW && DAG.willNotOverflowSub(false, LHS, RHS);
+      SDNodeFlags Flags;
+      Flags.setNoUnsignedWrap(NUW);
+      // NSW is always true: both truncated operands are in [0, 2^31 - 1], so
+      // even the worst case 0 - 2147483647 = -2147483647 still fits in i32.
+      Flags.setNoSignedWrap(true);
+      SDValue Sub = DAG.getNode(Opcode, DL, MVT::i32, LHS, RHS, Flags);
+      return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Sub);
+    }
+  }
+
auto IsNonOpaqueConstant = [&](SDValue Op) {
return DAG.isConstantIntBuildVectorOrConstantInt(Op,
/*AllowOpaques*/ false);
diff --git a/llvm/test/CodeGen/X86/reduce-i64-sub-vector.ll b/llvm/test/CodeGen/X86/reduce-i64-sub-vector.ll
new file mode 100644
index 0000000000000..e19cf89c0a5ee
--- /dev/null
+++ b/llvm/test/CodeGen/X86/reduce-i64-sub-vector.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+
+define <2 x i64> @test1(ptr %ptr) {
+; SSE-LABEL: test1:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movzbl (%rdi), %eax
+; SSE-NEXT: movzbl %al, %ecx
+; SSE-NEXT: shrb %al
+; SSE-NEXT: movzbl %al, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: andl $1, %ecx
+; SSE-NEXT: negl %ecx
+; SSE-NEXT: movd %ecx, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: retq
+;
+; SSE2-LABEL: test1:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movzbl (%rdi), %eax
+; SSE2-NEXT: movzbl %al, %ecx
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: negl %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: negl %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: test1:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movzbl (%rdi), %eax
+; SSSE3-NEXT: movzbl %al, %ecx
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: negl %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: negl %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: test1:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movzbl (%rdi), %eax
+; SSE41-NEXT: movzbl %al, %ecx
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: negl %eax
+; SSE41-NEXT: movd %eax, %xmm1
+; SSE41-NEXT: andl $1, %ecx
+; SSE41-NEXT: negl %ecx
+; SSE41-NEXT: movd %ecx, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: test1:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: movzbl (%rdi), %eax
+; AVX1-NEXT: movzbl %al, %ecx
+; AVX1-NEXT: shrb %al
+; AVX1-NEXT: movzbl %al, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: negl %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test1:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: movzbl (%rdi), %eax
+; AVX2-NEXT: movzbl %al, %ecx
+; AVX2-NEXT: shrb %al
+; AVX2-NEXT: movzbl %al, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: negl %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: retq
+entry:
+ %X = load <2 x i1>, ptr %ptr
+ %Y = sext <2 x i1> %X to <2 x i64>
+ ret <2 x i64> %Y
+}
+
diff --git a/llvm/test/CodeGen/X86/reduce-i64-sub.ll b/llvm/test/CodeGen/X86/reduce-i64-sub.ll
new file mode 100644
index 0000000000000..f57e57f161603
--- /dev/null
+++ b/llvm/test/CodeGen/X86/reduce-i64-sub.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -disable-cgp-branch-opts | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -disable-cgp-branch-opts | FileCheck %s --check-prefix=X64
+
+; Truncate to a 32-bit subtraction since the upper 48 bits are known zero
+define i64 @test1(i16 %a, i16 %b) nounwind {
+; X86-LABEL: test1:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: sbbl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: # %bb.0:
+; X64-NEXT: movzwl %si, %ecx
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+ %zext_a = zext i16 %a to i64
+ %zext_b = zext i16 %b to i64
+ %sub = sub i64 %zext_a, %zext_b
+ ret i64 %sub
+}
+
+; Do not truncate to a 32-bit subtraction if bit 32 is set
+define i64 @test2(i16 %a, i16 %b) nounwind {
+; X86-LABEL: test2:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: movl $1, %edx
+; X86-NEXT: sbbl $0, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: # %bb.0:
+; X64-NEXT: movzwl %di, %ecx
+; X64-NEXT: movzwl %si, %edx
+; X64-NEXT: movabsq $4294967296, %rax # imm = 0x100000000
+; X64-NEXT: orq %rcx, %rax
+; X64-NEXT: subq %rdx, %rax
+; X64-NEXT: retq
+ %zext_a = zext i16 %a to i64
+ %zext_b = zext i16 %b to i64
+ %or_a = or i64 %zext_a, 4294967296
+ %sub = sub i64 %or_a, %zext_b
+ ret i64 %sub
+}
+
+; Do not truncate to a 32-bit subtraction when an operand is sign-extended
+define i64 @test3(i16 %a, i16 %b) nounwind {
+; X86-LABEL: test3:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: sbbl $0, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test3:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: movswq %di, %rax
+; X64-NEXT: movzwl %si, %ecx
+; X64-NEXT: subq %rcx, %rax
+; X64-NEXT: retq
+ %sext_a = sext i16 %a to i64
+ %zext_b = zext i16 %b to i64
+ %sub = sub i64 %sext_a, %zext_b
+ ret i64 %sub
+}
+
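+; Truncate negation of a zero-extended value to a 32-bit sub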
+define i64 @test4(i16 %x) nounwind {
+; X86-LABEL: test4:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: negl %eax
+; X86-NEXT: sbbl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test4:
+; X64: # %bb.0:
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: retq
+ %zext_x = zext i16 %x to i64
+ %sub = sub i64 0, %zext_x
+ ret i64 %sub
+}