[llvm-branch-commits] [llvm] [X86] Remove extra MOV after widening atomic load (PR #138635)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri May 9 13:03:32 PDT 2025
https://github.com/jofrn updated https://github.com/llvm/llvm-project/pull/138635
From e8dc4c235fb8d1f4ad8b6978b7dfe4fcc871f8dd Mon Sep 17 00:00:00 2001
From: jofernau_amdeng <joe.fernau at amd.com>
Date: Tue, 6 May 2025 01:48:11 -0400
Subject: [PATCH] [X86] Remove extra MOV after widening atomic load
This change adds ISel patterns that eliminate the extra MOV previously
emitted after widening an atomic load, as illustrated below.
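For example, in atomic_vec2_i16 from the updated atomic-load-store.ll
test, the widened 32-bit atomic load used to round-trip through a GPR
before reaching the vector register:

  movl (%rdi), %eax
  movd %eax, %xmm0

With the new patterns it selects a single load straight into the XMM
register:

  movss {{.*#+}} xmm0 = mem[0],zero,zero,zero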
commit-id:45989503
---
llvm/lib/Target/X86/X86InstrCompiler.td | 7 ++++
llvm/test/CodeGen/X86/atomic-load-store.ll | 43 ++++++++++++----------
llvm/test/CodeGen/X86/atomic-unordered.ll | 3 +-
3 files changed, 31 insertions(+), 22 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index efa1e8bd7f3e3..d327ca6e99f87 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1204,6 +1204,13 @@ def : Pat<(i16 (atomic_load_nonext_16 addr:$src)), (MOV16rm addr:$src)>;
def : Pat<(i32 (atomic_load_nonext_32 addr:$src)), (MOV32rm addr:$src)>;
def : Pat<(i64 (atomic_load_nonext_64 addr:$src)), (MOV64rm addr:$src)>;
+def : Pat<(v4i32 (scalar_to_vector (i32 (anyext (i16 (atomic_load_16 addr:$src)))))),
+ (MOVDI2PDIrm addr:$src)>; // load atomic <2 x i8>
+def : Pat<(v4i32 (scalar_to_vector (i32 (atomic_load_32 addr:$src)))),
+ (MOVDI2PDIrm addr:$src)>; // load atomic <2 x i16>
+def : Pat<(v2i64 (scalar_to_vector (i64 (atomic_load_64 addr:$src)))),
+ (MOV64toPQIrm addr:$src)>; // load atomic <2 x i32,float>
+
// Floating point loads/stores.
def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
(MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
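In DAG terms, a scalar_to_vector of an atomic_load_16/32/64 result now
selects MOVDI2PDIrm or MOV64toPQIrm directly, rather than a scalar load
into a GPR followed by a GPR-to-XMM move; the test updates below show
the resulting codegen.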
diff --git a/llvm/test/CodeGen/X86/atomic-load-store.ll b/llvm/test/CodeGen/X86/atomic-load-store.ll
index 9ee8b4fc5ac7f..935d058a52f8f 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store.ll
@@ -149,8 +149,7 @@ define <1 x i64> @atomic_vec1_i64_align(ptr %x) nounwind {
define <2 x i8> @atomic_vec2_i8(ptr %x) {
; CHECK3-LABEL: atomic_vec2_i8:
; CHECK3: ## %bb.0:
-; CHECK3-NEXT: movzwl (%rdi), %eax
-; CHECK3-NEXT: movd %eax, %xmm0
+; CHECK3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK3-NEXT: retq
;
; CHECK0-LABEL: atomic_vec2_i8:
@@ -165,11 +164,15 @@ define <2 x i8> @atomic_vec2_i8(ptr %x) {
}
define <2 x i16> @atomic_vec2_i16(ptr %x) {
-; CHECK-LABEL: atomic_vec2_i16:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: retq
+; CHECK3-LABEL: atomic_vec2_i16:
+; CHECK3: ## %bb.0:
+; CHECK3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK3-NEXT: retq
+;
+; CHECK0-LABEL: atomic_vec2_i16:
+; CHECK0: ## %bb.0:
+; CHECK0-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK0-NEXT: retq
%ret = load atomic <2 x i16>, ptr %x acquire, align 4
ret <2 x i16> %ret
}
@@ -177,8 +180,7 @@ define <2 x i16> @atomic_vec2_i16(ptr %x) {
define <2 x ptr addrspace(270)> @atomic_vec2_ptr270(ptr %x) {
; CHECK-LABEL: atomic_vec2_ptr270:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movq (%rdi), %rax
-; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: movq (%rdi), %xmm0
; CHECK-NEXT: retq
%ret = load atomic <2 x ptr addrspace(270)>, ptr %x acquire, align 8
ret <2 x ptr addrspace(270)> %ret
@@ -187,8 +189,7 @@ define <2 x ptr addrspace(270)> @atomic_vec2_ptr270(ptr %x) {
define <2 x i32> @atomic_vec2_i32_align(ptr %x) {
; CHECK-LABEL: atomic_vec2_i32_align:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movq (%rdi), %rax
-; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: movq (%rdi), %xmm0
; CHECK-NEXT: retq
%ret = load atomic <2 x i32>, ptr %x acquire, align 8
ret <2 x i32> %ret
@@ -197,8 +198,7 @@ define <2 x i32> @atomic_vec2_i32_align(ptr %x) {
define <2 x float> @atomic_vec2_float_align(ptr %x) {
; CHECK-LABEL: atomic_vec2_float_align:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movq (%rdi), %rax
-; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: movq (%rdi), %xmm0
; CHECK-NEXT: retq
%ret = load atomic <2 x float>, ptr %x acquire, align 8
ret <2 x float> %ret
@@ -354,11 +354,15 @@ define <2 x i32> @atomic_vec2_i32(ptr %x) nounwind {
}
define <4 x i8> @atomic_vec4_i8(ptr %x) nounwind {
-; CHECK-LABEL: atomic_vec4_i8:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: retq
+; CHECK3-LABEL: atomic_vec4_i8:
+; CHECK3: ## %bb.0:
+; CHECK3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK3-NEXT: retq
+;
+; CHECK0-LABEL: atomic_vec4_i8:
+; CHECK0: ## %bb.0:
+; CHECK0-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK0-NEXT: retq
%ret = load atomic <4 x i8>, ptr %x acquire, align 4
ret <4 x i8> %ret
}
@@ -366,8 +370,7 @@ define <4 x i8> @atomic_vec4_i8(ptr %x) nounwind {
define <4 x i16> @atomic_vec4_i16(ptr %x) nounwind {
; CHECK-LABEL: atomic_vec4_i16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movq (%rdi), %rax
-; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: movq (%rdi), %xmm0
; CHECK-NEXT: retq
%ret = load atomic <4 x i16>, ptr %x acquire, align 8
ret <4 x i16> %ret
diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index e8e0ee0b7ef49..637029b81d8b5 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -2275,8 +2275,7 @@ define i64 @load_i16_anyext_i64(ptr %ptr) {
;
; CHECK-O3-LABEL: load_i16_anyext_i64:
; CHECK-O3: # %bb.0:
-; CHECK-O3-NEXT: movzwl (%rdi), %eax
-; CHECK-O3-NEXT: vmovd %eax, %xmm0
+; CHECK-O3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-O3-NEXT: vmovq %xmm0, %rax
; CHECK-O3-NEXT: retq
%v = load atomic i16, ptr %ptr unordered, align 8