[llvm] [SelectionDAG] Use unaligned store/load to move AVX registers onto stack for `insertelement` (PR #82130)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 8 07:46:39 PDT 2024
================
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+define <8 x i32> @foo(<8 x i32> %arg1, i32 %n) #0 {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: andl $7, %edi
+; CHECK-NEXT: movl $42, -40(%rsp,%rdi,4)
+; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0
+; CHECK-NEXT: retq
+entry:
+ %a = insertelement <8 x i32> %arg1, i32 42, i32 %n
+ ret <8 x i32> %a
+}
+
+define <8 x i32> @foo2(<8 x i32> %arg1, i32 %n) alignstack(8) #0 {
+; CHECK-LABEL: foo2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: andl $7, %edi
+; CHECK-NEXT: movl $42, -32(%rsp,%rdi,4)
+; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0
+; CHECK-NEXT: retq
+entry:
+ %a = insertelement <8 x i32> %arg1, i32 42, i32 %n
+ ret <8 x i32> %a
+}
+
+define <8 x i32> @foo3(<8 x i32> %arg1, i32 %n) alignstack(16) #0 {
+; CHECK-LABEL: foo3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: andl $7, %edi
+; CHECK-NEXT: movl $42, -40(%rsp,%rdi,4)
+; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0
+; CHECK-NEXT: retq
+entry:
+ %a = insertelement <8 x i32> %arg1, i32 42, i32 %n
+ ret <8 x i32> %a
+}
+
+define <8 x i32> @foo4(<8 x i32> %arg1, i32 %n) alignstack(64) #0 {
+; CHECK-LABEL: foo4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmovaps %ymm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: andl $7, %edi
+; CHECK-NEXT: movl $42, -56(%rsp,%rdi,4)
+; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %ymm0
+; CHECK-NEXT: retq
+entry:
+ %a = insertelement <8 x i32> %arg1, i32 42, i32 %n
+ ret <8 x i32> %a
+}
+
+define <8 x i32> @foo5(<8 x i32> %arg1, i32 %n) alignstack(256) #0 {
+; CHECK-LABEL: foo5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: subq $120, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 128
+; CHECK-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: andl $7, %edi
+; CHECK-NEXT: movl $42, 64(%rsp,%rdi,4)
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm0
+; CHECK-NEXT: addq $120, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+entry:
+ %a = insertelement <8 x i32> %arg1, i32 42, i32 %n
+ ret <8 x i32> %a
+}
+
+define <8 x i16> @foo6(<8 x i16> %arg1, i32 %n) #0 {
+; CHECK-LABEL: foo6:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: andl $7, %edi
+; CHECK-NEXT: movw $42, -24(%rsp,%rdi,2)
+; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: retq
+entry:
+ %a = insertelement <8 x i16> %arg1, i16 42, i32 %n
+ ret <8 x i16> %a
+}
+
+define <8 x i8> @foo7(<8 x i8> %arg1, i32 %n) #0 {
+; CHECK-LABEL: foo7:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: andl $15, %edi
+; CHECK-NEXT: movb $42, -24(%rsp,%rdi)
+; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: retq
+entry:
+ %a = insertelement <8 x i8> %arg1, i8 42, i32 %n
+ ret <8 x i8> %a
+}
+
+define <8 x i64> @foo8(<8 x i64> %arg1, i32 %n) #0 {
+; CHECK-LABEL: foo8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: andl $7, %edi
+; CHECK-NEXT: movq $42, -72(%rsp,%rdi,8)
+; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0
+; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm1
+; CHECK-NEXT: retq
+entry:
+ %a = insertelement <8 x i64> %arg1, i64 42, i32 %n
+ ret <8 x i64> %a
+}
+
+attributes #0 = { "no-realign-stack" "target-cpu"="haswell" }
----------------
RKSimon wrote:
Replace the "target-cpu"="haswell" attribute with -mattr=+avx in the RUN line.
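As a rough sketch of that suggestion (not part of the patch; it assumes haswell was only chosen to enable AVX, and the autogenerated CHECK lines would need regenerating with utils/update_llc_test_checks.py afterwards), the RUN line and attribute group would become something like:

; Sketch only -- enable AVX via a feature flag instead of a full CPU model.
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s

; The attribute group then keeps only the stack-realignment behaviour under test.
attributes #0 = { "no-realign-stack" }

Pinning just +avx keeps the vmovups/vmovaps checks tied to the feature they actually exercise rather than to whatever a particular CPU model happens to imply.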
https://github.com/llvm/llvm-project/pull/82130