[llvm] [X86][GlobalISel] Support G_FADD, G_FSUB, G_FMUL, G_FDIV (PR #87339)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 2 10:05:09 PDT 2024


================
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefixes=AVX512
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=AVX512
+
+define <4 x float> @test_fadd_v4s32(<4 x float> %arg1, <4 x float> %arg2) {
+; SSE-LABEL: test_fadd_v4s32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_fadd_v4s32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_fadd_v4s32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %ret = fadd <4 x float> %arg1, %arg2
+  ret <4 x float> %ret
+}
+
+define <2 x double> @test_fadd_v2s64(<2 x double> %arg1, <2 x double> %arg2) {
+; SSE-LABEL: test_fadd_v2s64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_fadd_v2s64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_fadd_v2s64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %ret = fadd <2 x double> %arg1, %arg2
+  ret <2 x double> %ret
+}
+
+define <8 x float> @test_fadd_v8s32(<8 x float> %arg1, <8 x float> %arg2) {
+; SSE-LABEL: test_fadd_v8s32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addps %xmm2, %xmm0
+; SSE-NEXT:    addps %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_fadd_v8s32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_fadd_v8s32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %ret = fadd <8 x float> %arg1, %arg2
+  ret <8 x float> %ret
+}
+
+define <4 x double> @test_fadd_v4s64(<4 x double> %arg1, <4 x double> %arg2) {
+; SSE-LABEL: test_fadd_v4s64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addpd %xmm2, %xmm0
+; SSE-NEXT:    addpd %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_fadd_v4s64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_fadd_v4s64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %ret = fadd <4 x double> %arg1, %arg2
+  ret <4 x double> %ret
+}
+
+define <16 x float> @test_fadd_v16s32(<16 x float> %arg1, <16 x float> %arg2) {
+; SSE-LABEL: test_fadd_v16s32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addps %xmm4, %xmm0
+; SSE-NEXT:    addps %xmm5, %xmm1
+; SSE-NEXT:    addps %xmm6, %xmm2
+; SSE-NEXT:    addps %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_fadd_v16s32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddps %ymm2, %ymm0, %ymm0
+; AVX-NEXT:    vaddps %ymm3, %ymm1, %ymm1
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_fadd_v16s32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vaddps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %ret = fadd <16 x float> %arg1, %arg2
+  ret <16 x float> %ret
+}
+
+define <8 x double> @test_fadd_v8s64(<8 x double> %arg1, <8 x double> %arg2) {
+; SSE-LABEL: test_fadd_v8s64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addpd %xmm4, %xmm0
+; SSE-NEXT:    addpd %xmm5, %xmm1
+; SSE-NEXT:    addpd %xmm6, %xmm2
+; SSE-NEXT:    addpd %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_fadd_v8s64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
+; AVX-NEXT:    vaddpd %ymm3, %ymm1, %ymm1
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_fadd_v8s64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %ret = fadd <8 x double> %arg1, %arg2
+  ret <8 x double> %ret
+}
----------------
RKSimon wrote:

Could we add a couple of odd vectors: maybe `<2 x float>` and `<3 x double>`?
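For illustration, the extra cases might look something like the sketch below (the function names just follow the file's existing `test_fadd_*` naming scheme, and the CHECK lines would be regenerated with utils/update_llc_test_checks.py):

```llvm
; Sketch only: odd-width vectors that don't map directly to an XMM register,
; so legalization has to widen <2 x float> and split/pad <3 x double>.
define <2 x float> @test_fadd_v2s32(<2 x float> %arg1, <2 x float> %arg2) {
  %ret = fadd <2 x float> %arg1, %arg2
  ret <2 x float> %ret
}

define <3 x double> @test_fadd_v3s64(<3 x double> %arg1, <3 x double> %arg2) {
  %ret = fadd <3 x double> %arg1, %arg2
  ret <3 x double> %ret
}
```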

https://github.com/llvm/llvm-project/pull/87339