[llvm] ed92ac7 - Add missing encoding comments to fma4 folded intrinsics tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Feb 8 03:30:19 PST 2020
Author: Simon Pilgrim
Date: 2020-02-08T11:24:22Z
New Revision: ed92ac73aff55211b0b67537fc36e663815cc9f7
URL: https://github.com/llvm/llvm-project/commit/ed92ac73aff55211b0b67537fc36e663815cc9f7
DIFF: https://github.com/llvm/llvm-project/commit/ed92ac73aff55211b0b67537fc36e663815cc9f7.diff
LOG: Add missing encoding comments to fma4 folded intrinsics tests
Added:
Modified:
llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll b/llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
index 236f3ff19dac..7947421205ac 100644
--- a/llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
+++ b/llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=+fma4 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver2 -mattr=+avx,-fma | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=+fma4 -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver2 -mattr=+avx,-fma -show-mc-encoding | FileCheck %s
; VFMADD
define < 4 x float > @test_x86_fma4_vfmadd_ss_load(< 4 x float > %a0, < 4 x float > %a1, float* %a2) {
; CHECK-LABEL: test_x86_fma4_vfmadd_ss_load:
; CHECK: # %bb.0:
-; CHECK-NEXT: vfmaddss (%rdi), %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vfmaddss (%rdi), %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6a,0x07,0x10]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load float , float *%a2
%y = insertelement <4 x float> undef, float %x, i32 0
%res = call < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %y)
@@ -16,8 +16,8 @@ define < 4 x float > @test_x86_fma4_vfmadd_ss_load(< 4 x float > %a0, < 4 x floa
define < 4 x float > @test_x86_fma4_vfmadd_ss_load2(< 4 x float > %a0, float* %a1, < 4 x float > %a2) {
; CHECK-LABEL: test_x86_fma4_vfmadd_ss_load2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vfmaddss %xmm1, (%rdi), %xmm0, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vfmaddss %xmm1, (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x6a,0x07,0x10]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load float , float *%a1
%y = insertelement <4 x float> undef, float %x, i32 0
%res = call < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float > %a0, < 4 x float > %y, < 4 x float > %a2)
@@ -29,8 +29,8 @@ declare < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float >, < 4 x float >, < 4
define < 2 x double > @test_x86_fma4_vfmadd_sd_load(< 2 x double > %a0, < 2 x double > %a1, double* %a2) {
; CHECK-LABEL: test_x86_fma4_vfmadd_sd_load:
; CHECK: # %bb.0:
-; CHECK-NEXT: vfmaddsd (%rdi), %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vfmaddsd (%rdi), %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6b,0x07,0x10]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load double , double *%a2
%y = insertelement <2 x double> undef, double %x, i32 0
%res = call < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %y)
@@ -39,8 +39,8 @@ define < 2 x double > @test_x86_fma4_vfmadd_sd_load(< 2 x double > %a0, < 2 x do
define < 2 x double > @test_x86_fma4_vfmadd_sd_load2(< 2 x double > %a0, double* %a1, < 2 x double > %a2) {
; CHECK-LABEL: test_x86_fma4_vfmadd_sd_load2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vfmaddsd %xmm1, (%rdi), %xmm0, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vfmaddsd %xmm1, (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x6b,0x07,0x10]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load double , double *%a1
%y = insertelement <2 x double> undef, double %x, i32 0
%res = call < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double > %a0, < 2 x double > %y, < 2 x double > %a2)
@@ -50,8 +50,8 @@ declare < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double >, < 2 x double >,
define < 4 x float > @test_x86_fma_vfmadd_ps_load(< 4 x float > %a0, < 4 x float > %a1, < 4 x float >* %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_ps_load:
; CHECK: # %bb.0:
-; CHECK-NEXT: vfmaddps (%rdi), %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vfmaddps (%rdi), %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x68,0x07,0x10]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load <4 x float>, <4 x float>* %a2
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %x)
ret < 4 x float > %res
@@ -59,8 +59,8 @@ define < 4 x float > @test_x86_fma_vfmadd_ps_load(< 4 x float > %a0, < 4 x float
define < 4 x float > @test_x86_fma_vfmadd_ps_load2(< 4 x float > %a0, < 4 x float >* %a1, < 4 x float > %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_ps_load2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vfmaddps %xmm1, (%rdi), %xmm0, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vfmaddps %xmm1, (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x68,0x07,0x10]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load <4 x float>, <4 x float>* %a1
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %a0, < 4 x float > %x, < 4 x float > %a2)
ret < 4 x float > %res
@@ -71,9 +71,9 @@ declare < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float >, < 4 x float >, < 4
define < 4 x float > @test_x86_fma_vfmadd_ps_load3(< 4 x float >* %a0, < 4 x float >* %a1, < 4 x float > %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_ps_load3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %xmm1
-; CHECK-NEXT: vfmaddps %xmm0, (%rsi), %xmm1, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vmovaps (%rdi), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0f]
+; CHECK-NEXT: vfmaddps %xmm0, (%rsi), %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x68,0x06,0x00]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load <4 x float>, <4 x float>* %a0
%y = load <4 x float>, <4 x float>* %a1
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %x, < 4 x float > %y, < 4 x float > %a2)
@@ -83,8 +83,8 @@ define < 4 x float > @test_x86_fma_vfmadd_ps_load3(< 4 x float >* %a0, < 4 x flo
define < 2 x double > @test_x86_fma_vfmadd_pd_load(< 2 x double > %a0, < 2 x double > %a1, < 2 x double >* %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_pd_load:
; CHECK: # %bb.0:
-; CHECK-NEXT: vfmaddpd (%rdi), %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vfmaddpd (%rdi), %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x69,0x07,0x10]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load <2 x double>, <2 x double>* %a2
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %x)
ret < 2 x double > %res
@@ -92,8 +92,8 @@ define < 2 x double > @test_x86_fma_vfmadd_pd_load(< 2 x double > %a0, < 2 x dou
define < 2 x double > @test_x86_fma_vfmadd_pd_load2(< 2 x double > %a0, < 2 x double >* %a1, < 2 x double > %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_pd_load2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vfmaddpd %xmm1, (%rdi), %xmm0, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vfmaddpd %xmm1, (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x69,0x07,0x10]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load <2 x double>, <2 x double>* %a1
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %a0, < 2 x double > %x, < 2 x double > %a2)
ret < 2 x double > %res
@@ -104,9 +104,9 @@ declare < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double >, < 2 x double >, <
define < 2 x double > @test_x86_fma_vfmadd_pd_load3(< 2 x double >* %a0, < 2 x double >* %a1, < 2 x double > %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_pd_load3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovapd (%rdi), %xmm1
-; CHECK-NEXT: vfmaddpd %xmm0, (%rsi), %xmm1, %xmm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: vmovapd (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0f]
+; CHECK-NEXT: vfmaddpd %xmm0, (%rsi), %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x69,0x06,0x00]
+; CHECK-NEXT: retq # encoding: [0xc3]
%x = load <2 x double>, <2 x double>* %a0
%y = load <2 x double>, <2 x double>* %a1
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %x, < 2 x double > %y, < 2 x double > %a2)
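For reference, the new "# encoding: [...]" comments come from the -show-mc-encoding flag added to the RUN lines above, and the CHECK lines themselves are autogenerated (see the NOTE at the top of the test). A minimal sketch of the workflow from an llvm-project checkout follows; the build path passed to --llc-binary is an assumption, not part of this commit:

  # llc prints each instruction's byte encoding as an asm comment when
  # -show-mc-encoding is passed, e.g.
  #   vfmaddss (%rdi), %xmm1, %xmm0, %xmm0  # encoding: [0xc4,0xe3,0xf9,0x6a,0x07,0x10]
  llc < llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll \
      -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=+fma4 -show-mc-encoding

  # Regenerate the CHECK lines from the updated RUN lines:
  python llvm/utils/update_llc_test_checks.py \
      --llc-binary=build/bin/llc \
      llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll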