[llvm] 2f853d8 - [X86] Regenerate VMOVSH assembly comments. NFC.

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Fri Jan 17 07:11:16 PST 2025


Author: Simon Pilgrim
Date: 2025-01-17T15:10:52Z
New Revision: 2f853d851bb0eb4ba3d827909300839037d4b8fe

URL: https://github.com/llvm/llvm-project/commit/2f853d851bb0eb4ba3d827909300839037d4b8fe
DIFF: https://github.com/llvm/llvm-project/commit/2f853d851bb0eb4ba3d827909300839037d4b8fe.diff

LOG: [X86] Regenerate VMOVSH assembly comments. NFC.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx512fp16-arith.ll
    llvm/test/CodeGen/X86/avx512fp16-cvt.ll
    llvm/test/CodeGen/X86/avx512fp16-unsafe-fp-math.ll
    llvm/test/CodeGen/X86/cvt16-2.ll
    llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll
    llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
    llvm/test/CodeGen/X86/half-darwin.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx512fp16-arith.ll b/llvm/test/CodeGen/X86/avx512fp16-arith.ll
index 8d811d8d29e065..9838c6c858bd69 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-arith.ll
@@ -179,7 +179,7 @@ define half @add_sh(half %i, half %j, ptr %x.ptr) nounwind readnone {
 define half @sub_sh(half %i, half %j, ptr %x.ptr) nounwind readnone {
 ; CHECK-LABEL: sub_sh:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovsh (%rdi), %xmm2
+; CHECK-NEXT:    vmovsh {{.*#+}} xmm2 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT:    vsubsh %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vsubsh %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    retq
@@ -216,7 +216,7 @@ define half @mul_sh(half %i, half %j, ptr %x.ptr) nounwind readnone {
 define half @div_sh(half %i, half %j, ptr %x.ptr) nounwind readnone {
 ; CHECK-LABEL: div_sh:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovsh (%rdi), %xmm2
+; CHECK-NEXT:    vmovsh {{.*#+}} xmm2 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT:    vdivsh %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vdivsh %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/avx512fp16-cvt.ll b/llvm/test/CodeGen/X86/avx512fp16-cvt.ll
index e1e013528738a4..3040e58b37997f 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-cvt.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-cvt.ll
@@ -144,7 +144,7 @@ define float @f16tof32(half %b) nounwind {
 ; X86-LABEL: f16tof32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %eax
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovss %xmm0, (%esp)
 ; X86-NEXT:    flds (%esp)
@@ -166,7 +166,7 @@ define double @f16tof64(half %b) nounwind {
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
 ; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    vmovsh 8(%ebp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovsd %xmm0, (%esp)
 ; X86-NEXT:    fldl (%esp)
@@ -356,7 +356,7 @@ define <8 x half> @f64to8f16(<8 x double> %b) {
 define float @extload_f16_f32(ptr %x) {
 ; X64-LABEL: extload_f16_f32:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovsh (%rdi), %xmm0
+; X64-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
 ;
@@ -365,7 +365,7 @@ define float @extload_f16_f32(ptr %x) {
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vmovsh (%eax), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovss %xmm0, (%esp)
 ; X86-NEXT:    flds (%esp)
@@ -380,7 +380,7 @@ define float @extload_f16_f32(ptr %x) {
 define double @extload_f16_f64(ptr %x) {
 ; X64-LABEL: extload_f16_f64:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovsh (%rdi), %xmm0
+; X64-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
 ;
@@ -394,7 +394,7 @@ define double @extload_f16_f64(ptr %x) {
 ; X86-NEXT:    andl $-8, %esp
 ; X86-NEXT:    subl $8, %esp
 ; X86-NEXT:    movl 8(%ebp), %eax
-; X86-NEXT:    vmovsh (%eax), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovsd %xmm0, (%esp)
 ; X86-NEXT:    fldl (%esp)
@@ -777,7 +777,7 @@ define i64 @half_to_s64(half %x) {
 ;
 ; X86-LABEL: half_to_s64:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvttph2qq %xmm0, %xmm0
 ; X86-NEXT:    vmovd %xmm0, %eax
 ; X86-NEXT:    vpextrd $1, %xmm0, %edx
@@ -808,7 +808,7 @@ define i128 @half_to_s128(half %x) {
 ; X86-NEXT:    subl $48, %esp
 ; X86-NEXT:    .cfi_offset %esi, -12
 ; X86-NEXT:    movl 8(%ebp), %esi
-; X86-NEXT:    vmovsh 12(%ebp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vmovsh %xmm0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl %eax, (%esp)
@@ -880,7 +880,7 @@ define i64 @half_to_u64(half %x) {
 ;
 ; X86-LABEL: half_to_u64:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvttph2uqq %xmm0, %xmm0
 ; X86-NEXT:    vmovd %xmm0, %eax
 ; X86-NEXT:    vpextrd $1, %xmm0, %edx
@@ -911,7 +911,7 @@ define i128 @half_to_u128(half %x) {
 ; X86-NEXT:    subl $48, %esp
 ; X86-NEXT:    .cfi_offset %esi, -12
 ; X86-NEXT:    movl 8(%ebp), %esi
-; X86-NEXT:    vmovsh 12(%ebp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vmovsh %xmm0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl %eax, (%esp)
@@ -940,7 +940,7 @@ define x86_fp80 @half_to_f80(half %x) nounwind {
 ; X86-LABEL: half_to_f80:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %eax
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vmovsh %xmm0, (%esp)
 ; X86-NEXT:    calll __extendhfxf2
 ; X86-NEXT:    popl %eax
@@ -990,7 +990,7 @@ define fp128 @half_to_f128(half %x) nounwind {
 ; X86-NEXT:    andl $-16, %esp
 ; X86-NEXT:    subl $48, %esp
 ; X86-NEXT:    movl 8(%ebp), %esi
-; X86-NEXT:    vmovsh 12(%ebp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax

diff --git a/llvm/test/CodeGen/X86/avx512fp16-unsafe-fp-math.ll b/llvm/test/CodeGen/X86/avx512fp16-unsafe-fp-math.ll
index c9b45983e09a8b..5b92ce76d5736e 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-unsafe-fp-math.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-unsafe-fp-math.ll
@@ -112,7 +112,7 @@ define half @test_max_f16(half %a, ptr %ptr) {
 ;
 ; CHECK-LABEL: test_max_f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmovsh (%rdi), %xmm1
+; CHECK-NEXT:    vmovsh {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT:    vmaxsh %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -130,7 +130,7 @@ define half @test_min_f16(half %a, ptr %ptr) {
 ;
 ; CHECK-LABEL: test_min_f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmovsh (%rdi), %xmm1
+; CHECK-NEXT:    vmovsh {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT:    vminsh %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 entry:

diff --git a/llvm/test/CodeGen/X86/cvt16-2.ll b/llvm/test/CodeGen/X86/cvt16-2.ll
index bab6768b163227..8dbbc57f105643 100644
--- a/llvm/test/CodeGen/X86/cvt16-2.ll
+++ b/llvm/test/CodeGen/X86/cvt16-2.ll
@@ -34,7 +34,7 @@ define float @test2(ptr nocapture %src) {
 ;
 ; FP16-LABEL: test2:
 ; FP16:       # %bb.0:
-; FP16-NEXT:    vmovsh (%rdi), %xmm0
+; FP16-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; FP16-NEXT:    retq
   %1 = load i16, ptr %src, align 2
@@ -77,7 +77,7 @@ define double @test4(ptr nocapture %src) {
 ;
 ; FP16-LABEL: test4:
 ; FP16:       # %bb.0:
-; FP16-NEXT:    vmovsh (%rdi), %xmm0
+; FP16-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; FP16-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
 ; FP16-NEXT:    retq
   %1 = load i16, ptr %src, align 2
@@ -123,7 +123,7 @@ define x86_fp80 @test6(ptr nocapture %src) {
 ; FP16:       # %bb.0:
 ; FP16-NEXT:    pushq %rax
 ; FP16-NEXT:    .cfi_def_cfa_offset 16
-; FP16-NEXT:    vmovsh (%rdi), %xmm0
+; FP16-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; FP16-NEXT:    callq __extendhfxf2@PLT
 ; FP16-NEXT:    popq %rax
 ; FP16-NEXT:    .cfi_def_cfa_offset 8

diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll
index 3ecddd5279814f..bf93c8a1f5b511 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll
@@ -52,7 +52,7 @@ define half @fadd_f16(half %a, half %b) nounwind strictfp {
 ;
 ; X86-LABEL: fadd_f16:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vaddsh {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X86-NEXT:    retl
 ;
@@ -102,7 +102,7 @@ define half @fsub_f16(half %a, half %b) nounwind strictfp {
 ;
 ; X86-LABEL: fsub_f16:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vsubsh {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X86-NEXT:    retl
 ;
@@ -152,7 +152,7 @@ define half @fmul_f16(half %a, half %b) nounwind strictfp {
 ;
 ; X86-LABEL: fmul_f16:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vmulsh {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X86-NEXT:    retl
 ;
@@ -202,7 +202,7 @@ define half @fdiv_f16(half %a, half %b) nounwind strictfp {
 ;
 ; X86-LABEL: fdiv_f16:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vdivsh {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X86-NEXT:    retl
 ;
@@ -239,14 +239,14 @@ define void @fpext_f16_to_f32(ptr %val, ptr %ret) nounwind strictfp {
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    vmovsh (%ecx), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovss %xmm0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fpext_f16_to_f32:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovsh (%rdi), %xmm0
+; X64-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    vmovss %xmm0, (%rsi)
 ; X64-NEXT:    retq
@@ -282,14 +282,14 @@ define void @fpext_f16_to_f64(ptr %val, ptr %ret) nounwind strictfp {
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    vmovsh (%ecx), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovsd %xmm0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fpext_f16_to_f64:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovsh (%rdi), %xmm0
+; X64-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    vmovsd %xmm0, (%rsi)
 ; X64-NEXT:    retq
@@ -418,14 +418,14 @@ define void @fsqrt_f16(ptr %a) nounwind strictfp {
 ; X86-LABEL: fsqrt_f16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vmovsh (%eax), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vsqrtsh %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovsh %xmm0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fsqrt_f16:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovsh (%rdi), %xmm0
+; X64-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT:    vsqrtsh %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    vmovsh %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -510,8 +510,8 @@ define half @fma_f16(half %a, half %b, half %c) nounwind strictfp {
 ;
 ; X86-LABEL: fma_f16:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm1
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vfmadd213sh {{[0-9]+}}(%esp), %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;

diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
index 3b9798a2af5820..6fe5dcd2929305 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
@@ -273,7 +273,7 @@ define half @fround16(half %f) #0 {
 ; X86-LABEL: fround16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovss %xmm0, (%esp)
 ; X86-NEXT:    calll roundf

diff --git a/llvm/test/CodeGen/X86/half-darwin.ll b/llvm/test/CodeGen/X86/half-darwin.ll
index ec099db4e7ca7f..7388429143df56 100644
--- a/llvm/test/CodeGen/X86/half-darwin.ll
+++ b/llvm/test/CodeGen/X86/half-darwin.ll
@@ -82,7 +82,7 @@ define float @extendhfsf(ptr %ptr) nounwind {
 ;
 ; CHECK-FP16-LABEL: extendhfsf:
 ; CHECK-FP16:       ## %bb.0:
-; CHECK-FP16-NEXT:    vmovsh (%rdi), %xmm0
+; CHECK-FP16-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; CHECK-FP16-NEXT:    retq
 
@@ -174,7 +174,7 @@ define float @strict_extendhfsf(ptr %ptr) nounwind strictfp {
 ;
 ; CHECK-FP16-LABEL: strict_extendhfsf:
 ; CHECK-FP16:       ## %bb.0:
-; CHECK-FP16-NEXT:    vmovsh (%rdi), %xmm0
+; CHECK-FP16-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; CHECK-FP16-NEXT:    retq
 


        


More information about the llvm-commits mailing list