[llvm] 28064bf - [AArch64][GlobalISel] Update and cleanup a number of gisel tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sat Nov 23 10:59:01 PST 2024


Author: David Green
Date: 2024-11-23T18:58:55Z
New Revision: 28064bfad12cfce959d74fa6d099312e19703f26

URL: https://github.com/llvm/llvm-project/commit/28064bfad12cfce959d74fa6d099312e19703f26
DIFF: https://github.com/llvm/llvm-project/commit/28064bfad12cfce959d74fa6d099312e19703f26.diff

LOG: [AArch64][GlobalISel] Update and cleanup a number of gisel tests. NFC

Mostly removing unnecessary -global-isel-abort=2 options, or adding fallback messages where they were missing.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/abs.ll
    llvm/test/CodeGen/AArch64/arm64-clrsb.ll
    llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
    llvm/test/CodeGen/AArch64/arm64-vclz.ll
    llvm/test/CodeGen/AArch64/concat-vector.ll
    llvm/test/CodeGen/AArch64/extract-subvec-combine.ll
    llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll
    llvm/test/CodeGen/AArch64/fcvt-fixed.ll
    llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
    llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
    llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
    llvm/test/CodeGen/AArch64/fp-intrinsics.ll
    llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
    llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
    llvm/test/CodeGen/AArch64/funnel-shift.ll
    llvm/test/CodeGen/AArch64/itofp-bf16.ll
    llvm/test/CodeGen/AArch64/mingw-refptr.ll
    llvm/test/CodeGen/AArch64/mulcmle.ll
    llvm/test/CodeGen/AArch64/overflow.ll
    llvm/test/CodeGen/AArch64/phi.ll
    llvm/test/CodeGen/AArch64/sadd_sat.ll
    llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
    llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
    llvm/test/CodeGen/AArch64/sext.ll
    llvm/test/CodeGen/AArch64/ssub_sat.ll
    llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
    llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
    llvm/test/CodeGen/AArch64/uadd_sat.ll
    llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
    llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
    llvm/test/CodeGen/AArch64/usub_sat.ll
    llvm/test/CodeGen/AArch64/usub_sat_plus.ll
    llvm/test/CodeGen/AArch64/usub_sat_vec.ll
    llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/abs.ll b/llvm/test/CodeGen/AArch64/abs.ll
index 25a14ef9a49ee8..d501d9ed24547a 100644
--- a/llvm/test/CodeGen/AArch64/abs.ll
+++ b/llvm/test/CodeGen/AArch64/abs.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; ===== Legal Scalars =====
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-clrsb.ll b/llvm/test/CodeGen/AArch64/arm64-clrsb.ll
index 412c2b00a5ac09..9c54238c68e2c6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-clrsb.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-clrsb.ll
@@ -1,78 +1,68 @@
-; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 |  FileCheck %s
-; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -O0 -pass-remarks-missed=gisel* -global-isel-abort=2 |  FileCheck %s --check-prefixes=GISEL,FALLBACK
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 |  FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -global-isel |  FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-; Function Attrs: nounwind readnone
 declare i32 @llvm.ctlz.i32(i32, i1) #0
 declare i64 @llvm.ctlz.i64(i64, i1) #1
 
-; Function Attrs: nounwind ssp
-; FALLBACK-NOT: remark{{.*}}clrsb32
 define i32 @clrsb32(i32 %x) #2 {
+; CHECK-LABEL: clrsb32:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    cls w0, w0
+; CHECK-NEXT:    ret
 entry:
   %shr = ashr i32 %x, 31
   %xor = xor i32 %shr, %x
   %mul = shl i32 %xor, 1
   %add = or i32 %mul, 1
   %0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 false)
-
   ret i32 %0
-; CHECK-LABEL: clrsb32
-; CHECK:   cls [[TEMP:w[0-9]+]], [[TEMP]]
-
-; GISEL-LABEL: clrsb32
-; GISEL: cls [[TEMP:w[0-9]+]], [[TEMP]]
 }
 
-; Function Attrs: nounwind ssp
-; FALLBACK-NOT: remark{{.*}}clrsb64
 define i64 @clrsb64(i64 %x) #3 {
+; CHECK-LABEL: clrsb64:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    cls x0, x0
+; CHECK-NEXT:    ret
 entry:
   %shr = ashr i64 %x, 63
   %xor = xor i64 %shr, %x
   %mul = shl nsw i64 %xor, 1
   %add = or i64 %mul, 1
   %0 = tail call i64 @llvm.ctlz.i64(i64 %add, i1 false)
-
   ret i64 %0
-; CHECK-LABEL: clrsb64
-; CHECK:   cls [[TEMP:x[0-9]+]], [[TEMP]]
-; GISEL-LABEL: clrsb64
-; GISEL:   cls [[TEMP:x[0-9]+]], [[TEMP]]
 }
 
-; Function Attrs: nounwind ssp
-; FALLBACK-NOT: remark{{.*}}clrsb32_zeroundef
 define i32 @clrsb32_zeroundef(i32 %x) #2 {
+; CHECK-LABEL: clrsb32_zeroundef:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    cls w0, w0
+; CHECK-NEXT:    ret
 entry:
   %shr = ashr i32 %x, 31
   %xor = xor i32 %shr, %x
   %mul = shl i32 %xor, 1
   %add = or i32 %mul, 1
   %0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 true)
-
   ret i32 %0
-; CHECK-LABEL: clrsb32_zeroundef
-; CHECK:   cls [[TEMP:w[0-9]+]], [[TEMP]]
-
-; GISEL-LABEL: clrsb32_zeroundef
-; GISEL: cls [[TEMP:w[0-9]+]], [[TEMP]]
 }
 
-; Function Attrs: nounwind ssp
-; FALLBACK-NOT: remark{{.*}}clrsb64
 define i64 @clrsb64_zeroundef(i64 %x) #3 {
+; CHECK-LABEL: clrsb64_zeroundef:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    cls x0, x0
+; CHECK-NEXT:    ret
 entry:
   %shr = ashr i64 %x, 63
   %xor = xor i64 %shr, %x
   %mul = shl nsw i64 %xor, 1
   %add = or i64 %mul, 1
   %0 = tail call i64 @llvm.ctlz.i64(i64 %add, i1 true)
-
   ret i64 %0
-; CHECK-LABEL: clrsb64_zeroundef
-; CHECK:   cls [[TEMP:x[0-9]+]], [[TEMP]]
-; GISEL-LABEL: clrsb64_zeroundef
-; GISEL:   cls [[TEMP:x[0-9]+]], [[TEMP]]
 }
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}

diff  --git a/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll b/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
index 475affa358bd15..0e1e15f9b6b912 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
@@ -1,12 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define void @testLeftGood8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftGood8x8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sli.8b v0, v1, #3
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftGood8x8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sli.8b v0, v1, #3
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftGood8x8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.8b v2, #7
+; CHECK-GI-NEXT:    shl.8b v1, v1, #3
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <8 x i8> %src1, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %vshl_n = shl <8 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <8 x i8> %and.i, %vshl_n
@@ -15,14 +25,23 @@ define void @testLeftGood8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind
 }
 
 define void @testLeftBad8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftBad8x8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi.8b v2, #165
-; CHECK-NEXT:    add.8b v1, v1, v1
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    orr.8b v0, v0, v1
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftBad8x8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    movi.8b v2, #165
+; CHECK-SD-NEXT:    add.8b v1, v1, v1
+; CHECK-SD-NEXT:    and.8b v0, v0, v2
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftBad8x8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.8b v2, #165
+; CHECK-GI-NEXT:    shl.8b v1, v1, #1
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <8 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
   %vshl_n = shl <8 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %result = or <8 x i8> %and.i, %vshl_n
@@ -31,11 +50,20 @@ define void @testLeftBad8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind
 }
 
 define void @testRightGood8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightGood8x8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sri.8b v0, v1, #3
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightGood8x8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sri.8b v0, v1, #3
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightGood8x8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.8b v2, #224
+; CHECK-GI-NEXT:    ushr.8b v1, v1, #3
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <8 x i8> %src1, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
   %vshl_n = lshr <8 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <8 x i8> %and.i, %vshl_n
@@ -60,11 +88,20 @@ define void @testRightBad8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind
 }
 
 define void @testLeftGood16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftGood16x8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sli.16b v0, v1, #3
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftGood16x8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sli.16b v0, v1, #3
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftGood16x8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.16b v2, #7
+; CHECK-GI-NEXT:    shl.16b v1, v1, #3
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <16 x i8> %src1, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %vshl_n = shl <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <16 x i8> %and.i, %vshl_n
@@ -73,14 +110,23 @@ define void @testLeftGood16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounw
 }
 
 define void @testLeftBad16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftBad16x8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi.16b v2, #165
-; CHECK-NEXT:    add.16b v1, v1, v1
-; CHECK-NEXT:    and.16b v0, v0, v2
-; CHECK-NEXT:    orr.16b v0, v0, v1
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftBad16x8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    movi.16b v2, #165
+; CHECK-SD-NEXT:    add.16b v1, v1, v1
+; CHECK-SD-NEXT:    and.16b v0, v0, v2
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftBad16x8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.16b v2, #165
+; CHECK-GI-NEXT:    shl.16b v1, v1, #1
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <16 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
   %vshl_n = shl <16 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %result = or <16 x i8> %and.i, %vshl_n
@@ -89,11 +135,20 @@ define void @testLeftBad16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwi
 }
 
 define void @testRightGood16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightGood16x8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sri.16b v0, v1, #3
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightGood16x8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sri.16b v0, v1, #3
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightGood16x8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.16b v2, #224
+; CHECK-GI-NEXT:    ushr.16b v1, v1, #3
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <16 x i8> %src1, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
   %vshl_n = lshr <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <16 x i8> %and.i, %vshl_n
@@ -118,11 +173,20 @@ define void @testRightBad16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounw
 }
 
 define void @testLeftGood4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftGood4x16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sli.4h v0, v1, #14
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftGood4x16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sli.4h v0, v1, #14
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftGood4x16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mvni.4h v2, #192, lsl #8
+; CHECK-GI-NEXT:    shl.4h v1, v1, #14
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <4 x i16> %src1, <i16 16383, i16 16383, i16 16383, i16 16383>
   %vshl_n = shl <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
   %result = or <4 x i16> %and.i, %vshl_n
@@ -131,15 +195,25 @@ define void @testLeftGood4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounw
 }
 
 define void @testLeftBad4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftBad4x16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16500
-; CHECK-NEXT:    shl.4h v1, v1, #14
-; CHECK-NEXT:    dup.4h v2, w8
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    orr.8b v0, v0, v1
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftBad4x16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #16500 // =0x4074
+; CHECK-SD-NEXT:    shl.4h v1, v1, #14
+; CHECK-SD-NEXT:    dup.4h v2, w8
+; CHECK-SD-NEXT:    and.8b v0, v0, v2
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftBad4x16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI9_0
+; CHECK-GI-NEXT:    shl.4h v1, v1, #14
+; CHECK-GI-NEXT:    ldr d2, [x8, :lo12:.LCPI9_0]
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <4 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500>
   %vshl_n = shl <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
   %result = or <4 x i16> %and.i, %vshl_n
@@ -148,11 +222,20 @@ define void @testLeftBad4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwi
 }
 
 define void @testRightGood4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightGood4x16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sri.4h v0, v1, #14
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightGood4x16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sri.4h v0, v1, #14
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightGood4x16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mvni.4h v2, #3
+; CHECK-GI-NEXT:    ushr.4h v1, v1, #14
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <4 x i16> %src1, <i16 65532, i16 65532, i16 65532, i16 65532>
   %vshl_n = lshr <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
   %result = or <4 x i16> %and.i, %vshl_n
@@ -161,14 +244,24 @@ define void @testRightGood4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) noun
 }
 
 define void @testRightBad4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightBad4x16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16500
-; CHECK-NEXT:    dup.4h v2, w8
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    usra.4h v0, v1, #14
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightBad4x16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #16500 // =0x4074
+; CHECK-SD-NEXT:    dup.4h v2, w8
+; CHECK-SD-NEXT:    and.8b v0, v0, v2
+; CHECK-SD-NEXT:    usra.4h v0, v1, #14
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightBad4x16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI11_0
+; CHECK-GI-NEXT:    ushr.4h v1, v1, #14
+; CHECK-GI-NEXT:    ldr d2, [x8, :lo12:.LCPI11_0]
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <4 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500>
   %vshl_n = lshr <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
   %result = or <4 x i16> %and.i, %vshl_n
@@ -177,11 +270,20 @@ define void @testRightBad4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounw
 }
 
 define void @testLeftGood8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftGood8x16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sli.8h v0, v1, #14
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftGood8x16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sli.8h v0, v1, #14
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftGood8x16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mvni.8h v2, #192, lsl #8
+; CHECK-GI-NEXT:    shl.8h v1, v1, #14
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <8 x i16> %src1, <i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383>
   %vshl_n = shl <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
   %result = or <8 x i16> %and.i, %vshl_n
@@ -190,15 +292,25 @@ define void @testLeftGood8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounw
 }
 
 define void @testLeftBad8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftBad8x16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16500
-; CHECK-NEXT:    shl.8h v1, v1, #14
-; CHECK-NEXT:    dup.8h v2, w8
-; CHECK-NEXT:    and.16b v0, v0, v2
-; CHECK-NEXT:    orr.16b v0, v0, v1
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftBad8x16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #16500 // =0x4074
+; CHECK-SD-NEXT:    shl.8h v1, v1, #14
+; CHECK-SD-NEXT:    dup.8h v2, w8
+; CHECK-SD-NEXT:    and.16b v0, v0, v2
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftBad8x16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI13_0
+; CHECK-GI-NEXT:    shl.8h v1, v1, #14
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI13_0]
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <8 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500>
   %vshl_n = shl <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
   %result = or <8 x i16> %and.i, %vshl_n
@@ -207,11 +319,20 @@ define void @testLeftBad8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwi
 }
 
 define void @testRightGood8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightGood8x16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sri.8h v0, v1, #14
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightGood8x16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sri.8h v0, v1, #14
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightGood8x16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mvni.8h v2, #3
+; CHECK-GI-NEXT:    ushr.8h v1, v1, #14
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <8 x i16> %src1, <i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532>
   %vshl_n = lshr <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
   %result = or <8 x i16> %and.i, %vshl_n
@@ -220,14 +341,24 @@ define void @testRightGood8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) noun
 }
 
 define void @testRightBad8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightBad8x16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16500
-; CHECK-NEXT:    dup.8h v2, w8
-; CHECK-NEXT:    and.16b v0, v0, v2
-; CHECK-NEXT:    usra.8h v0, v1, #14
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightBad8x16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #16500 // =0x4074
+; CHECK-SD-NEXT:    dup.8h v2, w8
+; CHECK-SD-NEXT:    and.16b v0, v0, v2
+; CHECK-SD-NEXT:    usra.8h v0, v1, #14
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightBad8x16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI15_0
+; CHECK-GI-NEXT:    ushr.8h v1, v1, #14
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI15_0]
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <8 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500>
   %vshl_n = lshr <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
   %result = or <8 x i16> %and.i, %vshl_n
@@ -236,11 +367,20 @@ define void @testRightBad8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounw
 }
 
 define void @testLeftGood2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftGood2x32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sli.2s v0, v1, #22
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftGood2x32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sli.2s v0, v1, #22
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftGood2x32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.2s v2, #63, msl #16
+; CHECK-GI-NEXT:    shl.2s v1, v1, #22
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <2 x i32> %src1, <i32 4194303, i32 4194303>
   %vshl_n = shl <2 x i32> %src2, <i32 22, i32 22>
   %result = or <2 x i32> %and.i, %vshl_n
@@ -249,15 +389,25 @@ define void @testLeftGood2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounw
 }
 
 define void @testLeftBad2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftBad2x32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #4194300
-; CHECK-NEXT:    shl.2s v1, v1, #22
-; CHECK-NEXT:    dup.2s v2, w8
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    orr.8b v0, v0, v1
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftBad2x32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #4194300 // =0x3ffffc
+; CHECK-SD-NEXT:    shl.2s v1, v1, #22
+; CHECK-SD-NEXT:    dup.2s v2, w8
+; CHECK-SD-NEXT:    and.8b v0, v0, v2
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftBad2x32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI17_0
+; CHECK-GI-NEXT:    shl.2s v1, v1, #22
+; CHECK-GI-NEXT:    ldr d2, [x8, :lo12:.LCPI17_0]
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <2 x i32> %src1, <i32 4194300, i32 4194300>
   %vshl_n = shl <2 x i32> %src2, <i32 22, i32 22>
   %result = or <2 x i32> %and.i, %vshl_n
@@ -266,11 +416,20 @@ define void @testLeftBad2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwi
 }
 
 define void @testRightGood2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightGood2x32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sri.2s v0, v1, #22
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightGood2x32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sri.2s v0, v1, #22
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightGood2x32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mvni.2s v2, #3, msl #8
+; CHECK-GI-NEXT:    ushr.2s v1, v1, #22
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <2 x i32> %src1, <i32 4294966272, i32 4294966272>
   %vshl_n = lshr <2 x i32> %src2, <i32 22, i32 22>
   %result = or <2 x i32> %and.i, %vshl_n
@@ -279,15 +438,25 @@ define void @testRightGood2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) noun
 }
 
 define void @testRightBad2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightBad2x32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #4194300
-; CHECK-NEXT:    ushr.2s v1, v1, #22
-; CHECK-NEXT:    dup.2s v2, w8
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    orr.8b v0, v0, v1
-; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightBad2x32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #4194300 // =0x3ffffc
+; CHECK-SD-NEXT:    ushr.2s v1, v1, #22
+; CHECK-SD-NEXT:    dup.2s v2, w8
+; CHECK-SD-NEXT:    and.8b v0, v0, v2
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    str d0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightBad2x32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI19_0
+; CHECK-GI-NEXT:    ushr.2s v1, v1, #22
+; CHECK-GI-NEXT:    ldr d2, [x8, :lo12:.LCPI19_0]
+; CHECK-GI-NEXT:    and.8b v0, v0, v2
+; CHECK-GI-NEXT:    orr.8b v0, v0, v1
+; CHECK-GI-NEXT:    str d0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <2 x i32> %src1, <i32 4194300, i32 4194300>
   %vshl_n = lshr <2 x i32> %src2, <i32 22, i32 22>
   %result = or <2 x i32> %and.i, %vshl_n
@@ -296,11 +465,20 @@ define void @testRightBad2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounw
 }
 
 define void @testLeftGood4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftGood4x32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sli.4s v0, v1, #22
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftGood4x32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sli.4s v0, v1, #22
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftGood4x32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.4s v2, #63, msl #16
+; CHECK-GI-NEXT:    shl.4s v1, v1, #22
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <4 x i32> %src1, <i32 4194303, i32 4194303, i32 4194303, i32 4194303>
   %vshl_n = shl <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
   %result = or <4 x i32> %and.i, %vshl_n
@@ -309,15 +487,25 @@ define void @testLeftGood4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounw
 }
 
 define void @testLeftBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftBad4x32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #4194300
-; CHECK-NEXT:    shl.4s v1, v1, #22
-; CHECK-NEXT:    dup.4s v2, w8
-; CHECK-NEXT:    and.16b v0, v0, v2
-; CHECK-NEXT:    orr.16b v0, v0, v1
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftBad4x32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #4194300 // =0x3ffffc
+; CHECK-SD-NEXT:    shl.4s v1, v1, #22
+; CHECK-SD-NEXT:    dup.4s v2, w8
+; CHECK-SD-NEXT:    and.16b v0, v0, v2
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftBad4x32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI21_0
+; CHECK-GI-NEXT:    shl.4s v1, v1, #22
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI21_0]
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <4 x i32> %src1, <i32 4194300, i32 4194300, i32 4194300, i32 4194300>
   %vshl_n = shl <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
   %result = or <4 x i32> %and.i, %vshl_n
@@ -326,11 +514,20 @@ define void @testLeftBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwi
 }
 
 define void @testRightGood4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightGood4x32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sri.4s v0, v1, #22
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightGood4x32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sri.4s v0, v1, #22
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightGood4x32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mvni.4s v2, #3, msl #8
+; CHECK-GI-NEXT:    ushr.4s v1, v1, #22
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <4 x i32> %src1, <i32 4294966272, i32 4294966272, i32 4294966272, i32 4294966272>
   %vshl_n = lshr <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
   %result = or <4 x i32> %and.i, %vshl_n
@@ -339,15 +536,25 @@ define void @testRightGood4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) noun
 }
 
 define void @testRightBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightBad4x32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #4194300
-; CHECK-NEXT:    ushr.4s v1, v1, #22
-; CHECK-NEXT:    dup.4s v2, w8
-; CHECK-NEXT:    and.16b v0, v0, v2
-; CHECK-NEXT:    orr.16b v0, v0, v1
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightBad4x32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #4194300 // =0x3ffffc
+; CHECK-SD-NEXT:    ushr.4s v1, v1, #22
+; CHECK-SD-NEXT:    dup.4s v2, w8
+; CHECK-SD-NEXT:    and.16b v0, v0, v2
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightBad4x32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI23_0
+; CHECK-GI-NEXT:    ushr.4s v1, v1, #22
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI23_0]
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <4 x i32> %src1, <i32 4194300, i32 4194300, i32 4194300, i32 4194300>
   %vshl_n = lshr <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
   %result = or <4 x i32> %and.i, %vshl_n
@@ -356,11 +563,20 @@ define void @testRightBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounw
 }
 
 define void @testLeftGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftGood2x64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sli.2d v0, v1, #48
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftGood2x64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sli.2d v0, v1, #48
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftGood2x64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.2d v2, #0x00ffffffffffff
+; CHECK-GI-NEXT:    shl.2d v1, v1, #48
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <2 x i64> %src1, <i64 281474976710655, i64 281474976710655>
   %vshl_n = shl <2 x i64> %src2, <i64 48, i64 48>
   %result = or <2 x i64> %and.i, %vshl_n
@@ -369,16 +585,26 @@ define void @testLeftGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounw
 }
 
 define void @testLeftBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftBad2x64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #10
-; CHECK-NEXT:    shl.2d v1, v1, #48
-; CHECK-NEXT:    movk x8, #1, lsl #48
-; CHECK-NEXT:    dup.2d v2, x8
-; CHECK-NEXT:    and.16b v0, v0, v2
-; CHECK-NEXT:    orr.16b v0, v0, v1
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftBad2x64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov x8, #10 // =0xa
+; CHECK-SD-NEXT:    shl.2d v1, v1, #48
+; CHECK-SD-NEXT:    movk x8, #1, lsl #48
+; CHECK-SD-NEXT:    dup.2d v2, x8
+; CHECK-SD-NEXT:    and.16b v0, v0, v2
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftBad2x64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI25_0
+; CHECK-GI-NEXT:    shl.2d v1, v1, #48
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI25_0]
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <2 x i64> %src1, <i64 281474976710666, i64 281474976710666>
   %vshl_n = shl <2 x i64> %src2, <i64 48, i64 48>
   %result = or <2 x i64> %and.i, %vshl_n
@@ -387,11 +613,20 @@ define void @testLeftBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwi
 }
 
 define void @testRightGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightGood2x64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sri.2d v0, v1, #48
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightGood2x64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sri.2d v0, v1, #48
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightGood2x64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi.2d v2, #0xffffffffffff0000
+; CHECK-GI-NEXT:    ushr.2d v1, v1, #48
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <2 x i64> %src1, <i64 18446744073709486080, i64 18446744073709486080>
   %vshl_n = lshr <2 x i64> %src2, <i64 48, i64 48>
   %result = or <2 x i64> %and.i, %vshl_n
@@ -400,16 +635,26 @@ define void @testRightGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) noun
 }
 
 define void @testRightBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testRightBad2x64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #10
-; CHECK-NEXT:    ushr.2d v1, v1, #48
-; CHECK-NEXT:    movk x8, #1, lsl #48
-; CHECK-NEXT:    dup.2d v2, x8
-; CHECK-NEXT:    and.16b v0, v0, v2
-; CHECK-NEXT:    orr.16b v0, v0, v1
-; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testRightBad2x64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov x8, #10 // =0xa
+; CHECK-SD-NEXT:    ushr.2d v1, v1, #48
+; CHECK-SD-NEXT:    movk x8, #1, lsl #48
+; CHECK-SD-NEXT:    dup.2d v2, x8
+; CHECK-SD-NEXT:    and.16b v0, v0, v2
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    str q0, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testRightBad2x64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI27_0
+; CHECK-GI-NEXT:    ushr.2d v1, v1, #48
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI27_0]
+; CHECK-GI-NEXT:    and.16b v0, v0, v2
+; CHECK-GI-NEXT:    orr.16b v0, v0, v1
+; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ret
   %and.i = and <2 x i64> %src1, <i64 281474976710666, i64 281474976710666>
   %vshl_n = lshr <2 x i64> %src2, <i64 48, i64 48>
   %result = or <2 x i64> %and.i, %vshl_n
@@ -418,11 +663,19 @@ define void @testRightBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounw
 }
 
 define void @testLeftShouldNotCreateSLI1x128(<1 x i128> %src1, <1 x i128> %src2, ptr %dest) nounwind {
-; CHECK-LABEL: testLeftShouldNotCreateSLI1x128:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    bfi x1, x2, #6, #58
-; CHECK-NEXT:    stp x0, x1, [x4]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: testLeftShouldNotCreateSLI1x128:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    bfi x1, x2, #6, #58
+; CHECK-SD-NEXT:    stp x0, x1, [x4]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: testLeftShouldNotCreateSLI1x128:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov.d v0[0], x0
+; CHECK-GI-NEXT:    bfi x1, x2, #6, #58
+; CHECK-GI-NEXT:    mov.d v0[1], x1
+; CHECK-GI-NEXT:    str q0, [x4]
+; CHECK-GI-NEXT:    ret
   %and.i = and <1 x i128> %src1, <i128 1180591620717411303423>
   %vshl_n = shl <1 x i128> %src2, <i128 70>
   %result = or <1 x i128> %and.i, %vshl_n

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vclz.ll b/llvm/test/CodeGen/AArch64/arm64-vclz.ll
index 38c0572e23f890..c65e75c89e8da9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vclz.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vclz.ll
@@ -1,154 +1,254 @@
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -global-isel -global-isel-abort=2 -pass-remarks-missed=gisel* -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; FALLBACK-NOT: remark{{.*}}test_vclz_u8
 define <8 x i8> @test_vclz_u8(<8 x i8> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclz_u8:
-  ; CHECK: clz.8b v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclz_u8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.8b v0, v0
+; CHECK-NEXT:    ret
   %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
   ret <8 x i8> %vclz.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclz_s8
 define <8 x i8> @test_vclz_s8(<8 x i8> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclz_s8:
-  ; CHECK: clz.8b v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclz_s8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.8b v0, v0
+; CHECK-NEXT:    ret
   %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
   ret <8 x i8> %vclz.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclz_u16
 define <4 x i16> @test_vclz_u16(<4 x i16> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclz_u16:
-  ; CHECK: clz.4h v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclz_u16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.4h v0, v0
+; CHECK-NEXT:    ret
   %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
   ret <4 x i16> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclz_s16
 define <4 x i16> @test_vclz_s16(<4 x i16> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclz_s16:
-  ; CHECK: clz.4h v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclz_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.4h v0, v0
+; CHECK-NEXT:    ret
   %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
   ret <4 x i16> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclz_u32
 define <2 x i32> @test_vclz_u32(<2 x i32> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclz_u32:
-  ; CHECK: clz.2s v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclz_u32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.2s v0, v0
+; CHECK-NEXT:    ret
   %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
   ret <2 x i32> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclz_s32
 define <2 x i32> @test_vclz_s32(<2 x i32> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclz_s32:
-  ; CHECK: clz.2s v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclz_s32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.2s v0, v0
+; CHECK-NEXT:    ret
   %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
   ret <2 x i32> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclz_u64
 define <1 x i64> @test_vclz_u64(<1 x i64> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclz_u64:
+; CHECK-SD-LABEL: test_vclz_u64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ushr d1, d0, #1
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #2
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #4
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #8
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #16
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #32
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    mvn.8b v0, v0
+; CHECK-SD-NEXT:    cnt.8b v0, v0
+; CHECK-SD-NEXT:    uaddlp.4h v0, v0
+; CHECK-SD-NEXT:    uaddlp.2s v0, v0
+; CHECK-SD-NEXT:    uaddlp.1d v0, v0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_vclz_u64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    clz x8, x8
+; CHECK-GI-NEXT:    fmov d0, x8
+; CHECK-GI-NEXT:    ret
   %vclz1.i = tail call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %a, i1 false) nounwind
   ret <1 x i64> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclz_s64
 define <1 x i64> @test_vclz_s64(<1 x i64> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclz_s64:
+; CHECK-SD-LABEL: test_vclz_s64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ushr d1, d0, #1
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #2
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #4
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #8
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #16
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    ushr d1, d0, #32
+; CHECK-SD-NEXT:    orr.8b v0, v0, v1
+; CHECK-SD-NEXT:    mvn.8b v0, v0
+; CHECK-SD-NEXT:    cnt.8b v0, v0
+; CHECK-SD-NEXT:    uaddlp.4h v0, v0
+; CHECK-SD-NEXT:    uaddlp.2s v0, v0
+; CHECK-SD-NEXT:    uaddlp.1d v0, v0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_vclz_s64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    clz x8, x8
+; CHECK-GI-NEXT:    fmov d0, x8
+; CHECK-GI-NEXT:    ret
   %vclz1.i = tail call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %a, i1 false) nounwind
   ret <1 x i64> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclzq_u8
 define <16 x i8> @test_vclzq_u8(<16 x i8> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclzq_u8:
-  ; CHECK: clz.16b v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclzq_u8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.16b v0, v0
+; CHECK-NEXT:    ret
   %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
   ret <16 x i8> %vclz.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclzq_s8
 define <16 x i8> @test_vclzq_s8(<16 x i8> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclzq_s8:
-  ; CHECK: clz.16b v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclzq_s8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.16b v0, v0
+; CHECK-NEXT:    ret
   %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
   ret <16 x i8> %vclz.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclzq_u16
 define <8 x i16> @test_vclzq_u16(<8 x i16> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclzq_u16:
-  ; CHECK: clz.8h v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclzq_u16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.8h v0, v0
+; CHECK-NEXT:    ret
   %vclz1.i = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) nounwind
   ret <8 x i16> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclzq_s16
 define <8 x i16> @test_vclzq_s16(<8 x i16> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclzq_s16:
-  ; CHECK: clz.8h v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclzq_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.8h v0, v0
+; CHECK-NEXT:    ret
   %vclz1.i = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) nounwind
   ret <8 x i16> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclzq_u32
 define <4 x i32> @test_vclzq_u32(<4 x i32> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclzq_u32:
-  ; CHECK: clz.4s v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclzq_u32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.4s v0, v0
+; CHECK-NEXT:    ret
   %vclz1.i = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false) nounwind
   ret <4 x i32> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclzq_s32
 define <4 x i32> @test_vclzq_s32(<4 x i32> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclzq_s32:
-  ; CHECK: clz.4s v0, v0
-  ; CHECK-NEXT: ret
+; CHECK-LABEL: test_vclzq_s32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz.4s v0, v0
+; CHECK-NEXT:    ret
   %vclz1.i = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false) nounwind
   ret <4 x i32> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclzq_u64
 define <2 x i64> @test_vclzq_u64(<2 x i64> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclzq_u64:
+; CHECK-SD-LABEL: test_vclzq_u64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #1
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #2
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #4
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #8
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #16
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #32
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    mvn.16b v0, v0
+; CHECK-SD-NEXT:    cnt.16b v0, v0
+; CHECK-SD-NEXT:    uaddlp.8h v0, v0
+; CHECK-SD-NEXT:    uaddlp.4s v0, v0
+; CHECK-SD-NEXT:    uaddlp.2d v0, v0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_vclzq_u64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    mov.d x9, v0[1]
+; CHECK-GI-NEXT:    clz x8, x8
+; CHECK-GI-NEXT:    mov.d v0[0], x8
+; CHECK-GI-NEXT:    clz x8, x9
+; CHECK-GI-NEXT:    mov.d v0[1], x8
+; CHECK-GI-NEXT:    ret
   %vclz1.i = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 false) nounwind
   ret <2 x i64> %vclz1.i
 }
 
-; FALLBACK-NOT: remark{{.*}}test_vclzq_s64
 define <2 x i64> @test_vclzq_s64(<2 x i64> %a) nounwind readnone ssp {
-  ; CHECK-LABEL: test_vclzq_s64:
+; CHECK-SD-LABEL: test_vclzq_s64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #1
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #2
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #4
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #8
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #16
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    ushr.2d v1, v0, #32
+; CHECK-SD-NEXT:    orr.16b v0, v0, v1
+; CHECK-SD-NEXT:    mvn.16b v0, v0
+; CHECK-SD-NEXT:    cnt.16b v0, v0
+; CHECK-SD-NEXT:    uaddlp.8h v0, v0
+; CHECK-SD-NEXT:    uaddlp.4s v0, v0
+; CHECK-SD-NEXT:    uaddlp.2d v0, v0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_vclzq_s64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    mov.d x9, v0[1]
+; CHECK-GI-NEXT:    clz x8, x8
+; CHECK-GI-NEXT:    mov.d v0[0], x8
+; CHECK-GI-NEXT:    clz x8, x9
+; CHECK-GI-NEXT:    mov.d v0[1], x8
+; CHECK-GI-NEXT:    ret
   %vclz1.i = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 false) nounwind
   ret <2 x i64> %vclz1.i
 }
 
 declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) nounwind readnone
-
 declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
-
 declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) nounwind readnone
-
 declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) nounwind readnone
-
 declare <1 x i64> @llvm.ctlz.v1i64(<1 x i64>, i1) nounwind readnone
-
 declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
-
 declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>, i1) nounwind readnone
-
 declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>, i1) nounwind readnone

diff  --git a/llvm/test/CodeGen/AArch64/concat-vector.ll b/llvm/test/CodeGen/AArch64/concat-vector.ll
index d9aaae20afc69e..d4d89a7c9c22e5 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define <4 x i8> @concat1(<2 x i8> %A, <2 x i8> %B) {
 ; CHECK-SD-LABEL: concat1:

diff  --git a/llvm/test/CodeGen/AArch64/extract-subvec-combine.ll b/llvm/test/CodeGen/AArch64/extract-subvec-combine.ll
index 43c6e01911462a..75d55773b3681e 100644
--- a/llvm/test/CodeGen/AArch64/extract-subvec-combine.ll
+++ b/llvm/test/CodeGen/AArch64/extract-subvec-combine.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define <2 x i32> @and_extract_zext_idx0(<4 x i16> %vec) nounwind {
 ; CHECK-SD-LABEL: and_extract_zext_idx0:

diff  --git a/llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll
index d18af3d5ae9450..7705d8949ca1ed 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll
@@ -2,6 +2,13 @@
 ; RUN: llc -mtriple=aarch64 -mattr=+sve -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc -mtriple=aarch64 -mattr=+sve -aarch64-enable-gisel-sve=1 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
+; CHECK-GI:       warning: Instruction selection used fallback path for insert_vscale_8_i16_zero
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for insert_vscale_8_i16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for insert_vscale_16_i8_zero
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for insert_vscale_16_i8
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for extract_vscale_16_i8
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for extract_vscale_16_i8_zero
+
 define <vscale x 2 x i64> @insert_vscale_2_i64_zero(<vscale x 2 x i64> %vec, i64 %elt) {
 ; CHECK-SD-LABEL: insert_vscale_2_i64_zero:
 ; CHECK-SD:       // %bb.0: // %entry

diff  --git a/llvm/test/CodeGen/AArch64/fcvt-fixed.ll b/llvm/test/CodeGen/AArch64/fcvt-fixed.ll
index 7056a4d28fed39..51aad4fe25d3b8 100644
--- a/llvm/test/CodeGen/AArch64/fcvt-fixed.ll
+++ b/llvm/test/CodeGen/AArch64/fcvt-fixed.ll
@@ -1,164 +1,308 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefixes=CHECK,CHECK-NO16
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-NO16,CHECK-SD-NO16
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-FP16,CHECK-SD-FP16
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-NO16,CHECK-GI-NO16
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-FP16,CHECK-GI-FP16
 
 ; fptoui
 
 define i32 @fcvtzs_f32_i32_7(float %flt) {
-; CHECK-LABEL: fcvtzs_f32_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w0, s0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_f32_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs w0, s0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_f32_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzs w0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 128.0
   %cvt = fptosi float %fix to i32
   ret i32 %cvt
 }
 
 define i32 @fcvtzs_f32_i32_32(float %flt) {
-; CHECK-LABEL: fcvtzs_f32_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w0, s0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_f32_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs w0, s0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_f32_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1333788672 // =0x4f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzs w0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 4294967296.0
   %cvt = fptosi float %fix to i32
   ret i32 %cvt
 }
 
 define i64 @fcvtzs_f32_i64_7(float %flt) {
-; CHECK-LABEL: fcvtzs_f32_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x0, s0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_f32_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs x0, s0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_f32_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzs x0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 128.0
   %cvt = fptosi float %fix to i64
   ret i64 %cvt
 }
 
 define i64 @fcvtzs_f32_i64_64(float %flt) {
-; CHECK-LABEL: fcvtzs_f32_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x0, s0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_f32_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs x0, s0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_f32_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1602224128 // =0x5f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzs x0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 18446744073709551616.0
   %cvt = fptosi float %fix to i64
   ret i64 %cvt
 }
 
 define i32 @fcvtzs_f64_i32_7(double %dbl) {
-; CHECK-LABEL: fcvtzs_f64_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w0, d0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_f64_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs w0, d0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_f64_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzs w0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 128.0
   %cvt = fptosi double %fix to i32
   ret i32 %cvt
 }
 
 define i32 @fcvtzs_f64_i32_32(double %dbl) {
-; CHECK-LABEL: fcvtzs_f64_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w0, d0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_f64_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs w0, d0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_f64_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4751297606875873280 // =0x41f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzs w0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 4294967296.0
   %cvt = fptosi double %fix to i32
   ret i32 %cvt
 }
 
 define i64 @fcvtzs_f64_i64_7(double %dbl) {
-; CHECK-LABEL: fcvtzs_f64_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x0, d0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_f64_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs x0, d0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_f64_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzs x0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 128.0
   %cvt = fptosi double %fix to i64
   ret i64 %cvt
 }
 
 define i64 @fcvtzs_f64_i64_64(double %dbl) {
-; CHECK-LABEL: fcvtzs_f64_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x0, d0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_f64_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs x0, d0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_f64_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4895412794951729152 // =0x43f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzs x0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 18446744073709551616.0
   %cvt = fptosi double %fix to i64
   ret i64 %cvt
 }
 
 define i32 @fcvtzs_f16_i32_7(half %flt) {
-; CHECK-NO16-LABEL: fcvtzs_f16_i32_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #67, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzs w0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzs_f16_i32_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs w0, h0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzs_f16_i32_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzs w0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzs_f16_i32_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzs w0, h0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzs_f16_i32_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzs w0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzs_f16_i32_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI8_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI8_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzs w0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %flt, 128.0
   %cvt = fptosi half %fix to i32
   ret i32 %cvt
 }
 
 define i32 @fcvtzs_f16_i32_15(half %flt) {
-; CHECK-NO16-LABEL: fcvtzs_f16_i32_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #71, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzs w0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzs_f16_i32_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs w0, h0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzs_f16_i32_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #71, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzs w0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzs_f16_i32_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzs w0, h0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzs_f16_i32_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzs w0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzs_f16_i32_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI9_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI9_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzs w0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %flt, 32768.0
   %cvt = fptosi half %fix to i32
   ret i32 %cvt
 }
 
 define i64 @fcvtzs_f16_i64_7(half %flt) {
-; CHECK-NO16-LABEL: fcvtzs_f16_i64_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #67, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzs x0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzs_f16_i64_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs x0, h0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzs_f16_i64_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzs x0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzs_f16_i64_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzs x0, h0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzs_f16_i64_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzs x0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzs_f16_i64_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI10_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI10_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzs x0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %flt, 128.0
   %cvt = fptosi half %fix to i64
   ret i64 %cvt
 }
 
 define i64 @fcvtzs_f16_i64_15(half %flt) {
-; CHECK-NO16-LABEL: fcvtzs_f16_i64_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #71, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzs x0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzs_f16_i64_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs x0, h0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzs_f16_i64_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #71, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzs x0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzs_f16_i64_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzs x0, h0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzs_f16_i64_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzs x0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzs_f16_i64_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI11_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI11_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzs x0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %flt, 32768.0
   %cvt = fptosi half %fix to i64
   ret i64 %cvt
@@ -167,160 +311,302 @@ define i64 @fcvtzs_f16_i64_15(half %flt) {
 ; fptoui
 
 define i32 @fcvtzu_f32_i32_7(float %flt) {
-; CHECK-LABEL: fcvtzu_f32_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu w0, s0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_f32_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu w0, s0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_f32_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzu w0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 128.0
   %cvt = fptoui float %fix to i32
   ret i32 %cvt
 }
 
 define i32 @fcvtzu_f32_i32_32(float %flt) {
-; CHECK-LABEL: fcvtzu_f32_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu w0, s0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_f32_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu w0, s0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_f32_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1333788672 // =0x4f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzu w0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 4294967296.0
   %cvt = fptoui float %fix to i32
   ret i32 %cvt
 }
 
 define i64 @fcvtzu_f32_i64_7(float %flt) {
-; CHECK-LABEL: fcvtzu_f32_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x0, s0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_f32_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu x0, s0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_f32_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzu x0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 128.0
   %cvt = fptoui float %fix to i64
   ret i64 %cvt
 }
 
 define i64 @fcvtzu_f32_i64_64(float %flt) {
-; CHECK-LABEL: fcvtzu_f32_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x0, s0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_f32_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu x0, s0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_f32_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1602224128 // =0x5f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzu x0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 18446744073709551616.0
   %cvt = fptoui float %fix to i64
   ret i64 %cvt
 }
 
 define i32 @fcvtzu_f64_i32_7(double %dbl) {
-; CHECK-LABEL: fcvtzu_f64_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu w0, d0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_f64_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu w0, d0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_f64_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzu w0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 128.0
   %cvt = fptoui double %fix to i32
   ret i32 %cvt
 }
 
 define i32 @fcvtzu_f64_i32_32(double %dbl) {
-; CHECK-LABEL: fcvtzu_f64_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu w0, d0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_f64_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu w0, d0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_f64_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4751297606875873280 // =0x41f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzu w0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 4294967296.0
   %cvt = fptoui double %fix to i32
   ret i32 %cvt
 }
 
 define i64 @fcvtzu_f64_i64_7(double %dbl) {
-; CHECK-LABEL: fcvtzu_f64_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x0, d0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_f64_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu x0, d0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_f64_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzu x0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 128.0
   %cvt = fptoui double %fix to i64
   ret i64 %cvt
 }
 
 define i64 @fcvtzu_f64_i64_64(double %dbl) {
-; CHECK-LABEL: fcvtzu_f64_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x0, d0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_f64_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu x0, d0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_f64_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4895412794951729152 // =0x43f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzu x0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 18446744073709551616.0
   %cvt = fptoui double %fix to i64
   ret i64 %cvt
 }
 
 define i32 @fcvtzu_f16_i32_7(half %flt) {
-; CHECK-NO16-LABEL: fcvtzu_f16_i32_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #67, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzu w0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzu_f16_i32_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzu w0, h0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzu_f16_i32_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzu w0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzu_f16_i32_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzu w0, h0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzu_f16_i32_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzu w0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzu_f16_i32_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI20_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI20_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzu w0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %flt, 128.0
   %cvt = fptoui half %fix to i32
   ret i32 %cvt
 }
 
 define i32 @fcvtzu_f16_i32_15(half %flt) {
-; CHECK-NO16-LABEL: fcvtzu_f16_i32_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #71, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzu w0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzu_f16_i32_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzu w0, h0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzu_f16_i32_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #71, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzu w0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzu_f16_i32_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzu w0, h0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzu_f16_i32_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzu w0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzu_f16_i32_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI21_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI21_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzu w0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %flt, 32768.0
   %cvt = fptoui half %fix to i32
   ret i32 %cvt
 }
 
 define i64 @fcvtzu_f16_i64_7(half %flt) {
-; CHECK-NO16-LABEL: fcvtzu_f16_i64_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #67, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzu x0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzu_f16_i64_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzu x0, h0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzu_f16_i64_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzu x0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzu_f16_i64_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzu x0, h0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzu_f16_i64_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzu x0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzu_f16_i64_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI22_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI22_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %flt, 128.0
   %cvt = fptoui half %fix to i64
   ret i64 %cvt
 }
 
 define i64 @fcvtzu_f16_i64_15(half %flt) {
-; CHECK-NO16-LABEL: fcvtzu_f16_i64_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #71, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzu x0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzu_f16_i64_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzu x0, h0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzu_f16_i64_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #71, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzu x0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzu_f16_i64_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzu x0, h0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzu_f16_i64_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzu x0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzu_f16_i64_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI23_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI23_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %flt, 32768.0
   %cvt = fptoui half %fix to i64
   ret i64 %cvt
@@ -329,160 +615,302 @@ define i64 @fcvtzu_f16_i64_15(half %flt) {
 ; sitofp
 
 define float @scvtf_f32_i32_7(i32 %int) {
-; CHECK-LABEL: scvtf_f32_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf s0, w0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: scvtf_f32_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    scvtf s0, w0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: scvtf_f32_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v0.2s, #67, lsl #24
+; CHECK-GI-NEXT:    scvtf s1, w0
+; CHECK-GI-NEXT:    fdiv s0, s1, s0
+; CHECK-GI-NEXT:    ret
   %cvt = sitofp i32 %int to float
   %fix = fdiv float %cvt, 128.0
   ret float %fix
 }
 
 define float @scvtf_f32_i32_32(i32 %int) {
-; CHECK-LABEL: scvtf_f32_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf s0, w0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: scvtf_f32_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    scvtf s0, w0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: scvtf_f32_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    scvtf s0, w0
+; CHECK-GI-NEXT:    mov w8, #1333788672 // =0x4f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NEXT:    ret
   %cvt = sitofp i32 %int to float
   %fix = fdiv float %cvt, 4294967296.0
   ret float %fix
 }
 
 define float @scvtf_f32_i64_7(i64 %long) {
-; CHECK-LABEL: scvtf_f32_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf s0, x0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: scvtf_f32_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    scvtf s0, x0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: scvtf_f32_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v0.2s, #67, lsl #24
+; CHECK-GI-NEXT:    scvtf s1, x0
+; CHECK-GI-NEXT:    fdiv s0, s1, s0
+; CHECK-GI-NEXT:    ret
   %cvt = sitofp i64 %long to float
   %fix = fdiv float %cvt, 128.0
   ret float %fix
 }
 
 define float @scvtf_f32_i64_64(i64 %long) {
-; CHECK-LABEL: scvtf_f32_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf s0, x0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: scvtf_f32_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    scvtf s0, x0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: scvtf_f32_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    scvtf s0, x0
+; CHECK-GI-NEXT:    mov w8, #1602224128 // =0x5f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NEXT:    ret
   %cvt = sitofp i64 %long to float
   %fix = fdiv float %cvt, 18446744073709551616.0
   ret float %fix
 }
 
 define double @scvtf_f64_i32_7(i32 %int) {
-; CHECK-LABEL: scvtf_f64_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf d0, w0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: scvtf_f64_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    scvtf d0, w0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: scvtf_f64_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    scvtf d0, w0
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fdiv d0, d0, d1
+; CHECK-GI-NEXT:    ret
   %cvt = sitofp i32 %int to double
   %fix = fdiv double %cvt, 128.0
   ret double %fix
 }
 
 define double @scvtf_f64_i32_32(i32 %int) {
-; CHECK-LABEL: scvtf_f64_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf d0, w0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: scvtf_f64_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    scvtf d0, w0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: scvtf_f64_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    scvtf d0, w0
+; CHECK-GI-NEXT:    mov x8, #4751297606875873280 // =0x41f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fdiv d0, d0, d1
+; CHECK-GI-NEXT:    ret
   %cvt = sitofp i32 %int to double
   %fix = fdiv double %cvt, 4294967296.0
   ret double %fix
 }
 
 define double @scvtf_f64_i64_7(i64 %long) {
-; CHECK-LABEL: scvtf_f64_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf d0, x0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: scvtf_f64_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    scvtf d0, x0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: scvtf_f64_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    scvtf d0, x0
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fdiv d0, d0, d1
+; CHECK-GI-NEXT:    ret
   %cvt = sitofp i64 %long to double
   %fix = fdiv double %cvt, 128.0
   ret double %fix
 }
 
 define double @scvtf_f64_i64_64(i64 %long) {
-; CHECK-LABEL: scvtf_f64_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf d0, x0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: scvtf_f64_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    scvtf d0, x0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: scvtf_f64_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    scvtf d0, x0
+; CHECK-GI-NEXT:    mov x8, #4895412794951729152 // =0x43f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fdiv d0, d0, d1
+; CHECK-GI-NEXT:    ret
   %cvt = sitofp i64 %long to double
   %fix = fdiv double %cvt, 18446744073709551616.0
   ret double %fix
 }
 
 define half @scvtf_f16_i32_7(i32 %int) {
-; CHECK-NO16-LABEL: scvtf_f16_i32_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    scvtf s1, w0
-; CHECK-NO16-NEXT:    movi v0.2s, #60, lsl #24
-; CHECK-NO16-NEXT:    fcvt h1, s1
-; CHECK-NO16-NEXT:    fcvt s1, h1
-; CHECK-NO16-NEXT:    fmul s0, s1, s0
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: scvtf_f16_i32_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    scvtf h0, w0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: scvtf_f16_i32_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    scvtf s1, w0
+; CHECK-SD-NO16-NEXT:    movi v0.2s, #60, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt h1, s1
+; CHECK-SD-NO16-NEXT:    fcvt s1, h1
+; CHECK-SD-NO16-NEXT:    fmul s0, s1, s0
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: scvtf_f16_i32_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    scvtf h0, w0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: scvtf_f16_i32_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    scvtf s0, w0
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: scvtf_f16_i32_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    scvtf h0, w0
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI32_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI32_0]
+; CHECK-GI-FP16-NEXT:    fdiv h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %cvt = sitofp i32 %int to half
   %fix = fdiv half %cvt, 128.0
   ret half %fix
 }
 
 define half @scvtf_f16_i32_15(i32 %int) {
-; CHECK-NO16-LABEL: scvtf_f16_i32_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    scvtf s1, w0
-; CHECK-NO16-NEXT:    movi v0.2s, #56, lsl #24
-; CHECK-NO16-NEXT:    fcvt h1, s1
-; CHECK-NO16-NEXT:    fcvt s1, h1
-; CHECK-NO16-NEXT:    fmul s0, s1, s0
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: scvtf_f16_i32_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    scvtf h0, w0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: scvtf_f16_i32_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    scvtf s1, w0
+; CHECK-SD-NO16-NEXT:    movi v0.2s, #56, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt h1, s1
+; CHECK-SD-NO16-NEXT:    fcvt s1, h1
+; CHECK-SD-NO16-NEXT:    fmul s0, s1, s0
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: scvtf_f16_i32_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    scvtf h0, w0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: scvtf_f16_i32_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    scvtf s0, w0
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: scvtf_f16_i32_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    scvtf h0, w0
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI33_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI33_0]
+; CHECK-GI-FP16-NEXT:    fdiv h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %cvt = sitofp i32 %int to half
   %fix = fdiv half %cvt, 32768.0
   ret half %fix
 }
 
 define half @scvtf_f16_i64_7(i64 %long) {
-; CHECK-NO16-LABEL: scvtf_f16_i64_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    scvtf s1, x0
-; CHECK-NO16-NEXT:    movi v0.2s, #60, lsl #24
-; CHECK-NO16-NEXT:    fcvt h1, s1
-; CHECK-NO16-NEXT:    fcvt s1, h1
-; CHECK-NO16-NEXT:    fmul s0, s1, s0
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: scvtf_f16_i64_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    scvtf h0, x0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: scvtf_f16_i64_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    scvtf s1, x0
+; CHECK-SD-NO16-NEXT:    movi v0.2s, #60, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt h1, s1
+; CHECK-SD-NO16-NEXT:    fcvt s1, h1
+; CHECK-SD-NO16-NEXT:    fmul s0, s1, s0
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: scvtf_f16_i64_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    scvtf h0, x0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: scvtf_f16_i64_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    scvtf s0, x0
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: scvtf_f16_i64_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    scvtf h0, x0
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI34_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI34_0]
+; CHECK-GI-FP16-NEXT:    fdiv h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %cvt = sitofp i64 %long to half
   %fix = fdiv half %cvt, 128.0
   ret half %fix
 }
 
 define half @scvtf_f16_i64_15(i64 %long) {
-; CHECK-NO16-LABEL: scvtf_f16_i64_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    scvtf s1, x0
-; CHECK-NO16-NEXT:    movi v0.2s, #56, lsl #24
-; CHECK-NO16-NEXT:    fcvt h1, s1
-; CHECK-NO16-NEXT:    fcvt s1, h1
-; CHECK-NO16-NEXT:    fmul s0, s1, s0
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: scvtf_f16_i64_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    scvtf h0, x0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: scvtf_f16_i64_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    scvtf s1, x0
+; CHECK-SD-NO16-NEXT:    movi v0.2s, #56, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt h1, s1
+; CHECK-SD-NO16-NEXT:    fcvt s1, h1
+; CHECK-SD-NO16-NEXT:    fmul s0, s1, s0
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: scvtf_f16_i64_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    scvtf h0, x0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: scvtf_f16_i64_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    scvtf s0, x0
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: scvtf_f16_i64_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    scvtf h0, x0
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI35_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI35_0]
+; CHECK-GI-FP16-NEXT:    fdiv h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %cvt = sitofp i64 %long to half
   %fix = fdiv half %cvt, 32768.0
   ret half %fix
@@ -491,160 +919,302 @@ define half @scvtf_f16_i64_15(i64 %long) {
 ; uitofp
 
 define float @ucvtf_f32_i32_7(i32 %int) {
-; CHECK-LABEL: ucvtf_f32_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ucvtf s0, w0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: ucvtf_f32_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ucvtf s0, w0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: ucvtf_f32_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v0.2s, #67, lsl #24
+; CHECK-GI-NEXT:    ucvtf s1, w0
+; CHECK-GI-NEXT:    fdiv s0, s1, s0
+; CHECK-GI-NEXT:    ret
   %cvt = uitofp i32 %int to float
   %fix = fdiv float %cvt, 128.0
   ret float %fix
 }
 
 define float @ucvtf_f32_i32_32(i32 %int) {
-; CHECK-LABEL: ucvtf_f32_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ucvtf s0, w0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: ucvtf_f32_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ucvtf s0, w0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: ucvtf_f32_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ucvtf s0, w0
+; CHECK-GI-NEXT:    mov w8, #1333788672 // =0x4f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NEXT:    ret
   %cvt = uitofp i32 %int to float
   %fix = fdiv float %cvt, 4294967296.0
   ret float %fix
 }
 
 define float @ucvtf_f32_i64_7(i64 %long) {
-; CHECK-LABEL: ucvtf_f32_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ucvtf s0, x0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: ucvtf_f32_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ucvtf s0, x0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: ucvtf_f32_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v0.2s, #67, lsl #24
+; CHECK-GI-NEXT:    ucvtf s1, x0
+; CHECK-GI-NEXT:    fdiv s0, s1, s0
+; CHECK-GI-NEXT:    ret
   %cvt = uitofp i64 %long to float
   %fix = fdiv float %cvt, 128.0
   ret float %fix
 }
 
 define float @ucvtf_f32_i64_64(i64 %long) {
-; CHECK-LABEL: ucvtf_f32_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ucvtf s0, x0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: ucvtf_f32_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ucvtf s0, x0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: ucvtf_f32_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ucvtf s0, x0
+; CHECK-GI-NEXT:    mov w8, #1602224128 // =0x5f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NEXT:    ret
   %cvt = uitofp i64 %long to float
   %fix = fdiv float %cvt, 18446744073709551616.0
   ret float %fix
 }
 
 define double @ucvtf_f64_i32_7(i32 %int) {
-; CHECK-LABEL: ucvtf_f64_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ucvtf d0, w0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: ucvtf_f64_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ucvtf d0, w0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: ucvtf_f64_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ucvtf d0, w0
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fdiv d0, d0, d1
+; CHECK-GI-NEXT:    ret
   %cvt = uitofp i32 %int to double
   %fix = fdiv double %cvt, 128.0
   ret double %fix
 }
 
 define double @ucvtf_f64_i32_32(i32 %int) {
-; CHECK-LABEL: ucvtf_f64_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ucvtf d0, w0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: ucvtf_f64_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ucvtf d0, w0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: ucvtf_f64_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ucvtf d0, w0
+; CHECK-GI-NEXT:    mov x8, #4751297606875873280 // =0x41f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fdiv d0, d0, d1
+; CHECK-GI-NEXT:    ret
   %cvt = uitofp i32 %int to double
   %fix = fdiv double %cvt, 4294967296.0
   ret double %fix
 }
 
 define double @ucvtf_f64_i64_7(i64 %long) {
-; CHECK-LABEL: ucvtf_f64_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ucvtf d0, x0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: ucvtf_f64_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ucvtf d0, x0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: ucvtf_f64_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ucvtf d0, x0
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fdiv d0, d0, d1
+; CHECK-GI-NEXT:    ret
   %cvt = uitofp i64 %long to double
   %fix = fdiv double %cvt, 128.0
   ret double %fix
 }
 
 define double @ucvtf_f64_i64_64(i64 %long) {
-; CHECK-LABEL: ucvtf_f64_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ucvtf d0, x0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: ucvtf_f64_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ucvtf d0, x0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: ucvtf_f64_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ucvtf d0, x0
+; CHECK-GI-NEXT:    mov x8, #4895412794951729152 // =0x43f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fdiv d0, d0, d1
+; CHECK-GI-NEXT:    ret
   %cvt = uitofp i64 %long to double
   %fix = fdiv double %cvt, 18446744073709551616.0
   ret double %fix
 }
 
 define half @ucvtf_f16_i32_7(i32 %int) {
-; CHECK-NO16-LABEL: ucvtf_f16_i32_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    ucvtf s1, w0
-; CHECK-NO16-NEXT:    movi v0.2s, #60, lsl #24
-; CHECK-NO16-NEXT:    fcvt h1, s1
-; CHECK-NO16-NEXT:    fcvt s1, h1
-; CHECK-NO16-NEXT:    fmul s0, s1, s0
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: ucvtf_f16_i32_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    ucvtf h0, w0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: ucvtf_f16_i32_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    ucvtf s1, w0
+; CHECK-SD-NO16-NEXT:    movi v0.2s, #60, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt h1, s1
+; CHECK-SD-NO16-NEXT:    fcvt s1, h1
+; CHECK-SD-NO16-NEXT:    fmul s0, s1, s0
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: ucvtf_f16_i32_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    ucvtf h0, w0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: ucvtf_f16_i32_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    ucvtf s0, w0
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: ucvtf_f16_i32_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    ucvtf h0, w0
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI44_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI44_0]
+; CHECK-GI-FP16-NEXT:    fdiv h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %cvt = uitofp i32 %int to half
   %fix = fdiv half %cvt, 128.0
   ret half %fix
 }
 
 define half @ucvtf_f16_i32_15(i32 %int) {
-; CHECK-NO16-LABEL: ucvtf_f16_i32_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    ucvtf s1, w0
-; CHECK-NO16-NEXT:    movi v0.2s, #56, lsl #24
-; CHECK-NO16-NEXT:    fcvt h1, s1
-; CHECK-NO16-NEXT:    fcvt s1, h1
-; CHECK-NO16-NEXT:    fmul s0, s1, s0
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: ucvtf_f16_i32_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    ucvtf h0, w0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: ucvtf_f16_i32_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    ucvtf s1, w0
+; CHECK-SD-NO16-NEXT:    movi v0.2s, #56, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt h1, s1
+; CHECK-SD-NO16-NEXT:    fcvt s1, h1
+; CHECK-SD-NO16-NEXT:    fmul s0, s1, s0
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: ucvtf_f16_i32_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    ucvtf h0, w0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: ucvtf_f16_i32_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    ucvtf s0, w0
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: ucvtf_f16_i32_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    ucvtf h0, w0
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI45_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI45_0]
+; CHECK-GI-FP16-NEXT:    fdiv h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %cvt = uitofp i32 %int to half
   %fix = fdiv half %cvt, 32768.0
   ret half %fix
 }
 
 define half @ucvtf_f16_i64_7(i64 %long) {
-; CHECK-NO16-LABEL: ucvtf_f16_i64_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    ucvtf s1, x0
-; CHECK-NO16-NEXT:    movi v0.2s, #60, lsl #24
-; CHECK-NO16-NEXT:    fcvt h1, s1
-; CHECK-NO16-NEXT:    fcvt s1, h1
-; CHECK-NO16-NEXT:    fmul s0, s1, s0
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: ucvtf_f16_i64_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    ucvtf h0, x0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: ucvtf_f16_i64_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    ucvtf s1, x0
+; CHECK-SD-NO16-NEXT:    movi v0.2s, #60, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt h1, s1
+; CHECK-SD-NO16-NEXT:    fcvt s1, h1
+; CHECK-SD-NO16-NEXT:    fmul s0, s1, s0
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: ucvtf_f16_i64_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    ucvtf h0, x0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: ucvtf_f16_i64_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    ucvtf s0, x0
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: ucvtf_f16_i64_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    ucvtf h0, x0
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI46_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI46_0]
+; CHECK-GI-FP16-NEXT:    fdiv h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %cvt = uitofp i64 %long to half
   %fix = fdiv half %cvt, 128.0
   ret half %fix
 }
 
 define half @ucvtf_f16_i64_15(i64 %long) {
-; CHECK-NO16-LABEL: ucvtf_f16_i64_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    ucvtf s1, x0
-; CHECK-NO16-NEXT:    movi v0.2s, #56, lsl #24
-; CHECK-NO16-NEXT:    fcvt h1, s1
-; CHECK-NO16-NEXT:    fcvt s1, h1
-; CHECK-NO16-NEXT:    fmul s0, s1, s0
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: ucvtf_f16_i64_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    ucvtf h0, x0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: ucvtf_f16_i64_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    ucvtf s1, x0
+; CHECK-SD-NO16-NEXT:    movi v0.2s, #56, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt h1, s1
+; CHECK-SD-NO16-NEXT:    fcvt s1, h1
+; CHECK-SD-NO16-NEXT:    fmul s0, s1, s0
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: ucvtf_f16_i64_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    ucvtf h0, x0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: ucvtf_f16_i64_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    ucvtf s0, x0
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fdiv s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: ucvtf_f16_i64_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    ucvtf h0, x0
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI47_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI47_0]
+; CHECK-GI-FP16-NEXT:    fdiv h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %cvt = uitofp i64 %long to half
   %fix = fdiv half %cvt, 32768.0
   ret half %fix
@@ -661,150 +1231,285 @@ declare i32 @llvm.fptosi.sat.i32.f16(half)
 declare i64 @llvm.fptosi.sat.i64.f16(half)
 
 define i32 @fcvtzs_sat_f32_i32_7(float %flt) {
-; CHECK-LABEL: fcvtzs_sat_f32_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w0, s0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_sat_f32_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs w0, s0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_sat_f32_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzs w0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 128.0
   %cvt = call i32 @llvm.fptosi.sat.i32.f32(float %fix)
   ret i32 %cvt
 }
 
 define i32 @fcvtzs_sat_f32_i32_32(float %flt) {
-; CHECK-LABEL: fcvtzs_sat_f32_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w0, s0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_sat_f32_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs w0, s0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_sat_f32_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1333788672 // =0x4f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzs w0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 4294967296.0
   %cvt = call i32 @llvm.fptosi.sat.i32.f32(float %fix)
   ret i32 %cvt
 }
 
 define i64 @fcvtzs_sat_f32_i64_64(float %flt) {
-; CHECK-LABEL: fcvtzs_sat_f32_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x0, s0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_sat_f32_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs x0, s0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_sat_f32_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1602224128 // =0x5f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzs x0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 18446744073709551616.0
   %cvt = call i64 @llvm.fptosi.sat.i64.f32(float %fix)
   ret i64 %cvt
 }
 
 define i32 @fcvtzs_sat_f64_i32_7(double %dbl) {
-; CHECK-LABEL: fcvtzs_sat_f64_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w0, d0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_sat_f64_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs w0, d0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_sat_f64_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzs w0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 128.0
   %cvt = call i32 @llvm.fptosi.sat.i32.f64(double %fix)
   ret i32 %cvt
 }
 
 define i32 @fcvtzs_sat_f64_i32_32(double %dbl) {
-; CHECK-LABEL: fcvtzs_sat_f64_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w0, d0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_sat_f64_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs w0, d0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_sat_f64_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4751297606875873280 // =0x41f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzs w0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 4294967296.0
   %cvt = call i32 @llvm.fptosi.sat.i32.f64(double %fix)
   ret i32 %cvt
 }
 
 define i64 @fcvtzs_sat_f64_i64_7(double %dbl) {
-; CHECK-LABEL: fcvtzs_sat_f64_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x0, d0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_sat_f64_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs x0, d0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_sat_f64_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzs x0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 128.0
   %cvt = call i64 @llvm.fptosi.sat.i64.f64(double %fix)
   ret i64 %cvt
 }
 
 define i64 @fcvtzs_sat_f64_i64_64(double %dbl) {
-; CHECK-LABEL: fcvtzs_sat_f64_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x0, d0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzs_sat_f64_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzs x0, d0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzs_sat_f64_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4895412794951729152 // =0x43f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzs x0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 18446744073709551616.0
   %cvt = call i64 @llvm.fptosi.sat.i64.f64(double %fix)
   ret i64 %cvt
 }
 
 define i32 @fcvtzs_sat_f16_i32_7(half %dbl) {
-; CHECK-NO16-LABEL: fcvtzs_sat_f16_i32_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #67, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzs w0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzs_sat_f16_i32_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs w0, h0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzs_sat_f16_i32_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzs w0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzs_sat_f16_i32_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzs w0, h0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzs_sat_f16_i32_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzs w0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzs_sat_f16_i32_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI55_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI55_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzs w0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %dbl, 128.0
   %cvt = call i32 @llvm.fptosi.sat.i32.f16(half %fix)
   ret i32 %cvt
 }
 
 define i32 @fcvtzs_sat_f16_i32_15(half %dbl) {
-; CHECK-NO16-LABEL: fcvtzs_sat_f16_i32_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #71, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzs w0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzs_sat_f16_i32_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs w0, h0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzs_sat_f16_i32_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #71, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzs w0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzs_sat_f16_i32_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzs w0, h0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzs_sat_f16_i32_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzs w0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzs_sat_f16_i32_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI56_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI56_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzs w0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %dbl, 32768.0
   %cvt = call i32 @llvm.fptosi.sat.i32.f16(half %fix)
   ret i32 %cvt
 }
 
 define i64 @fcvtzs_sat_f16_i64_7(half %dbl) {
-; CHECK-NO16-LABEL: fcvtzs_sat_f16_i64_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #67, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzs x0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzs_sat_f16_i64_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs x0, h0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzs_sat_f16_i64_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzs x0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzs_sat_f16_i64_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzs x0, h0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzs_sat_f16_i64_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzs x0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzs_sat_f16_i64_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI57_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI57_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzs x0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %dbl, 128.0
   %cvt = call i64 @llvm.fptosi.sat.i64.f16(half %fix)
   ret i64 %cvt
 }
 
 define i64 @fcvtzs_sat_f16_i64_15(half %dbl) {
-; CHECK-NO16-LABEL: fcvtzs_sat_f16_i64_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #71, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzs x0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzs_sat_f16_i64_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs x0, h0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzs_sat_f16_i64_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #71, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzs x0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzs_sat_f16_i64_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzs x0, h0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzs_sat_f16_i64_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzs x0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzs_sat_f16_i64_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI58_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI58_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzs x0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %dbl, 32768.0
   %cvt = call i64 @llvm.fptosi.sat.i64.f16(half %fix)
   ret i64 %cvt
@@ -820,151 +1525,290 @@ declare i32 @llvm.fptoui.sat.i32.f16(half)
 declare i64 @llvm.fptoui.sat.i64.f16(half)
 
 define i32 @fcvtzu_sat_f32_i32_7(float %flt) {
-; CHECK-LABEL: fcvtzu_sat_f32_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu w0, s0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_sat_f32_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu w0, s0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_sat_f32_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzu w0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 128.0
   %cvt = call i32 @llvm.fptoui.sat.i32.f32(float %fix)
   ret i32 %cvt
 }
 
 define i32 @fcvtzu_sat_f32_i32_32(float %flt) {
-; CHECK-LABEL: fcvtzu_sat_f32_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu w0, s0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_sat_f32_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu w0, s0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_sat_f32_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1333788672 // =0x4f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzu w0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 4294967296.0
   %cvt = call i32 @llvm.fptoui.sat.i32.f32(float %fix)
   ret i32 %cvt
 }
 
 define i64 @fcvtzu_sat_f32_i64_64(float %flt) {
-; CHECK-LABEL: fcvtzu_sat_f32_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x0, s0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_sat_f32_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu x0, s0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_sat_f32_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1602224128 // =0x5f800000
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fcvtzu x0, s0
+; CHECK-GI-NEXT:    ret
   %fix = fmul float %flt, 18446744073709551616.0
   %cvt = call i64 @llvm.fptoui.sat.i64.f32(float %fix)
   ret i64 %cvt
 }
 
 define i32 @fcvtzu_sat_f64_i32_7(double %dbl) {
-; CHECK-LABEL: fcvtzu_sat_f64_i32_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu w0, d0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_sat_f64_i32_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu w0, d0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_sat_f64_i32_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzu w0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 128.0
   %cvt = call i32 @llvm.fptoui.sat.i32.f64(double %fix)
   ret i32 %cvt
 }
 
 define i32 @fcvtzu_sat_f64_i32_32(double %dbl) {
-; CHECK-LABEL: fcvtzu_sat_f64_i32_32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu w0, d0, #32
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_sat_f64_i32_32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu w0, d0, #32
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_sat_f64_i32_32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4751297606875873280 // =0x41f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzu w0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 4294967296.0
   %cvt = call i32 @llvm.fptoui.sat.i32.f64(double %fix)
   ret i32 %cvt
 }
 
 define i64 @fcvtzu_sat_f64_i64_7(double %dbl) {
-; CHECK-LABEL: fcvtzu_sat_f64_i64_7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x0, d0, #7
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_sat_f64_i64_7:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu x0, d0, #7
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_sat_f64_i64_7:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4638707616191610880 // =0x4060000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzu x0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 128.0
   %cvt = call i64 @llvm.fptoui.sat.i64.f64(double %fix)
   ret i64 %cvt
 }
 
 define i64 @fcvtzu_sat_f64_i64_64(double %dbl) {
-; CHECK-LABEL: fcvtzu_sat_f64_i64_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x0, d0, #64
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcvtzu_sat_f64_i64_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcvtzu x0, d0, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcvtzu_sat_f64_i64_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #4895412794951729152 // =0x43f0000000000000
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fcvtzu x0, d0
+; CHECK-GI-NEXT:    ret
   %fix = fmul double %dbl, 18446744073709551616.0
   %cvt = call i64 @llvm.fptoui.sat.i64.f64(double %fix)
   ret i64 %cvt
 }
 
 define i32 @fcvtzu_sat_f16_i32_7(half %dbl) {
-; CHECK-NO16-LABEL: fcvtzu_sat_f16_i32_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #67, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzu w0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzu_sat_f16_i32_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzu w0, h0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzu_sat_f16_i32_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzu w0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzu_sat_f16_i32_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzu w0, h0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzu_sat_f16_i32_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzu w0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzu_sat_f16_i32_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI66_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI66_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzu w0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %dbl, 128.0
   %cvt = call i32 @llvm.fptoui.sat.i32.f16(half %fix)
   ret i32 %cvt
 }
 
 define i32 @fcvtzu_sat_f16_i32_15(half %dbl) {
-; CHECK-NO16-LABEL: fcvtzu_sat_f16_i32_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #71, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzu w0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzu_sat_f16_i32_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzu w0, h0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzu_sat_f16_i32_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #71, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzu w0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzu_sat_f16_i32_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzu w0, h0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzu_sat_f16_i32_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzu w0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzu_sat_f16_i32_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI67_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI67_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzu w0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %dbl, 32768.0
   %cvt = call i32 @llvm.fptoui.sat.i32.f16(half %fix)
   ret i32 %cvt
 }
 
 define i64 @fcvtzu_sat_f16_i64_7(half %dbl) {
-; CHECK-NO16-LABEL: fcvtzu_sat_f16_i64_7:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #67, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzu x0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzu_sat_f16_i64_7:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzu x0, h0, #7
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzu_sat_f16_i64_7:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #67, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzu x0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzu_sat_f16_i64_7:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzu x0, h0, #7
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzu_sat_f16_i64_7:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #22528 // =0x5800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzu x0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzu_sat_f16_i64_7:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI68_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI68_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %dbl, 128.0
   %cvt = call i64 @llvm.fptoui.sat.i64.f16(half %fix)
   ret i64 %cvt
 }
 
 define i64 @fcvtzu_sat_f16_i64_15(half %dbl) {
-; CHECK-NO16-LABEL: fcvtzu_sat_f16_i64_15:
-; CHECK-NO16:       // %bb.0:
-; CHECK-NO16-NEXT:    movi v1.2s, #71, lsl #24
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fmul s0, s0, s1
-; CHECK-NO16-NEXT:    fcvt h0, s0
-; CHECK-NO16-NEXT:    fcvt s0, h0
-; CHECK-NO16-NEXT:    fcvtzu x0, s0
-; CHECK-NO16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: fcvtzu_sat_f16_i64_15:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzu x0, h0, #15
-; CHECK-FP16-NEXT:    ret
+; CHECK-SD-NO16-LABEL: fcvtzu_sat_f16_i64_15:
+; CHECK-SD-NO16:       // %bb.0:
+; CHECK-SD-NO16-NEXT:    movi v1.2s, #71, lsl #24
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NO16-NEXT:    fcvt h0, s0
+; CHECK-SD-NO16-NEXT:    fcvt s0, h0
+; CHECK-SD-NO16-NEXT:    fcvtzu x0, s0
+; CHECK-SD-NO16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: fcvtzu_sat_f16_i64_15:
+; CHECK-SD-FP16:       // %bb.0:
+; CHECK-SD-FP16-NEXT:    fcvtzu x0, h0, #15
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NO16-LABEL: fcvtzu_sat_f16_i64_15:
+; CHECK-GI-NO16:       // %bb.0:
+; CHECK-GI-NO16-NEXT:    mov w8, #30720 // =0x7800
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fmov s1, w8
+; CHECK-GI-NO16-NEXT:    fcvt s1, h1
+; CHECK-GI-NO16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NO16-NEXT:    fcvt h0, s0
+; CHECK-GI-NO16-NEXT:    fcvt s0, h0
+; CHECK-GI-NO16-NEXT:    fcvtzu x0, s0
+; CHECK-GI-NO16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: fcvtzu_sat_f16_i64_15:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI69_0
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI69_0]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT:    ret
   %fix = fmul half %dbl, 32768.0
   %cvt = call i64 @llvm.fptoui.sat.i64.f16(half %fix)
   ret i64 %cvt
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
+; CHECK-FP16: {{.*}}
+; CHECK-NO16: {{.*}}

diff  --git a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
index bbfec8c7c33617..4ab5db450a7f32 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define {<2 x half>, <2 x half>} @vector_deinterleave_v2f16_v4f16(<4 x half> %vec) {
 ; CHECK-SD-LABEL: vector_deinterleave_v2f16_v4f16:

diff  --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
index 4cce06dce44c9b..a80d51bac99297 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
@@ -1,11 +1,84 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
 ; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
-; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
-; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 -mattr=+fullfp16 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 -mattr=+fullfp16 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; Check that constrained fp intrinsics are correctly lowered.
 
+; CHECK-GI:       warning: Instruction selection used fallback path for add_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for frem_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i32_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i32_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i64_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i64_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f16_i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f16_i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f16_i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f16_i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f16_i128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f16_i128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for powi_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llrint_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lround_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llround_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_olt_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ole_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ogt_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oge_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oeq_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_one_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ult_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ule_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ugt_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_uge_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ueq_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_une_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_olt_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ole_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ogt_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oge_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oeq_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_one_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ult_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ule_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ugt_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_uge_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ueq_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_une_f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_f16_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_f32_f16
 
 ; Half-precision intrinsics
 

diff  --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
index 6147afba4e603a..83e60c10897624 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
@@ -1,9 +1,86 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64 %s -disable-strictnode-mutation -o - | FileCheck %s
-; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 -disable-strictnode-mutation %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 %s -disable-strictnode-mutation -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 -disable-strictnode-mutation %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; Check that constrained fp vector intrinsics are correctly lowered.
 
+; CHECK-GI:       warning: Instruction selection used fallback path for add_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v4i32_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v4i32_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v4i64_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v4i64_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v4f32_v4i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v4f32_v4i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v4f32_v4i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v4f32_v4i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for add_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v2i32_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v2i32_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v2i64_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v2i64_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v2f64_v2i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v2f64_v2i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v2f64_v2i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v2f64_v2i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for add_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v1i32_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v1i32_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v1i64_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v1i64_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v1f64_v1i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v1f64_v1i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v1f64_v1i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v1f64_v1i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_v1f61
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_v1f61
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_v2f32_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_v2f64_v2f32
 
 ; Single-precision intrinsics
 
@@ -882,3 +959,7 @@ declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x d
 
 declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}

diff  --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index fd3a0c3207606c..f2a14a9b73fa16 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -1,543 +1,1037 @@
-; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s
-; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; Check that constrained fp intrinsics are correctly lowered.
 
+; CHECK-GI:       warning: Instruction selection used fallback path for add_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for frem_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i32_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i32_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i64_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i64_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f32_i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f32_i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f32_i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f32_i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f32_i128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f32_i128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for powi_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llrint_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maximum_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minimum_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lround_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llround_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_olt_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ole_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ogt_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oge_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oeq_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_one_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ult_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ule_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ugt_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_uge_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ueq_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_une_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_olt_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ole_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ogt_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oge_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oeq_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_one_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ult_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ule_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ugt_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_uge_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ueq_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_une_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for add_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for frem_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i32_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i32_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i64_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i64_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f64_i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f64_i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f64_i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f64_i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f64_i128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f64_i128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for powi_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llrint_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maximum_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minimum_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lround_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llround_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_olt_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ole_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ogt_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oge_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oeq_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_one_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ult_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ule_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ugt_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_uge_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ueq_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_une_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_olt_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ole_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ogt_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oge_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oeq_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_one_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ult_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ule_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ugt_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_uge_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ueq_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_une_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for add_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for frem_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i32_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i32_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i64_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i64_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f128_i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f128_i32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f128_i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f128_i64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f128_i128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f128_i128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for powi_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llrint_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lround_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llround_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_olt_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ole_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ogt_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oge_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oeq_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_one_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ult_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ule_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ugt_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_uge_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ueq_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_une_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_olt_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ole_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ogt_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oge_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oeq_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_one_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ult_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ule_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ugt_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_uge_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ueq_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_une_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_f32_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_f32_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_f64_f128
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_f64_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_f128_f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_f128_f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_v1f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_v1f64
+
 
 ; Single-precision intrinsics
 
-; CHECK-LABEL: add_f32:
-; CHECK: fadd s0, s0, s1
 define float @add_f32(float %x, float %y) #0 {
+; CHECK-LABEL: add_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: sub_f32:
-; CHECK: fsub s0, s0, s1
 define float @sub_f32(float %x, float %y) #0 {
+; CHECK-LABEL: sub_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub s0, s0, s1
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: mul_f32:
-; CHECK: fmul s0, s0, s1
 define float @mul_f32(float %x, float %y) #0 {
+; CHECK-LABEL: mul_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul s0, s0, s1
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: div_f32:
-; CHECK: fdiv s0, s0, s1
 define float @div_f32(float %x, float %y) #0 {
+; CHECK-LABEL: div_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fdiv s0, s0, s1
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: frem_f32:
-; CHECK: bl fmodf
 define float @frem_f32(float %x, float %y) #0 {
+; CHECK-LABEL: frem_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl fmodf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.frem.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: fma_f32:
-; CHECK: fmadd s0, s0, s1, s2
 define float @fma_f32(float %x, float %y, float %z) #0 {
+; CHECK-LABEL: fma_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd s0, s0, s1, s2
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: fptosi_i32_f32:
-; CHECK: fcvtzs w0, s0
 define i32 @fptosi_i32_f32(float %x) #0 {
+; CHECK-LABEL: fptosi_i32_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs w0, s0
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: fptoui_i32_f32:
-; CHECK: fcvtzu w0, s0
 define i32 @fptoui_i32_f32(float %x) #0 {
+; CHECK-LABEL: fptoui_i32_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu w0, s0
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: fptosi_i64_f32:
-; CHECK: fcvtzs x0, s0
 define i64 @fptosi_i64_f32(float %x) #0 {
+; CHECK-LABEL: fptosi_i64_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs x0, s0
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: fptoui_i64_f32:
-; CHECK: fcvtzu x0, s0
 define i64 @fptoui_i64_f32(float %x) #0 {
+; CHECK-LABEL: fptoui_i64_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu x0, s0
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: sitofp_f32_i32:
-; CHECK: scvtf s0, w0
 define float @sitofp_f32_i32(i32 %x) #0 {
+; CHECK-LABEL: sitofp_f32_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf s0, w0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: uitofp_f32_i32:
-; CHECK: ucvtf s0, w0
 define float @uitofp_f32_i32(i32 %x) #0 {
+; CHECK-LABEL: uitofp_f32_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf s0, w0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: sitofp_f32_i64:
-; CHECK: scvtf s0, x0
 define float @sitofp_f32_i64(i64 %x) #0 {
+; CHECK-LABEL: sitofp_f32_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf s0, x0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: uitofp_f32_i64:
-; CHECK: ucvtf s0, x0
 define float @uitofp_f32_i64(i64 %x) #0 {
+; CHECK-LABEL: uitofp_f32_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf s0, x0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: sitofp_f32_i128:
-; CHECK: bl __floattisf
 define float @sitofp_f32_i128(i128 %x) #0 {
+; CHECK-LABEL: sitofp_f32_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floattisf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: uitofp_f32_i128:
-; CHECK: bl __floatuntisf
 define float @uitofp_f32_i128(i128 %x) #0 {
+; CHECK-LABEL: uitofp_f32_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floatuntisf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: sqrt_f32:
-; CHECK: fsqrt s0, s0
 define float @sqrt_f32(float %x) #0 {
+; CHECK-LABEL: sqrt_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsqrt s0, s0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.sqrt.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: powi_f32:
-; CHECK: bl __powisf2
 define float @powi_f32(float %x, i32 %y) #0 {
+; CHECK-LABEL: powi_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __powisf2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.powi.f32(float %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: sin_f32:
-; CHECK: bl sinf
 define float @sin_f32(float %x) #0 {
+; CHECK-LABEL: sin_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sinf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.sin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: cos_f32:
-; CHECK: bl cosf
 define float @cos_f32(float %x) #0 {
+; CHECK-LABEL: cos_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl cosf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.cos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: tan_f32:
-; CHECK: bl tanf
 define float @tan_f32(float %x) #0 {
+; CHECK-LABEL: tan_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl tanf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.tan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: asin_f32:
-; CHECK: bl asinf
 define float @asin_f32(float %x) #0 {
+; CHECK-LABEL: asin_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl asinf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.asin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: acos_f32:
-; CHECK: bl acosf
 define float @acos_f32(float %x) #0 {
+; CHECK-LABEL: acos_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl acosf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.acos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: atan_f32:
-; CHECK: bl atanf
 define float @atan_f32(float %x) #0 {
+; CHECK-LABEL: atan_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl atanf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.atan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: atan2_f32:
-; CHECK: bl atan2f
 define float @atan2_f32(float %x, float %y) #0 {
+; CHECK-LABEL: atan2_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl atan2f
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.atan2.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: sinh_f32:
-; CHECK: bl sinhf
 define float @sinh_f32(float %x) #0 {
+; CHECK-LABEL: sinh_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sinhf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.sinh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: cosh_f32:
-; CHECK: bl coshf
 define float @cosh_f32(float %x) #0 {
+; CHECK-LABEL: cosh_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl coshf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.cosh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: tanh_f32:
-; CHECK: bl tanhf
 define float @tanh_f32(float %x) #0 {
+; CHECK-LABEL: tanh_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl tanhf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.tanh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: pow_f32:
-; CHECK: bl powf
 define float @pow_f32(float %x, float %y) #0 {
+; CHECK-LABEL: pow_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl powf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.pow.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: log_f32:
-; CHECK: bl logf
 define float @log_f32(float %x) #0 {
+; CHECK-LABEL: log_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl logf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.log.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: log10_f32:
-; CHECK: bl log10f
 define float @log10_f32(float %x) #0 {
+; CHECK-LABEL: log10_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log10f
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.log10.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: log2_f32:
-; CHECK: bl log2f
 define float @log2_f32(float %x) #0 {
+; CHECK-LABEL: log2_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log2f
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.log2.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: exp_f32:
-; CHECK: bl expf
 define float @exp_f32(float %x) #0 {
+; CHECK-LABEL: exp_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl expf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.exp.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: exp2_f32:
-; CHECK: bl exp2f
 define float @exp2_f32(float %x) #0 {
+; CHECK-LABEL: exp2_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl exp2f
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.exp2.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: rint_f32:
-; CHECK: frintx s0, s0
 define float @rint_f32(float %x) #0 {
+; CHECK-LABEL: rint_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.rint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: nearbyint_f32:
-; CHECK: frinti s0, s0
 define float @nearbyint_f32(float %x) #0 {
+; CHECK-LABEL: nearbyint_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinti s0, s0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.nearbyint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: lrint_f32:
-; CHECK: frintx [[REG:s[0-9]+]], s0
-; CHECK: fcvtzs w0, [[REG]]
 define i32 @lrint_f32(float %x) #0 {
+; CHECK-LABEL: lrint_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fcvtzs w0, s0
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: llrint_f32:
-; CHECK: frintx [[REG:s[0-9]+]], s0
-; CHECK: fcvtzs x0, [[REG]]
 define i64 @llrint_f32(float %x) #0 {
+; CHECK-LABEL: llrint_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fcvtzs x0, s0
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: maxnum_f32:
-; CHECK: fmaxnm s0, s0, s1
 define float @maxnum_f32(float %x, float %y) #0 {
+; CHECK-LABEL: maxnum_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnm s0, s0, s1
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.maxnum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: minnum_f32:
-; CHECK: fminnm s0, s0, s1
 define float @minnum_f32(float %x, float %y) #0 {
+; CHECK-LABEL: minnum_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnm s0, s0, s1
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.minnum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: maximum_f32:
-; CHECK: fmax s0, s0, s1
 define float @maximum_f32(float %x, float %y) #0 {
+; CHECK-LABEL: maximum_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmax s0, s0, s1
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.maximum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: minimum_f32:
-; CHECK: fmin s0, s0, s1
 define float @minimum_f32(float %x, float %y) #0 {
+; CHECK-LABEL: minimum_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmin s0, s0, s1
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.minimum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: ceil_f32:
-; CHECK: frintp s0, s0
 define float @ceil_f32(float %x) #0 {
+; CHECK-LABEL: ceil_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp s0, s0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.ceil.f32(float %x, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: floor_f32:
-; CHECK: frintm s0, s0
 define float @floor_f32(float %x) #0 {
+; CHECK-LABEL: floor_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm s0, s0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.floor.f32(float %x, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: lround_f32:
-; CHECK: fcvtas w0, s0
 define i32 @lround_f32(float %x) #0 {
+; CHECK-LABEL: lround_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas w0, s0
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: llround_f32:
-; CHECK: fcvtas x0, s0
 define i64 @llround_f32(float %x) #0 {
+; CHECK-LABEL: llround_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas x0, s0
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: round_f32:
-; CHECK: frinta s0, s0
 define float @round_f32(float %x) #0 {
+; CHECK-LABEL: round_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta s0, s0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.round.f32(float %x, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: roundeven_f32:
-; CHECK: frintn s0, s0
 define float @roundeven_f32(float %x) #0 {
+; CHECK-LABEL: roundeven_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.roundeven.f32(float %x, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: trunc_f32:
-; CHECK: frintz s0, s0
 define float @trunc_f32(float %x) #0 {
+; CHECK-LABEL: trunc_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz s0, s0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: fcmp_olt_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_olt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_olt_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, mi
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ole_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_ole_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ole_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, ls
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ogt_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_ogt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ogt_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_oge_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_oge_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_oge_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_oeq_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_oeq_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_oeq_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_one_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_one_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_one_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w8, mi
+; CHECK-NEXT:    csinc w0, w8, wzr, le
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ult_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_ult_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ult_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ule_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_ule_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ule_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, le
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ugt_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_ugt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ugt_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_uge_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_uge_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_uge_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, pl
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ueq_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_ueq_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ueq_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    csinc w0, w8, wzr, vc
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_une_f32:
-; CHECK: fcmp s0, s1
 define i32 @fcmp_une_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_une_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_olt_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_olt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_olt_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, mi
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ole_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_ole_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ole_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, ls
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ogt_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_ogt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ogt_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_oge_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_oge_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_oge_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_oeq_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_oeq_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_oeq_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_one_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_one_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_one_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w8, mi
+; CHECK-NEXT:    csinc w0, w8, wzr, le
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ult_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_ult_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ult_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ule_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_ule_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ule_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, le
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ugt_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_ugt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ugt_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_uge_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_uge_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_uge_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, pl
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ueq_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_ueq_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ueq_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    csinc w0, w8, wzr, vc
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_une_f32:
-; CHECK: fcmpe s0, s1
 define i32 @fcmps_une_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_une_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe s0, s1
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
@@ -546,538 +1040,792 @@ define i32 @fcmps_une_f32(float %a, float %b) #0 {
 
 ; Double-precision intrinsics
 
-; CHECK-LABEL: add_f64:
-; CHECK: fadd d0, d0, d1
 define double @add_f64(double %x, double %y) #0 {
+; CHECK-LABEL: add_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: sub_f64:
-; CHECK: fsub d0, d0, d1
 define double @sub_f64(double %x, double %y) #0 {
+; CHECK-LABEL: sub_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub d0, d0, d1
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: mul_f64:
-; CHECK: fmul d0, d0, d1
 define double @mul_f64(double %x, double %y) #0 {
+; CHECK-LABEL: mul_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul d0, d0, d1
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.fmul.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: div_f64:
-; CHECK: fdiv d0, d0, d1
 define double @div_f64(double %x, double %y) #0 {
+; CHECK-LABEL: div_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fdiv d0, d0, d1
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.fdiv.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: frem_f64:
-; CHECK: bl fmod
 define double @frem_f64(double %x, double %y) #0 {
+; CHECK-LABEL: frem_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl fmod
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.frem.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: fma_f64:
-; CHECK: fmadd d0, d0, d1, d2
 define double @fma_f64(double %x, double %y, double %z) #0 {
+; CHECK-LABEL: fma_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d0, d1, d2
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.fma.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: fptosi_i32_f64:
-; CHECK: fcvtzs w0, d0
 define i32 @fptosi_i32_f64(double %x) #0 {
+; CHECK-LABEL: fptosi_i32_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs w0, d0
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: fptoui_i32_f64:
-; CHECK: fcvtzu w0, d0
 define i32 @fptoui_i32_f64(double %x) #0 {
+; CHECK-LABEL: fptoui_i32_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu w0, d0
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: fptosi_i64_f64:
-; CHECK: fcvtzs x0, d0
 define i64 @fptosi_i64_f64(double %x) #0 {
+; CHECK-LABEL: fptosi_i64_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs x0, d0
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: fptoui_i64_f64:
-; CHECK: fcvtzu x0, d0
 define i64 @fptoui_i64_f64(double %x) #0 {
+; CHECK-LABEL: fptoui_i64_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu x0, d0
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: sitofp_f64_i32:
-; CHECK: scvtf d0, w0
 define double @sitofp_f64_i32(i32 %x) #0 {
+; CHECK-LABEL: sitofp_f64_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf d0, w0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: uitofp_f64_i32:
-; CHECK: ucvtf d0, w0
 define double @uitofp_f64_i32(i32 %x) #0 {
+; CHECK-LABEL: uitofp_f64_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf d0, w0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: sitofp_f64_i64:
-; CHECK: scvtf d0, x0
 define double @sitofp_f64_i64(i64 %x) #0 {
+; CHECK-LABEL: sitofp_f64_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf d0, x0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: uitofp_f64_i64:
-; CHECK: ucvtf d0, x0
 define double @uitofp_f64_i64(i64 %x) #0 {
+; CHECK-LABEL: uitofp_f64_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf d0, x0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: sitofp_f64_i128:
-; CHECK: bl __floattidf
 define double @sitofp_f64_i128(i128 %x) #0 {
+; CHECK-LABEL: sitofp_f64_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floattidf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: uitofp_f64_i128:
-; CHECK: bl __floatuntidf
 define double @uitofp_f64_i128(i128 %x) #0 {
+; CHECK-LABEL: uitofp_f64_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floatuntidf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: sqrt_f64:
-; CHECK: fsqrt d0, d0
 define double @sqrt_f64(double %x) #0 {
+; CHECK-LABEL: sqrt_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsqrt d0, d0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.sqrt.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: powi_f64:
-; CHECK: bl __powidf2
 define double @powi_f64(double %x, i32 %y) #0 {
+; CHECK-LABEL: powi_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __powidf2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.powi.f64(double %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: sin_f64:
-; CHECK: bl sin
 define double @sin_f64(double %x) #0 {
+; CHECK-LABEL: sin_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sin
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.sin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: cos_f64:
-; CHECK: bl cos
 define double @cos_f64(double %x) #0 {
+; CHECK-LABEL: cos_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl cos
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.cos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: tan_f64:
-; CHECK: bl tan
 define double @tan_f64(double %x) #0 {
+; CHECK-LABEL: tan_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl tan
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.tan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: asin_f64:
-; CHECK: bl asin
 define double @asin_f64(double %x) #0 {
+; CHECK-LABEL: asin_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl asin
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.asin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: acos_f64:
-; CHECK: bl acos
 define double @acos_f64(double %x) #0 {
+; CHECK-LABEL: acos_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl acos
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.acos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: atan_f64:
-; CHECK: bl atan
 define double @atan_f64(double %x) #0 {
+; CHECK-LABEL: atan_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl atan
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.atan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: atan2_f64:
-; CHECK: bl atan2
 define double @atan2_f64(double %x, double %y) #0 {
+; CHECK-LABEL: atan2_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl atan2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.atan2.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: sinh_f64:
-; CHECK: bl sinh
 define double @sinh_f64(double %x) #0 {
+; CHECK-LABEL: sinh_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sinh
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.sinh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: cosh_f64:
-; CHECK: bl cosh
 define double @cosh_f64(double %x) #0 {
+; CHECK-LABEL: cosh_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl cosh
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.cosh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: tanh_f64:
-; CHECK: bl tanh
 define double @tanh_f64(double %x) #0 {
+; CHECK-LABEL: tanh_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl tanh
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.tanh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: pow_f64:
-; CHECK: bl pow
 define double @pow_f64(double %x, double %y) #0 {
+; CHECK-LABEL: pow_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl pow
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.pow.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: log_f64:
-; CHECK: bl log
 define double @log_f64(double %x) #0 {
+; CHECK-LABEL: log_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.log.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: log10_f64:
-; CHECK: bl log10
 define double @log10_f64(double %x) #0 {
+; CHECK-LABEL: log10_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log10
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.log10.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: log2_f64:
-; CHECK: bl log2
 define double @log2_f64(double %x) #0 {
+; CHECK-LABEL: log2_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.log2.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: exp_f64:
-; CHECK: bl exp
 define double @exp_f64(double %x) #0 {
+; CHECK-LABEL: exp_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl exp
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.exp.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: exp2_f64:
-; CHECK: bl exp2
 define double @exp2_f64(double %x) #0 {
+; CHECK-LABEL: exp2_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl exp2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.exp2.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: rint_f64:
-; CHECK: frintx d0, d0
 define double @rint_f64(double %x) #0 {
+; CHECK-LABEL: rint_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.rint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: nearbyint_f64:
-; CHECK: frinti d0, d0
 define double @nearbyint_f64(double %x) #0 {
+; CHECK-LABEL: nearbyint_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinti d0, d0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.nearbyint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: lrint_f64:
-; CHECK: frintx [[REG:d[0-9]+]], d0
-; CHECK: fcvtzs w0, [[REG]]
 define i32 @lrint_f64(double %x) #0 {
+; CHECK-LABEL: lrint_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs w0, d0
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: llrint_f64:
-; CHECK: frintx [[REG:d[0-9]+]], d0
-; CHECK: fcvtzs x0, [[REG]]
 define i64 @llrint_f64(double %x) #0 {
+; CHECK-LABEL: llrint_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs x0, d0
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: maxnum_f64:
-; CHECK: fmaxnm d0, d0, d1
 define double @maxnum_f64(double %x, double %y) #0 {
+; CHECK-LABEL: maxnum_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnm d0, d0, d1
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.maxnum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: minnum_f64:
-; CHECK: fminnm d0, d0, d1
 define double @minnum_f64(double %x, double %y) #0 {
+; CHECK-LABEL: minnum_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnm d0, d0, d1
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.minnum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: maximum_f64:
-; CHECK: fmax d0, d0, d1
 define double @maximum_f64(double %x, double %y) #0 {
+; CHECK-LABEL: maximum_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmax d0, d0, d1
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.maximum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: minimum_f64:
-; CHECK: fmin d0, d0, d1
 define double @minimum_f64(double %x, double %y) #0 {
+; CHECK-LABEL: minimum_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmin d0, d0, d1
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.minimum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: ceil_f64:
-; CHECK: frintp d0, d0
 define double @ceil_f64(double %x) #0 {
+; CHECK-LABEL: ceil_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp d0, d0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.ceil.f64(double %x, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: floor_f64:
-; CHECK: frintm d0, d0
 define double @floor_f64(double %x) #0 {
+; CHECK-LABEL: floor_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm d0, d0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.floor.f64(double %x, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: lround_f64:
-; CHECK: fcvtas w0, d0
 define i32 @lround_f64(double %x) #0 {
+; CHECK-LABEL: lround_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas w0, d0
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: llround_f64:
-; CHECK: fcvtas x0, d0
 define i64 @llround_f64(double %x) #0 {
+; CHECK-LABEL: llround_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas x0, d0
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: round_f64:
-; CHECK: frinta d0, d0
 define double @round_f64(double %x) #0 {
+; CHECK-LABEL: round_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta d0, d0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.round.f64(double %x, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: roundeven_f64:
-; CHECK: frintn d0, d0
 define double @roundeven_f64(double %x) #0 {
+; CHECK-LABEL: roundeven_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.roundeven.f64(double %x, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: trunc_f64:
-; CHECK: frintz d0, d0
 define double @trunc_f64(double %x) #0 {
+; CHECK-LABEL: trunc_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz d0, d0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: fcmp_olt_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_olt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_olt_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, mi
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ole_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_ole_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ole_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, ls
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ogt_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_ogt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ogt_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_oge_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_oge_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_oge_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_oeq_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_oeq_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_oeq_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_one_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_one_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_one_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w8, mi
+; CHECK-NEXT:    csinc w0, w8, wzr, le
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ult_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_ult_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ult_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ule_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_ule_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ule_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, le
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ugt_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_ugt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ugt_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_uge_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_uge_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_uge_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, pl
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ueq_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_ueq_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ueq_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    csinc w0, w8, wzr, vc
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_une_f64:
-; CHECK: fcmp d0, d1
 define i32 @fcmp_une_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_une_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_olt_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_olt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_olt_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, mi
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ole_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_ole_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ole_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, ls
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ogt_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_ogt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ogt_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_oge_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_oge_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_oge_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_oeq_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_oeq_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_oeq_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_one_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_one_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_one_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w8, mi
+; CHECK-NEXT:    csinc w0, w8, wzr, le
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ult_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_ult_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ult_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ule_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_ule_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ule_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, le
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ugt_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_ugt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ugt_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_uge_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_uge_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_uge_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, pl
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ueq_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_ueq_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ueq_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    csinc w0, w8, wzr, vc
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_une_f64:
-; CHECK: fcmpe d0, d1
 define i32 @fcmps_une_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_une_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmpe d0, d1
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
@@ -1086,515 +1834,1015 @@ define i32 @fcmps_une_f64(double %a, double %b) #0 {
 
 ; Long-double-precision intrinsics
 
-; CHECK-LABEL: add_f128:
-; CHECK: bl __addtf3
 define fp128 @add_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: add_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.fadd.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: sub_f128:
-; CHECK: bl __subtf3
 define fp128 @sub_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: sub_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __subtf3
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.fsub.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: mul_f128:
-; CHECK: bl __multf3
 define fp128 @mul_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: mul_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __multf3
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.fmul.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: div_f128:
-; CHECK: bl __divtf3
 define fp128 @div_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: div_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __divtf3
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.fdiv.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: frem_f128:
-; CHECK: bl fmodl
 define fp128 @frem_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: frem_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl fmodl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.frem.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: fma_f128:
-; CHECK: fmal
 define fp128 @fma_f128(fp128 %x, fp128 %y, fp128 %z) #0 {
+; CHECK-LABEL: fma_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl fmal
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.fma.f128(fp128 %x, fp128 %y, fp128 %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: fptosi_i32_f128:
-; CHECK: bl __fixtfsi
 define i32 @fptosi_i32_f128(fp128 %x) #0 {
+; CHECK-LABEL: fptosi_i32_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __fixtfsi
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: fptoui_i32_f128:
-; CHECK: bl __fixunstfsi
 define i32 @fptoui_i32_f128(fp128 %x) #0 {
+; CHECK-LABEL: fptoui_i32_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __fixunstfsi
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: fptosi_i64_f128:
-; CHECK: bl __fixtfdi
 define i64 @fptosi_i64_f128(fp128 %x) #0 {
+; CHECK-LABEL: fptosi_i64_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __fixtfdi
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: fptoui_i64_f128:
-; CHECK: bl __fixunstfdi
 define i64 @fptoui_i64_f128(fp128 %x) #0 {
+; CHECK-LABEL: fptoui_i64_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __fixunstfdi
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: sitofp_f128_i32:
-; CHECK: bl __floatsitf
 define fp128 @sitofp_f128_i32(i32 %x) #0 {
+; CHECK-LABEL: sitofp_f128_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floatsitf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: uitofp_f128_i32:
-; CHECK: bl __floatunsitf
 define fp128 @uitofp_f128_i32(i32 %x) #0 {
+; CHECK-LABEL: uitofp_f128_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floatunsitf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: sitofp_f128_i64:
-; CHECK: bl __floatditf
 define fp128 @sitofp_f128_i64(i64 %x) #0 {
+; CHECK-LABEL: sitofp_f128_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floatditf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: uitofp_f128_i64:
-; CHECK: bl __floatunditf
 define fp128 @uitofp_f128_i64(i64 %x) #0 {
+; CHECK-LABEL: uitofp_f128_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floatunditf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: sitofp_f128_i128:
-; CHECK: bl __floattitf
 define fp128 @sitofp_f128_i128(i128 %x) #0 {
+; CHECK-LABEL: sitofp_f128_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floattitf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: uitofp_f128_i128:
-; CHECK: bl __floatuntitf
 define fp128 @uitofp_f128_i128(i128 %x) #0 {
+; CHECK-LABEL: uitofp_f128_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __floatuntitf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: sqrt_f128:
-; CHECK: bl sqrtl
 define fp128 @sqrt_f128(fp128 %x) #0 {
+; CHECK-LABEL: sqrt_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sqrtl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: powi_f128:
-; CHECK: bl __powitf2
 define fp128 @powi_f128(fp128 %x, i32 %y) #0 {
+; CHECK-LABEL: powi_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __powitf2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.powi.f128(fp128 %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: sin_f128:
-; CHECK: bl sinl
 define fp128 @sin_f128(fp128 %x) #0 {
+; CHECK-LABEL: sin_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sinl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.sin.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: cos_f128:
-; CHECK: bl cosl
 define fp128 @cos_f128(fp128 %x) #0 {
+; CHECK-LABEL: cos_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl cosl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.cos.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: tan_f128:
-; CHECK: bl tanl
 define fp128 @tan_f128(fp128 %x) #0 {
+; CHECK-LABEL: tan_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl tanl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.tan.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: asin_f128:
-; CHECK: bl asinl
 define fp128 @asin_f128(fp128 %x) #0 {
+; CHECK-LABEL: asin_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl asinl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.asin.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: acos_f128:
-; CHECK: bl acosl
 define fp128 @acos_f128(fp128 %x) #0 {
+; CHECK-LABEL: acos_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl acosl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.acos.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: atan_f128:
-; CHECK: bl atanl
 define fp128 @atan_f128(fp128 %x) #0 {
+; CHECK-LABEL: atan_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl atanl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.atan.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: atan2_f128:
-; CHECK: bl atan2l
 define fp128 @atan2_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: atan2_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl atan2l
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.atan2.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: sinh_f128:
-; CHECK: bl sinhl
 define fp128 @sinh_f128(fp128 %x) #0 {
+; CHECK-LABEL: sinh_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sinhl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.sinh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: cosh_f128:
-; CHECK: bl coshl
 define fp128 @cosh_f128(fp128 %x) #0 {
+; CHECK-LABEL: cosh_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl coshl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.cosh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: tanh_f128:
-; CHECK: bl tanhl
 define fp128 @tanh_f128(fp128 %x) #0 {
+; CHECK-LABEL: tanh_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl tanhl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.tanh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: pow_f128:
-; CHECK: bl powl
 define fp128 @pow_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: pow_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl powl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.pow.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: log_f128:
-; CHECK: bl logl
 define fp128 @log_f128(fp128 %x) #0 {
+; CHECK-LABEL: log_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl logl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.log.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: log10_f128:
-; CHECK: bl log10l
 define fp128 @log10_f128(fp128 %x) #0 {
+; CHECK-LABEL: log10_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log10l
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.log10.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: log2_f128:
-; CHECK: bl log2l
 define fp128 @log2_f128(fp128 %x) #0 {
+; CHECK-LABEL: log2_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log2l
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.log2.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: exp_f128:
-; CHECK: bl expl
 define fp128 @exp_f128(fp128 %x) #0 {
+; CHECK-LABEL: exp_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl expl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.exp.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: exp2_f128:
-; CHECK: bl exp2l
 define fp128 @exp2_f128(fp128 %x) #0 {
+; CHECK-LABEL: exp2_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl exp2l
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: rint_f128:
-; CHECK: bl rintl
 define fp128 @rint_f128(fp128 %x) #0 {
+; CHECK-LABEL: rint_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl rintl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.rint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: nearbyint_f128:
-; CHECK: bl nearbyintl
 define fp128 @nearbyint_f128(fp128 %x) #0 {
+; CHECK-LABEL: nearbyint_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl nearbyintl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: lrint_f128:
-; CHECK: bl lrintl
 define i32 @lrint_f128(fp128 %x) #0 {
+; CHECK-LABEL: lrint_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl lrintl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.lrint.i32.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: llrint_f128:
-; CHECK: bl llrintl
 define i64 @llrint_f128(fp128 %x) #0 {
+; CHECK-LABEL: llrint_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl llrintl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: maxnum_f128:
-; CHECK: bl fmaxl
 define fp128 @maxnum_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: maxnum_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl fmaxl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %x, fp128 %y, metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: minnum_f128:
-; CHECK: bl fminl
 define fp128 @minnum_f128(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: minnum_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl fminl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %x, fp128 %y, metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: ceil_f128:
-; CHECK: bl ceill
 define fp128 @ceil_f128(fp128 %x) #0 {
+; CHECK-LABEL: ceil_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl ceill
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: floor_f128:
-; CHECK: bl floorl
 define fp128 @floor_f128(fp128 %x) #0 {
+; CHECK-LABEL: floor_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl floorl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.floor.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: lround_f128:
-; CHECK: bl lroundl
 define i32 @lround_f128(fp128 %x) #0 {
+; CHECK-LABEL: lround_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl lroundl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call i32 @llvm.experimental.constrained.lround.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
-; CHECK-LABEL: llround_f128:
-; CHECK: bl llroundl
 define i64 @llround_f128(fp128 %x) #0 {
+; CHECK-LABEL: llround_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl llroundl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
-; CHECK-LABEL: round_f128:
-; CHECK: bl roundl
 define fp128 @round_f128(fp128 %x) #0 {
+; CHECK-LABEL: round_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl roundl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.round.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: trunc_f128:
-; CHECK: bl truncl
 define fp128 @trunc_f128(fp128 %x) #0 {
+; CHECK-LABEL: trunc_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl truncl
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: fcmp_olt_f128:
-; CHECK: bl __lttf2
 define i32 @fcmp_olt_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_olt_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ole_f128:
-; CHECK: bl __letf2
 define i32 @fcmp_ole_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_ole_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __letf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, le
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ogt_f128:
-; CHECK: bl __gttf2
 define i32 @fcmp_ogt_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_ogt_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __gttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_oge_f128:
-; CHECK: bl __getf2
 define i32 @fcmp_oge_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_oge_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __getf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_oeq_f128:
-; CHECK: bl __eqtf2
 define i32 @fcmp_oeq_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_oeq_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __eqtf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_one_f128:
-; CHECK: bl __eqtf2
 define i32 @fcmp_one_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_one_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __eqtf2
+; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    bl __unordtf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ccmp w19, #0, #4, eq
+; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ult_f128:
-; CHECK: bl __getf2
 define i32 @fcmp_ult_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_ult_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __getf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ule_f128:
-; CHECK: bl __gttf2
 define i32 @fcmp_ule_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_ule_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __gttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, le
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ugt_f128:
-; CHECK: bl __letf2
 define i32 @fcmp_ugt_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_ugt_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __letf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_uge_f128:
-; CHECK: bl __lttf2
 define i32 @fcmp_uge_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_uge_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_ueq_f128:
-; CHECK: bl __eqtf2
 define i32 @fcmp_ueq_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_ueq_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __eqtf2
+; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    bl __unordtf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ccmp w19, #0, #4, eq
+; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmp_une_f128:
-; CHECK: bl __netf2
 define i32 @fcmp_une_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmp_une_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __netf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_olt_f128:
-; CHECK: bl __lttf2
 define i32 @fcmps_olt_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_olt_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ole_f128:
-; CHECK: bl __letf2
 define i32 @fcmps_ole_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_ole_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __letf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, le
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ogt_f128:
-; CHECK: bl __gttf2
 define i32 @fcmps_ogt_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_ogt_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __gttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_oge_f128:
-; CHECK: bl __getf2
 define i32 @fcmps_oge_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_oge_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __getf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_oeq_f128:
-; CHECK: bl __eqtf2
 define i32 @fcmps_oeq_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_oeq_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __eqtf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_one_f128:
-; CHECK: bl __eqtf2
 define i32 @fcmps_one_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_one_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __eqtf2
+; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    bl __unordtf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ccmp w19, #0, #4, eq
+; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ult_f128:
-; CHECK: bl __getf2
 define i32 @fcmps_ult_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_ult_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __getf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ule_f128:
-; CHECK: bl __gttf2
 define i32 @fcmps_ule_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_ule_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __gttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, le
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ugt_f128:
-; CHECK: bl __letf2
 define i32 @fcmps_ugt_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_ugt_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __letf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_uge_f128:
-; CHECK: bl __lttf2
 define i32 @fcmps_uge_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_uge_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_ueq_f128:
-; CHECK: bl __eqtf2
 define i32 @fcmps_ueq_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_ueq_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __eqtf2
+; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    bl __unordtf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ccmp w19, #0, #4, eq
+; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: fcmps_une_f128:
-; CHECK: bl __netf2
 define i32 @fcmps_une_f128(fp128 %a, fp128 %b) #0 {
+; CHECK-LABEL: fcmps_une_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __netf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
@@ -1603,156 +2851,280 @@ define i32 @fcmps_une_f128(fp128 %a, fp128 %b) #0 {
 
 ; Intrinsics to convert between floating-point types
 
-; CHECK-LABEL: fptrunc_f32_f64:
-; CHECK: fcvt s0, d0
 define float @fptrunc_f32_f64(double %x) #0 {
+; CHECK-LABEL: fptrunc_f32_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvt s0, d0
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: fptrunc_f32_f128:
-; CHECK: bl __trunctfsf2
 define float @fptrunc_f32_f128(fp128 %x) #0 {
+; CHECK-LABEL: fptrunc_f32_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __trunctfsf2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call float @llvm.experimental.constrained.fptrunc.f32.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %val
 }
 
-; CHECK-LABEL: fptrunc_f64_f128:
-; CHECK: bl __trunctfdf2
 define double @fptrunc_f64_f128(fp128 %x) #0 {
+; CHECK-LABEL: fptrunc_f64_f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __trunctfdf2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.fptrunc.f64.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: fpext_f64_f32:
-; CHECK: fcvt d0, s0
 define double @fpext_f64_f32(float %x) #0 {
+; CHECK-LABEL: fpext_f64_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    ret
   %val = call double @llvm.experimental.constrained.fpext.f64.f32(float %x, metadata !"fpexcept.strict") #0
   ret double %val
 }
 
-; CHECK-LABEL: fpext_f128_f32:
-; CHECK: bl __extendsftf2
 define fp128 @fpext_f128_f32(float %x) #0 {
+; CHECK-LABEL: fpext_f128_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __extendsftf2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %x, metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: fpext_f128_f64:
-; CHECK: bl __extenddftf2
 define fp128 @fpext_f128_f64(double %x) #0 {
+; CHECK-LABEL: fpext_f128_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl __extenddftf2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %x, metadata !"fpexcept.strict") #0
   ret fp128 %val
 }
 
-; CHECK-LABEL: sin_v1f64:
-; CHECK: bl sin
 define <1 x double> @sin_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: sin_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sin
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.sin.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: cos_v1f64:
-; CHECK: bl cos
 define <1 x double> @cos_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: cos_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl cos
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.cos.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: tan_v1f64:
-; CHECK: bl tan
 define <1 x double> @tan_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: tan_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl tan
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.tan.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: asin_v1f64:
-; CHECK: bl asin
 define <1 x double> @asin_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: asin_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl asin
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.asin.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: acos_v1f64:
-; CHECK: bl acos
 define <1 x double> @acos_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: acos_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl acos
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.acos.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: atan_v1f64:
-; CHECK: bl atan
 define <1 x double> @atan_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: atan_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl atan
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.atan.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: atan2_v1f64:
-; CHECK: bl atan2
 define <1 x double> @atan2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: atan2_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl atan2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.atan2.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: sinh_v1f64:
-; CHECK: bl sinh
 define <1 x double> @sinh_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: sinh_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl sinh
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.sinh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: cosh_v1f64:
-; CHECK: bl cosh
 define <1 x double> @cosh_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: cosh_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl cosh
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.cosh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: tanh_v1f64:
-; CHECK: bl tanh
 define <1 x double> @tanh_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: tanh_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl tanh
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.tanh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: pow_v1f64:
-; CHECK: bl pow
 define <1 x double> @pow_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: pow_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl pow
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.pow.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: log_v1f64:
-; CHECK: bl log
 define <1 x double> @log_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: log_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.log.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: log2_v1f64:
-; CHECK: bl log2
 define <1 x double> @log2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: log2_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.log2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: log10_v1f64:
-; CHECK: bl log10
 define <1 x double> @log10_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: log10_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl log10
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.log10.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: exp_v1f64:
-; CHECK: bl exp
 define <1 x double> @exp_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: exp_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl exp
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.exp.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
 
-; CHECK-LABEL: exp2_v1f64:
-; CHECK: bl exp2
 define <1 x double> @exp2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: exp2_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl exp2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = call <1 x double> @llvm.experimental.constrained.exp2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1918,3 +3290,7 @@ declare double @llvm.experimental.constrained.fptrunc.f64.f128(fp128, metadata,
 declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
 declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
 declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}

diff  --git a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
index 17c87a5dae4199..bfb5c67801e6c2 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-CVT
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc < %s -mtriple=aarch64 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-CVT
-; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-CVT
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
 
 ;
 ; 32-bit float to signed integer

diff  --git a/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
index 3c19fca4a22aef..0dea7be5052d03 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-CVT
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc < %s -mtriple=aarch64 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-CVT
-; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-CVT
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
 
 ;
 ; 32-bit float to unsigned integer

diff  --git a/llvm/test/CodeGen/AArch64/funnel-shift.ll b/llvm/test/CodeGen/AArch64/funnel-shift.ll
index 20a6dd0899b40a..3037a9552bc27e 100644
--- a/llvm/test/CodeGen/AArch64/funnel-shift.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i8 @llvm.fshl.i8(i8, i8, i8)
 declare i16 @llvm.fshl.i16(i16, i16, i16)

diff  --git a/llvm/test/CodeGen/AArch64/itofp-bf16.ll b/llvm/test/CodeGen/AArch64/itofp-bf16.ll
index 978fe0b5ba3b3c..58591b11c184fb 100644
--- a/llvm/test/CodeGen/AArch64/itofp-bf16.ll
+++ b/llvm/test/CodeGen/AArch64/itofp-bf16.ll
@@ -4,6 +4,63 @@
 ; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
 ; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
 
+; CHECK-GI:       warning: Instruction selection used fallback path for stofp_i64_bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_i64_bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_i32_bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_i32_bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_i16_bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_i16_bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_i8_bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_i8_bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v2i64_v2bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v2i64_v2bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v3i64_v3bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v3i64_v3bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v4i64_v4bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v4i64_v4bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v8i64_v8bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v8i64_v8bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v16i64_v16bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v16i64_v16bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v32i64_v32bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v32i64_v32bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v2i32_v2bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v2i32_v2bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v3i32_v3bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v3i32_v3bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v4i32_v4bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v4i32_v4bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v8i32_v8bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v8i32_v8bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v16i32_v16bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v16i32_v16bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v32i32_v32bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v32i32_v32bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v2i16_v2bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v2i16_v2bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v3i16_v3bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v3i16_v3bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v4i16_v4bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v4i16_v4bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v8i16_v8bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v8i16_v8bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v16i16_v16bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v16i16_v16bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v32i16_v32bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v32i16_v32bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v2i8_v2bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v2i8_v2bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v3i8_v3bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v3i8_v3bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v4i8_v4bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v4i8_v4bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v8i8_v8bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v8i8_v8bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v16i8_v16bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v16i8_v16bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for stofp_v32i8_v32bf16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for utofp_v32i8_v32bf16
+
 define bfloat @stofp_i64_bf16(i64 %a) {
 ; CHECK-LABEL: stofp_i64_bf16:
 ; CHECK:       // %bb.0: // %entry

diff  --git a/llvm/test/CodeGen/AArch64/mingw-refptr.ll b/llvm/test/CodeGen/AArch64/mingw-refptr.ll
index 306bee9f85c42e..cc9fac0506ff52 100644
--- a/llvm/test/CodeGen/AArch64/mingw-refptr.ll
+++ b/llvm/test/CodeGen/AArch64/mingw-refptr.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=aarch64-w64-mingw32 | FileCheck %s
-; RUN: llc < %s -global-isel -global-isel-abort=2 -pass-remarks-missed=gisel* \
-; RUN:     -mtriple=aarch64-w64-mingw32 2>&1| FileCheck %s --check-prefixes=GISEL,FALLBACK
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=aarch64-w64-mingw32 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64-w64-mingw32 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 @var = external local_unnamed_addr global i32, align 4
 @dsolocalvar = external dso_local local_unnamed_addr global i32, align 4
@@ -10,10 +10,11 @@
 
 define dso_local i32 @getVar() {
 ; CHECK-LABEL: getVar:
-; CHECK:    adrp x8, .refptr.var
-; CHECK:    ldr  x8, [x8, :lo12:.refptr.var]
-; CHECK:    ldr  w0, [x8]
-; CHECK:    ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, .refptr.var
+; CHECK-NEXT:    ldr x8, [x8, :lo12:.refptr.var]
+; CHECK-NEXT:    ldr w0, [x8]
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, ptr @var, align 4
   ret i32 %0
@@ -21,9 +22,10 @@ entry:
 
 define dso_local i32 @getDsoLocalVar() {
 ; CHECK-LABEL: getDsoLocalVar:
-; CHECK:    adrp x8, dsolocalvar
-; CHECK:    ldr  w0, [x8, :lo12:dsolocalvar]
-; CHECK:    ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, dsolocalvar
+; CHECK-NEXT:    ldr w0, [x8, :lo12:dsolocalvar]
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, ptr @dsolocalvar, align 4
   ret i32 %0
@@ -31,9 +33,10 @@ entry:
 
 define dso_local i32 @getLocalVar() {
 ; CHECK-LABEL: getLocalVar:
-; CHECK:    adrp x8, localvar
-; CHECK:    ldr  w0, [x8, :lo12:localvar]
-; CHECK:    ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, localvar
+; CHECK-NEXT:    ldr w0, [x8, :lo12:localvar]
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, ptr @localvar, align 4
   ret i32 %0
@@ -41,9 +44,10 @@ entry:
 
 define dso_local i32 @getLocalCommon() {
 ; CHECK-LABEL: getLocalCommon:
-; CHECK:    adrp x8, localcommon
-; CHECK:    ldr  w0, [x8, :lo12:localcommon]
-; CHECK:    ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, localcommon
+; CHECK-NEXT:    ldr w0, [x8, :lo12:localcommon]
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, ptr @localcommon, align 4
   ret i32 %0
@@ -51,10 +55,11 @@ entry:
 
 define dso_local i32 @getExtVar() {
 ; CHECK-LABEL: getExtVar:
-; CHECK:    adrp x8, __imp_extvar
-; CHECK:    ldr  x8, [x8, :lo12:__imp_extvar]
-; CHECK:    ldr  w0, [x8]
-; CHECK:    ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, __imp_extvar
+; CHECK-NEXT:    ldr x8, [x8, :lo12:__imp_extvar]
+; CHECK-NEXT:    ldr w0, [x8]
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, ptr @extvar, align 4
   ret i32 %0
@@ -62,7 +67,8 @@ entry:
 
 define dso_local void @callFunc() {
 ; CHECK-LABEL: callFunc:
-; CHECK:    b otherFunc
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b otherFunc
 entry:
   tail call void @otherFunc()
   ret void
@@ -70,16 +76,40 @@ entry:
 
 declare dso_local void @otherFunc()
 
-; FALLBACK-NOT: remark:{{.*}}sspFunc
 define dso_local void @sspFunc() #0 {
 ; CHECK-LABEL: sspFunc:
-; CHECK:    adrp x8, .refptr.__stack_chk_guard
-; CHECK:    ldr  x8, [x8, :lo12:.refptr.__stack_chk_guard]
-; CHECK:    ldr  x8, [x8]
-; GISEL-LABEL: sspFunc:
-; GISEL:    adrp x8, .refptr.__stack_chk_guard
-; GISEL:    ldr  x8, [x8, :lo12:.refptr.__stack_chk_guard]
-; GISEL:    ldr  x8, [x8]
+; CHECK:       .seh_proc sspFunc
+; CHECK-NEXT:  // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    .seh_stackalloc 32
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg x30, 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    adrp x8, .refptr.__stack_chk_guard
+; CHECK-NEXT:    add x0, sp, #7
+; CHECK-NEXT:    ldr x8, [x8, :lo12:.refptr.__stack_chk_guard]
+; CHECK-NEXT:    ldr x8, [x8]
+; CHECK-NEXT:    str x8, [sp, #8]
+; CHECK-NEXT:    bl ptrUser
+; CHECK-NEXT:    adrp x8, .refptr.__stack_chk_guard
+; CHECK-NEXT:    ldr x8, [x8, :lo12:.refptr.__stack_chk_guard]
+; CHECK-NEXT:    ldr x9, [sp, #8]
+; CHECK-NEXT:    ldr x8, [x8]
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    b.ne .LBB6_2
+; CHECK-NEXT:  // %bb.1: // %entry
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x30, 16
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    .seh_stackalloc 32
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB6_2: // %entry
+; CHECK-NEXT:    bl __stack_chk_fail
+; CHECK-NEXT:    brk #0x1
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
 entry:
   %c = alloca i8, align 1
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %c)
@@ -102,3 +132,7 @@ attributes #0 = { sspstrong }
 ; CHECK:        .globl  .refptr.var
 ; CHECK: .refptr.var:
 ; CHECK:        .xword  var
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}

diff --git a/llvm/test/CodeGen/AArch64/mulcmle.ll b/llvm/test/CodeGen/AArch64/mulcmle.ll
index 32bc5c5e63b3e1..5b9f438ed1d437 100644
--- a/llvm/test/CodeGen/AArch64/mulcmle.ll
+++ b/llvm/test/CodeGen/AArch64/mulcmle.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 %s -o - -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 %s -o - -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define <1 x i64> @v1i64(<1 x i64> %a) {
 ; CHECK-SD-LABEL: v1i64:

diff --git a/llvm/test/CodeGen/AArch64/overflow.ll b/llvm/test/CodeGen/AArch64/overflow.ll
index 977141f2b84f4f..489d46f8b0e727 100644
--- a/llvm/test/CodeGen/AArch64/overflow.ll
+++ b/llvm/test/CodeGen/AArch64/overflow.ll
@@ -1,7 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,SDAG
-; RUN: llc < %s -mtriple=arm64-eabi -global-isel -global-isel-abort=2 -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,GISEL
-
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=arm64-eabi -global-isel -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define zeroext i1 @saddo1.i32.unused(i32 %v1, i32 %v2, ptr %res) {
 ; CHECK-LABEL: saddo1.i32.unused:
@@ -105,19 +104,19 @@ entry:
   ret i1 %obit
 }
 define zeroext i1 @saddo.add.i32(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %res) {
-; SDAG-LABEL: saddo.add.i32:
-; SDAG:       // %bb.0: // %entry
-; SDAG-NEXT:    add w8, w4, #100
-; SDAG-NEXT:    subs w8, w8, #100
-; SDAG-NEXT:    cset w0, vs
-; SDAG-NEXT:    str w8, [x5]
-; SDAG-NEXT:    ret
+; CHECK-SD-LABEL: saddo.add.i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    add w8, w4, #100
+; CHECK-SD-NEXT:    subs w8, w8, #100
+; CHECK-SD-NEXT:    cset w0, vs
+; CHECK-SD-NEXT:    str w8, [x5]
+; CHECK-SD-NEXT:    ret
 ;
-; GISEL-LABEL: saddo.add.i32:
-; GISEL:       // %bb.0: // %entry
-; GISEL-NEXT:    mov w0, wzr
-; GISEL-NEXT:    str w4, [x5]
-; GISEL-NEXT:    ret
+; CHECK-GI-LABEL: saddo.add.i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov w0, wzr
+; CHECK-GI-NEXT:    str w4, [x5]
+; CHECK-GI-NEXT:    ret
 entry:
   %lhs = add nsw i32 %v5, 100
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %lhs, i32 -100)
@@ -128,20 +127,20 @@ entry:
 }
 
 define zeroext i1 @uaddo.add.i32(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %res) {
-; SDAG-LABEL: uaddo.add.i32:
-; SDAG:       // %bb.0: // %entry
-; SDAG-NEXT:    add w8, w4, #5
-; SDAG-NEXT:    adds w8, w8, #5
-; SDAG-NEXT:    cset w0, hs
-; SDAG-NEXT:    str w8, [x5]
-; SDAG-NEXT:    ret
+; CHECK-SD-LABEL: uaddo.add.i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    add w8, w4, #5
+; CHECK-SD-NEXT:    adds w8, w8, #5
+; CHECK-SD-NEXT:    cset w0, hs
+; CHECK-SD-NEXT:    str w8, [x5]
+; CHECK-SD-NEXT:    ret
 ;
-; GISEL-LABEL: uaddo.add.i32:
-; GISEL:       // %bb.0: // %entry
-; GISEL-NEXT:    adds w8, w4, #10
-; GISEL-NEXT:    cset w0, hs
-; GISEL-NEXT:    str w8, [x5]
-; GISEL-NEXT:    ret
+; CHECK-GI-LABEL: uaddo.add.i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    adds w8, w4, #10
+; CHECK-GI-NEXT:    cset w0, hs
+; CHECK-GI-NEXT:    str w8, [x5]
+; CHECK-GI-NEXT:    ret
 entry:
   %lhs = add nuw i32 %v5, 5
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %lhs, i32 5)

diff --git a/llvm/test/CodeGen/AArch64/phi.ll b/llvm/test/CodeGen/AArch64/phi.ll
index eeafbaffbcc695..55942d0e421bb9 100644
--- a/llvm/test/CodeGen/AArch64/phi.ll
+++ b/llvm/test/CodeGen/AArch64/phi.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=aarch64 -global-isel=0 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel=1 -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel=1 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define i8 @ti8(i1 %c, ptr %p, i8 %a, i8 %b) {
 ; CHECK-SD-LABEL: ti8:

diff --git a/llvm/test/CodeGen/AArch64/sadd_sat.ll b/llvm/test/CodeGen/AArch64/sadd_sat.ll
index cb52c17e2531c8..d07fcbc29806f4 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i4 @llvm.sadd.sat.i4(i4, i4)
 declare i8 @llvm.sadd.sat.i8(i8, i8)

diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll b/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
index f6fb4dd5e4b417..4a0e49518517bf 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i4 @llvm.sadd.sat.i4(i4, i4)
 declare i8 @llvm.sadd.sat.i8(i8, i8)

diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
index 29318bd28c45d4..6a4ab837fc4720 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -2,6 +2,10 @@
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
+; CHECK-GI:       warning: Instruction selection used fallback path for v16i4
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for v16i1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for v2i128
+
 declare <1 x i8> @llvm.sadd.sat.v1i8(<1 x i8>, <1 x i8>)
 declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)
 declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>)

diff --git a/llvm/test/CodeGen/AArch64/sext.ll b/llvm/test/CodeGen/AArch64/sext.ll
index 3604db33d5c4b3..53fbb351954fcf 100644
--- a/llvm/test/CodeGen/AArch64/sext.ll
+++ b/llvm/test/CodeGen/AArch64/sext.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define i16 @sext_i8_to_i16(i8 %a) {
 ; CHECK-LABEL: sext_i8_to_i16:

diff --git a/llvm/test/CodeGen/AArch64/ssub_sat.ll b/llvm/test/CodeGen/AArch64/ssub_sat.ll
index cf201d628b7e1e..23550d3c41cc7d 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i4 @llvm.ssub.sat.i4(i4, i4)
 declare i8 @llvm.ssub.sat.i8(i8, i8)

diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll b/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
index cabd580e20d504..f08629c15f26c6 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i4 @llvm.ssub.sat.i4(i4, i4)
 declare i8 @llvm.ssub.sat.i8(i8, i8)

diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
index 30e2a70ace0722..86a503038766c6 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -2,6 +2,10 @@
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
+; CHECK-GI:       warning: Instruction selection used fallback path for v16i4
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for v16i1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for v2i128
+
 declare <1 x i8> @llvm.ssub.sat.v1i8(<1 x i8>, <1 x i8>)
 declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
 declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>)

diff --git a/llvm/test/CodeGen/AArch64/uadd_sat.ll b/llvm/test/CodeGen/AArch64/uadd_sat.ll
index ccf46e8fce2e15..e9d22c7be52efe 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i4 @llvm.uadd.sat.i4(i4, i4)
 declare i8 @llvm.uadd.sat.i8(i8, i8)

diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll b/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
index d29564029544c9..5c81e3f20277a7 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i4 @llvm.uadd.sat.i4(i4, i4)
 declare i8 @llvm.uadd.sat.i8(i8, i8)

diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
index badd31c1c561c5..d4587c3439967a 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -2,6 +2,10 @@
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
+; CHECK-GI:       warning: Instruction selection used fallback path for v16i4
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for v16i1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for v2i128
+
 declare <1 x i8> @llvm.uadd.sat.v1i8(<1 x i8>, <1 x i8>)
 declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
 declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>)

diff --git a/llvm/test/CodeGen/AArch64/usub_sat.ll b/llvm/test/CodeGen/AArch64/usub_sat.ll
index 160e7e6607cdc3..54d7fc5a63b115 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i4 @llvm.usub.sat.i4(i4, i4)
 declare i8 @llvm.usub.sat.i8(i8, i8)

diff --git a/llvm/test/CodeGen/AArch64/usub_sat_plus.ll b/llvm/test/CodeGen/AArch64/usub_sat_plus.ll
index a9932216dbe34c..2793aeb163c94d 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_plus.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_plus.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i4 @llvm.usub.sat.i4(i4, i4)
 declare i8 @llvm.usub.sat.i8(i8, i8)

diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
index 45418b5c648fa3..123f4280bd8ff2 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -2,6 +2,10 @@
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
+; CHECK-GI:       warning: Instruction selection used fallback path for v16i4
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for v16i1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for v2i128
+
 declare <1 x i8> @llvm.usub.sat.v1i8(<1 x i8>, <1 x i8>)
 declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
 declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>)

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
index d71aed2d17506b..809a6d6556a7be 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i1 @llvm.vector.reduce.umax.v1i1(<1 x i1> %a)
 declare i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> %a)


        


More information about the llvm-commits mailing list