[llvm] 2b84784 - [NFC][Test] Add test coverage for IEEE Long Double on Power8

QingShan Zhang via llvm-commits llvm-commits@lists.llvm.org
Sun Nov 15 19:46:05 PST 2020


Author: QingShan Zhang
Date: 2020-11-16T03:45:51Z
New Revision: 2b84784a258da4dfd4ef99fb337f0f8b8321c200

URL: https://github.com/llvm/llvm-project/commit/2b84784a258da4dfd4ef99fb337f0f8b8321c200
DIFF: https://github.com/llvm/llvm-project/commit/2b84784a258da4dfd4ef99fb337f0f8b8321c200.diff

LOG: [NFC][Test] Add test coverage for IEEE Long Double on Power8

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/f128-aggregates.ll
    llvm/test/CodeGen/PowerPC/f128-arith.ll
    llvm/test/CodeGen/PowerPC/f128-bitcast.ll
    llvm/test/CodeGen/PowerPC/f128-compare.ll
    llvm/test/CodeGen/PowerPC/f128-conv.ll
    llvm/test/CodeGen/PowerPC/f128-fma.ll
    llvm/test/CodeGen/PowerPC/f128-passByValue.ll
    llvm/test/CodeGen/PowerPC/f128-rounding.ll
    llvm/test/CodeGen/PowerPC/f128-truncateNconv.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/f128-aggregates.ll b/llvm/test/CodeGen/PowerPC/f128-aggregates.ll
index 58ed954bca54..2dacdabd2de9 100644
--- a/llvm/test/CodeGen/PowerPC/f128-aggregates.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-aggregates.ll
@@ -5,6 +5,9 @@
 ; RUN: llc -relocation-model=pic -mcpu=pwr9 -mtriple=powerpc64-unknown-unknown \
 ; RUN:   -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s \
 ; RUN:   | FileCheck -check-prefix=CHECK-BE %s
+; RUN: llc -relocation-model=pic -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
+; RUN:   -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s \
+; RUN:   | FileCheck %s -check-prefix=CHECK-P8
 
 ; Testing homogeneous aggregates.
 
@@ -24,6 +27,13 @@ define fp128 @testArray_01(fp128* nocapture readonly %sa) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    lxv v2, 32(r3)
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testArray_01:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    ld r5, 32(r3)
+; CHECK-P8-NEXT:    ld r4, 40(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    blr
 
 entry:
   %arrayidx = getelementptr inbounds fp128, fp128* %sa, i64 2
@@ -46,6 +56,14 @@ define fp128 @testArray_02() {
 ; CHECK-BE-NEXT:    ld r3, .LC0@toc@l(r3)
 ; CHECK-BE-NEXT:    lxv v2, 32(r3)
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testArray_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r3)
+; CHECK-P8-NEXT:    ld r3, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    blr
 
 entry:
   %0 = load fp128, fp128* getelementptr inbounds ([3 x fp128], [3 x fp128]* @a1,
@@ -62,6 +80,10 @@ define fp128 @testStruct_01(fp128 inreg returned %a.coerce) {
 ; CHECK-BE-LABEL: testStruct_01:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testStruct_01:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    blr
 
 entry:
   ret fp128 %a.coerce
@@ -78,6 +100,12 @@ define fp128 @testStruct_02([8 x fp128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vmr v2, v9
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testStruct_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    ld r3, 144(r1)
+; CHECK-P8-NEXT:    ld r4, 152(r1)
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.7.extract = extractvalue [8 x fp128] %a.coerce, 7
@@ -113,6 +141,22 @@ define fp128 @testStruct_03(%struct.With9fp128params* byval nocapture readonly a
 ; CHECK-BE-NEXT:    std r9, 96(r1)
 ; CHECK-BE-NEXT:    std r10, 104(r1)
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testStruct_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    ld r11, 128(r1)
+; CHECK-P8-NEXT:    ld r12, 136(r1)
+; CHECK-P8-NEXT:    std r3, 32(r1)
+; CHECK-P8-NEXT:    std r4, 40(r1)
+; CHECK-P8-NEXT:    std r5, 48(r1)
+; CHECK-P8-NEXT:    std r6, 56(r1)
+; CHECK-P8-NEXT:    mr r3, r11
+; CHECK-P8-NEXT:    mr r4, r12
+; CHECK-P8-NEXT:    std r7, 64(r1)
+; CHECK-P8-NEXT:    std r8, 72(r1)
+; CHECK-P8-NEXT:    std r9, 80(r1)
+; CHECK-P8-NEXT:    std r10, 88(r1)
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a7 = getelementptr inbounds %struct.With9fp128params,
@@ -132,6 +176,12 @@ define fp128 @testStruct_04([8 x fp128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vmr v2, v5
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testStruct_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mr r4, r10
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.3.extract = extractvalue [8 x fp128] %a.coerce, 3
@@ -147,6 +197,10 @@ define fp128 @testHUnion_01([1 x fp128] %a.coerce) {
 ; CHECK-BE-LABEL: testHUnion_01:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testHUnion_01:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.0.extract = extractvalue [1 x fp128] %a.coerce, 0
@@ -162,6 +216,10 @@ define fp128 @testHUnion_02([3 x fp128] %a.coerce) {
 ; CHECK-BE-LABEL: testHUnion_02:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testHUnion_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.0.extract = extractvalue [3 x fp128] %a.coerce, 0
@@ -179,6 +237,12 @@ define fp128 @testHUnion_03([3 x fp128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vmr v2, v3
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testHUnion_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.1.extract = extractvalue [3 x fp128] %a.coerce, 1
@@ -196,6 +260,12 @@ define fp128 @testHUnion_04([3 x fp128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vmr v2, v4
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testHUnion_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mr r4, r8
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.2.extract = extractvalue [3 x fp128] %a.coerce, 2
@@ -218,6 +288,12 @@ define fp128 @testMixedAggregate([3 x i128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrdd v2, r8, r7
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testMixedAggregate:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mr r4, r8
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.2.extract = extractvalue [3 x i128] %a.coerce, 2
@@ -236,6 +312,12 @@ define fp128 @testMixedAggregate_02([4 x i128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrdd v2, r6, r5
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testMixedAggregate_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.1.extract = extractvalue [4 x i128] %a.coerce, 1
@@ -266,6 +348,50 @@ define fp128 @testMixedAggregate_03([4 x i128] %sa.coerce) {
 ; CHECK-BE-NEXT:    xscvsdqp v3, v3
 ; CHECK-BE-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testMixedAggregate_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    mr r30, r10
+; CHECK-P8-NEXT:    mr r29, r6
+; CHECK-P8-NEXT:    mr r28, r5
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r28
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r28, r4
+; CHECK-P8-NEXT:    bl __floatdikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r29
+; CHECK-P8-NEXT:    mr r4, r28
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %sa.coerce.fca.0.extract = extractvalue [4 x i128] %sa.coerce, 0
   %sa.sroa.0.0.extract.trunc = trunc i128 %sa.coerce.fca.0.extract to i32
@@ -309,6 +435,20 @@ define fp128 @testNestedAggregate(%struct.MixedC* byval nocapture readonly align
 ; CHECK-BE-NEXT:    std r9, 96(r1)
 ; CHECK-BE-NEXT:    std r10, 104(r1)
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testNestedAggregate:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    std r3, 32(r1)
+; CHECK-P8-NEXT:    std r4, 40(r1)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    mr r4, r8
+; CHECK-P8-NEXT:    std r8, 72(r1)
+; CHECK-P8-NEXT:    std r7, 64(r1)
+; CHECK-P8-NEXT:    std r5, 48(r1)
+; CHECK-P8-NEXT:    std r6, 56(r1)
+; CHECK-P8-NEXT:    std r9, 80(r1)
+; CHECK-P8-NEXT:    std r10, 88(r1)
+; CHECK-P8-NEXT:    blr
 
 entry:
   %c = getelementptr inbounds %struct.MixedC, %struct.MixedC* %a, i64 0, i32 1, i32 1
@@ -327,6 +467,10 @@ define fp128 @testUnion_01([1 x i128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrdd v2, r4, r3
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testUnion_01:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.0.extract = extractvalue [1 x i128] %a.coerce, 0
@@ -345,6 +489,10 @@ define fp128 @testUnion_02([1 x i128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrdd v2, r4, r3
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testUnion_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.0.extract = extractvalue [1 x i128] %a.coerce, 0
@@ -363,6 +511,12 @@ define fp128 @testUnion_03([4 x i128] %a.coerce) {
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrdd v2, r8, r7
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: testUnion_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mr r4, r8
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    blr
 
 entry:
   %a.coerce.fca.2.extract = extractvalue [4 x i128] %a.coerce, 2
@@ -419,6 +573,45 @@ define fp128 @sum_float128(i32 signext %count, ...) {
 ; CHECK-BE-NEXT:    lxv v3, 16(r3)
 ; CHECK-BE-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: sum_float128:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    cmpwi r3, 1
+; CHECK-P8-NEXT:    std r4, 88(r1)
+; CHECK-P8-NEXT:    std r5, 96(r1)
+; CHECK-P8-NEXT:    std r6, 104(r1)
+; CHECK-P8-NEXT:    std r7, 112(r1)
+; CHECK-P8-NEXT:    std r8, 120(r1)
+; CHECK-P8-NEXT:    std r9, 128(r1)
+; CHECK-P8-NEXT:    std r10, 136(r1)
+; CHECK-P8-NEXT:    blt cr0, .LBB17_2
+; CHECK-P8-NEXT:  # %bb.1: # %if.end
+; CHECK-P8-NEXT:    ld r3, 88(r1)
+; CHECK-P8-NEXT:    ld r4, 96(r1)
+; CHECK-P8-NEXT:    li r5, 0
+; CHECK-P8-NEXT:    li r6, 0
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    ld r5, 104(r1)
+; CHECK-P8-NEXT:    ld r6, 112(r1)
+; CHECK-P8-NEXT:    addi r7, r1, 120
+; CHECK-P8-NEXT:    std r7, 40(r1)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    b .LBB17_3
+; CHECK-P8-NEXT:  .LBB17_2:
+; CHECK-P8-NEXT:    li r3, 0
+; CHECK-P8-NEXT:    li r4, 0
+; CHECK-P8-NEXT:  .LBB17_3: # %cleanup
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %ap = alloca i8*, align 8
   %0 = bitcast i8** %ap to i8*

diff --git a/llvm/test/CodeGen/PowerPC/f128-arith.ll b/llvm/test/CodeGen/PowerPC/f128-arith.ll
index 40b123bb9276..a3716a518c8c 100644
--- a/llvm/test/CodeGen/PowerPC/f128-arith.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-arith.ll
@@ -1,63 +1,182 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
 ; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
+; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s \
+; RUN:   -check-prefix=CHECK-P8
 
 ; Function Attrs: norecurse nounwind
 define void @qpAdd(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qpAdd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsaddqp v2, v2, v2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpAdd:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %add = fadd fp128 %0, %0
   store fp128 %add, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpAdd
-; CHECK-NOT: bl __addtf3
-; CHECK: xsaddqp
-; CHECK: stxv
-; CHECK: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpSub(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qpSub:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xssubqp v2, v2, v2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpSub:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl __subkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %sub = fsub fp128 %0, %0
   store fp128 %sub, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpSub
-; CHECK-NOT: bl __subtf3
-; CHECK: xssubqp
-; CHECK: stxv
-; CHECK: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpMul(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qpMul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsmulqp v2, v2, v2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpMul:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %mul = fmul fp128 %0, %0
   store fp128 %mul, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpMul
-; CHECK-NOT: bl __multf3
-; CHECK: xsmulqp
-; CHECK: stxv
-; CHECK: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpDiv(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qpDiv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsdivqp v2, v2, v2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpDiv:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl __divkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %div = fdiv fp128 %0, %0
   store fp128 %div, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpDiv
-; CHECK-NOT: bl __divtf3
-; CHECK: xsdivqp
-; CHECK: stxv
-; CHECK: blr
 }
 
 define void @testLdNSt(i8* nocapture readonly %PtrC, fp128* nocapture %PtrF) {
+; CHECK-LABEL: testLdNSt:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi r3, r3, 4
+; CHECK-NEXT:    addi r4, r4, 8
+; CHECK-NEXT:    lxvx vs0, 0, r3
+; CHECK-NEXT:    stxvx vs0, 0, r4
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: testLdNSt:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addi r3, r3, 4
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
+; CHECK-P8-NEXT:    addi r3, r4, 8
+; CHECK-P8-NEXT:    stxvd2x vs0, 0, r3
+; CHECK-P8-NEXT:    blr
 entry:
   %add.ptr = getelementptr inbounds i8, i8* %PtrC, i64 4
   %0 = bitcast i8* %add.ptr to fp128*
@@ -67,28 +186,67 @@ entry:
   %3 = bitcast i8* %add.ptr1 to fp128*
   store fp128 %1, fp128* %3, align 16
   ret void
-; CHECK-LABEL: testLdNSt
-; CHECK: lxvx
-; CHECK: stxvx
-; CHECK-NEXT: blr
 }
 
 define void @qpSqrt(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qpSqrt:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xssqrtqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpSqrt:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl sqrtl
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.sqrt.f128(fp128 %0)
   store fp128 %1, fp128* %res, align 16
   ret void
 
-; CHECK-LABEL: qpSqrt
-; CHECK-NOT: bl sqrtl
-; CHECK: xssqrtqp
-; CHECK: stxv
-; CHECK: blr
 }
 declare fp128 @llvm.sqrt.f128(fp128 %Val)
 
 define void @qpCpsgn(fp128* nocapture readonly %a, fp128* nocapture readonly %b,
+; CHECK-LABEL: qpCpsgn:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    xscpsgnqp v2, v3, v2
+; CHECK-NEXT:    stxv v2, 0(r5)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpCpsgn:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    ld r6, 0(r3)
+; CHECK-P8-NEXT:    ld r3, 8(r3)
+; CHECK-P8-NEXT:    rotldi r4, r4, 1
+; CHECK-P8-NEXT:    rldimi r3, r4, 63, 0
+; CHECK-P8-NEXT:    std r6, 0(r5)
+; CHECK-P8-NEXT:    std r3, 8(r5)
+; CHECK-P8-NEXT:    blr
                      fp128* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -97,30 +255,51 @@ entry:
   store fp128 %2, fp128* %res, align 16
   ret void
 
-; CHECK-LABEL: qpCpsgn
-; CHECK-NOT: rldimi
-; CHECK: xscpsgnqp
-; CHECK: stxv
-; CHECK: blr
 }
 declare fp128 @llvm.copysign.f128(fp128 %Mag, fp128 %Sgn)
 
 define void @qpAbs(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qpAbs:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsabsqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpAbs:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r3, 8(r3)
+; CHECK-P8-NEXT:    clrldi r3, r3, 1
+; CHECK-P8-NEXT:    std r5, 0(r4)
+; CHECK-P8-NEXT:    std r3, 8(r4)
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.fabs.f128(fp128 %0)
   store fp128 %1, fp128* %res, align 16
   ret void
 
-; CHECK-LABEL: qpAbs
-; CHECK-NOT: clrldi
-; CHECK: xsabsqp
-; CHECK: stxv
-; CHECK: blr
 }
 declare fp128 @llvm.fabs.f128(fp128 %Val)
 
 define void @qpNAbs(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qpNAbs:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsnabsqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpNAbs:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    ld r6, 0(r3)
+; CHECK-P8-NEXT:    ld r3, 8(r3)
+; CHECK-P8-NEXT:    li r5, -1
+; CHECK-P8-NEXT:    rldimi r3, r5, 63, 0
+; CHECK-P8-NEXT:    std r6, 0(r4)
+; CHECK-P8-NEXT:    std r3, 8(r4)
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.fabs.f128(fp128 %0)
@@ -128,32 +307,66 @@ entry:
   store fp128 %neg, fp128* %res, align 16
   ret void
 
-; CHECK-LABEL: qpNAbs
-; CHECK-NOT: bl __subtf3
-; CHECK: xsnabsqp
-; CHECK: stxv
-; CHECK: blr
 }
 
 define void @qpNeg(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qpNeg:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsnegqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpNeg:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    li r5, 1
+; CHECK-P8-NEXT:    ld r6, 0(r3)
+; CHECK-P8-NEXT:    ld r3, 8(r3)
+; CHECK-P8-NEXT:    sldi r5, r5, 63
+; CHECK-P8-NEXT:    xor r3, r3, r5
+; CHECK-P8-NEXT:    std r6, 0(r4)
+; CHECK-P8-NEXT:    std r3, 8(r4)
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %sub = fsub fp128 0xL00000000000000008000000000000000, %0
   store fp128 %sub, fp128* %res, align 16
   ret void
 
-; CHECK-LABEL: qpNeg
-; CHECK-NOT: bl __subtf3
-; CHECK: xsnegqp
-; CHECK: stxv
-; CHECK: blr
 }
 
 define fp128 @qp_sin(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qp_sin:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         bl sinf128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    bl sinf128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_sin:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl sinf128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.sin.f128(fp128 %0)
@@ -163,9 +376,36 @@ declare fp128 @llvm.sin.f128(fp128 %Val)
 
 define fp128 @qp_cos(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qp_cos:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         bl cosf128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    bl cosf128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_cos:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl cosf128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.cos.f128(fp128 %0)
@@ -175,9 +415,36 @@ declare fp128 @llvm.cos.f128(fp128 %Val)
 
 define fp128 @qp_log(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qp_log:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         bl logf128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    bl logf128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_log:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl logf128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.log.f128(fp128 %0)
@@ -187,9 +454,36 @@ declare fp128     @llvm.log.f128(fp128 %Val)
 
 define fp128 @qp_log10(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qp_log10:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         bl log10f128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    bl log10f128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_log10:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl log10f128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.log10.f128(fp128 %0)
@@ -199,9 +493,36 @@ declare fp128     @llvm.log10.f128(fp128 %Val)
 
 define fp128 @qp_log2(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qp_log2:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         bl log2f128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    bl log2f128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_log2:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl log2f128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.log2.f128(fp128 %0)
@@ -210,12 +531,42 @@ entry:
 declare fp128     @llvm.log2.f128(fp128 %Val)
 
 define fp128 @qp_minnum(fp128* nocapture readonly %a,
-                        fp128* nocapture readonly %b) {
 ; CHECK-LABEL: qp_minnum:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         lxv v3, 0(r4)
-; CHECK:         bl fminf128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    bl fminf128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_minnum:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r8, 8(r3)
+; CHECK-P8-NEXT:    ld r5, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    mr r4, r8
+; CHECK-P8-NEXT:    bl fminf128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
+                        fp128* nocapture readonly %b) {
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16
@@ -225,12 +576,42 @@ entry:
 declare fp128     @llvm.minnum.f128(fp128 %Val0, fp128 %Val1)
 
 define fp128 @qp_maxnum(fp128* nocapture readonly %a,
-                        fp128* nocapture readonly %b) {
 ; CHECK-LABEL: qp_maxnum:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         lxv v3, 0(r4)
-; CHECK:         bl fmaxf128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    bl fmaxf128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_maxnum:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r8, 8(r3)
+; CHECK-P8-NEXT:    ld r5, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    mr r4, r8
+; CHECK-P8-NEXT:    bl fmaxf128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
+                        fp128* nocapture readonly %b) {
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16
@@ -240,12 +621,42 @@ entry:
 declare fp128     @llvm.maxnum.f128(fp128 %Val0, fp128 %Val1)
 
 define fp128 @qp_pow(fp128* nocapture readonly %a,
-                     fp128* nocapture readonly %b) {
 ; CHECK-LABEL: qp_pow:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         lxv v3, 0(r4)
-; CHECK:         bl powf128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    bl powf128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_pow:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r8, 8(r3)
+; CHECK-P8-NEXT:    ld r5, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    mr r4, r8
+; CHECK-P8-NEXT:    bl powf128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
+                     fp128* nocapture readonly %b) {
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16
@@ -256,9 +667,36 @@ declare fp128 @llvm.pow.f128(fp128 %Val, fp128 %Power)
 
 define fp128 @qp_exp(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qp_exp:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         bl expf128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    bl expf128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_exp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl expf128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.exp.f128(fp128 %0)
@@ -268,9 +706,36 @@ declare fp128     @llvm.exp.f128(fp128 %Val)
 
 define fp128 @qp_exp2(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qp_exp2:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         bl exp2f128
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    bl exp2f128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_exp2:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl exp2f128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.exp2.f128(fp128 %0)
@@ -279,12 +744,53 @@ entry:
 declare fp128     @llvm.exp2.f128(fp128 %Val)
 
 define void @qp_powi(fp128* nocapture readonly %a, i32* nocapture readonly %b,
-                     fp128* nocapture %res) {
 ; CHECK-LABEL: qp_powi:
-; CHECK:         lxv v2, 0(r3)
-; CHECK:         lwz r5, 0(r4)
-; CHECK:         bl __powikf2
-; CHECK:         blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    .cfi_offset r30, -16
+; CHECK-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -48(r1)
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    mr r30, r5
+; CHECK-NEXT:    lwz r5, 0(r4)
+; CHECK-NEXT:    bl __powikf2
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    stxv v2, 0(r30)
+; CHECK-NEXT:    addi r1, r1, 48
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_powi:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r6, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    lwz r8, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r6
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __powikf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
+                     fp128* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load i32, i32* %b, align 8
@@ -298,12 +804,50 @@ declare fp128 @llvm.powi.f128(fp128 %Val, i32 %power)
 @b = common global fp128 0xL00000000000000000000000000000000, align 16
 
 define fp128 @qp_frem() #0 {
+; CHECK-LABEL: qp_frem:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    addis r3, r2, a@toc@ha
+; CHECK-NEXT:    addi r3, r3, a@toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b@toc@ha
+; CHECK-NEXT:    addi r3, r3, b@toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    bl fmodf128
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_frem:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a@toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a@toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b@toc@l
+; CHECK-P8-NEXT:    ld r3, a@toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl fmodf128
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a, align 16
   %1 = load fp128, fp128* @b, align 16
   %rem = frem fp128 %0, %1
   ret fp128 %rem
-; CHECK-LABEL: qp_frem
-; CHECK: bl fmodf128
-; CHECK: blr
 }

diff --git a/llvm/test/CodeGen/PowerPC/f128-bitcast.ll b/llvm/test/CodeGen/PowerPC/f128-bitcast.ll
index fca24f5fd541..01cbd1a42419 100644
--- a/llvm/test/CodeGen/PowerPC/f128-bitcast.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-bitcast.ll
@@ -1,51 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
 ; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64-unknown-unknown -verify-machineinstrs \
 ; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
 ; RUN:   FileCheck %s --check-prefix=CHECK-BE
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
+; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s \
+; RUN:   -check-prefix=CHECK-P8
 
 ; Function Attrs: norecurse nounwind readnone
 define i64 @getPart1(fp128 %in) local_unnamed_addr {
+; CHECK-LABEL: getPart1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mfvsrld r3, v2
+; CHECK-NEXT:    blr
+;
+; CHECK-BE-LABEL: getPart1:
+; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    mfvsrld r3, v2
+; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: getPart1:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = bitcast fp128 %in to i128
   %a.sroa.0.0.extract.trunc = trunc i128 %0 to i64
   ret i64 %a.sroa.0.0.extract.trunc
-; CHECK-LABEL: getPart1
-; CHECK:       mfvsrld r3, v2
-; CHECK-NEXT:  blr
-; CHECK-BE-LABEL: getPart1
-; CHECK-BE:       mfvsrld r3, v2
-; CHECK-BE-NEXT:  blr
 }
 
 ; Function Attrs: norecurse nounwind readnone
 define i64 @getPart2(fp128 %in) local_unnamed_addr {
+; CHECK-LABEL: getPart2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mfvsrd r3, v2
+; CHECK-NEXT:    blr
+;
+; CHECK-BE-LABEL: getPart2:
+; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    mfvsrd r3, v2
+; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: getPart2:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = bitcast fp128 %in to i128
   %a.sroa.0.8.extract.shift = lshr i128 %0, 64
   %a.sroa.0.8.extract.trunc = trunc i128 %a.sroa.0.8.extract.shift to i64
   ret i64 %a.sroa.0.8.extract.trunc
-; CHECK-LABEL: getPart2
-; CHECK:       mfvsrd r3, v2
-; CHECK-NEXT:  blr
-; CHECK-BE-LABEL: getPart2
-; CHECK-BE:       mfvsrd r3, v2
-; CHECK-BE-NEXT:  blr
 }
 
 ; Function Attrs: norecurse nounwind readnone
 define i64 @checkBitcast(fp128 %in, <2 x i64> %in2, <2 x i64> *%out) local_unnamed_addr {
+; CHECK-LABEL: checkBitcast:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mfvsrld r3, v2
+; CHECK-NEXT:    vaddudm v2, v2, v3
+; CHECK-NEXT:    stxv v2, 0(r7)
+; CHECK-NEXT:    blr
+;
+; CHECK-BE-LABEL: checkBitcast:
+; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    mfvsrd r3, v2
+; CHECK-BE-NEXT:    vaddudm v2, v2, v3
+; CHECK-BE-NEXT:    stxv v2, 0(r7)
+; CHECK-BE-NEXT:    blr
+;
+; CHECK-P8-LABEL: checkBitcast:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mtfprd f0, r3
+; CHECK-P8-NEXT:    mtfprd f1, r4
+; CHECK-P8-NEXT:    xxmrghd v3, vs1, vs0
+; CHECK-P8-NEXT:    xxswapd vs0, v3
+; CHECK-P8-NEXT:    vaddudm v2, v3, v2
+; CHECK-P8-NEXT:    mffprd r3, f0
+; CHECK-P8-NEXT:    xxswapd vs0, v2
+; CHECK-P8-NEXT:    stxvd2x vs0, 0, r7
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = bitcast fp128 %in to <2 x i64>
   %1 = extractelement <2 x i64> %0, i64 0
   %2 = add <2 x i64> %0, %in2
   store <2 x i64> %2, <2 x i64> *%out, align 16
   ret i64 %1
-; CHECK-LABEL: checkBitcast
-; CHECK:       mfvsrld r3, v2
-; CHECK:       blr
-; CHECK-BE-LABEL: checkBitcast
-; CHECK-BE:       mfvsrd r3, v2
-; CHECK-BE:       blr
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/f128-compare.ll b/llvm/test/CodeGen/PowerPC/f128-compare.ll
index 5376b3b3f1c5..e8170b4cc6eb 100644
--- a/llvm/test/CodeGen/PowerPC/f128-compare.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-compare.ll
@@ -1,83 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
 ; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
+; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s \
+; RUN:   -check-prefix=CHECK-P8
 
 @a_qp = common global fp128 0xL00000000000000000000000000000000, align 16
 @b_qp = common global fp128 0xL00000000000000000000000000000000, align 16
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @greater_qp() {
+; CHECK-LABEL: greater_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp@toc@ha
+; CHECK-NEXT:    li r4, 1
+; CHECK-NEXT:    addi r3, r3, a_qp@toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp@toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 0
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    iselgt r3, r4, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: greater_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp@toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp@toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp@toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp@toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __gtkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    neg r3, r3
+; CHECK-P8-NEXT:    rldicl r3, r3, 1, 63
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp ogt fp128 %0, %1
   %conv = zext i1 %cmp to i32
   ret i32 %conv
-; CHECK-LABEL: greater_qp
-; CHECK: xscmpuqp
-; CHECK: iselgt r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @less_qp() {
+; CHECK-LABEL: less_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp@toc@ha
+; CHECK-NEXT:    li r4, 1
+; CHECK-NEXT:    addi r3, r3, a_qp@toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp@toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 0
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    isellt r3, r4, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: less_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp@toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp@toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp@toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp@toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __ltkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    rlwinm r3, r3, 1, 31, 31
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp olt fp128 %0, %1
   %conv = zext i1 %cmp to i32
   ret i32 %conv
-; CHECK-LABEL: less_qp
-; CHECK: xscmpuqp
-; CHECK: isellt r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @greater_eq_qp() {
+; CHECK-LABEL: greater_eq_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp@toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp@toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 1
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    cror 4*cr5+lt, un, lt
+; CHECK-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: greater_eq_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp@toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp@toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp@toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp@toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __gekf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    rlwinm r3, r3, 1, 31, 31
+; CHECK-P8-NEXT:    xori r3, r3, 1
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp oge fp128 %0, %1
   %conv = zext i1 %cmp to i32
   ret i32 %conv
-; CHECK-LABEL: greater_eq_qp
-; CHECK: xscmpuqp
-; CHECK: cror 4*cr[[REG:[0-9]+]]+lt, un, lt
-; CHECK: isel r{{[0-9]+}}, 0, r{{[0-9]+}}, 4*cr[[REG]]+lt
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @less_eq_qp() {
+; CHECK-LABEL: less_eq_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp@toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp@toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 1
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    cror 4*cr5+lt, un, gt
+; CHECK-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: less_eq_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp@toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp@toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp@toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp@toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __lekf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    neg r3, r3
+; CHECK-P8-NEXT:    rldicl r3, r3, 1, 63
+; CHECK-P8-NEXT:    xori r3, r3, 1
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp ole fp128 %0, %1
   %conv = zext i1 %cmp to i32
   ret i32 %conv
-; CHECK-LABEL: less_eq_qp
-; CHECK: xscmpuqp
-; CHECK: cror 4*cr[[REG:[0-9]+]]+lt, un, gt
-; CHECK: isel r{{[0-9]+}}, 0, r{{[0-9]+}}, 4*cr[[REG]]+lt
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @equal_qp() {
+; CHECK-LABEL: equal_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp@toc@ha
+; CHECK-NEXT:    li r4, 1
+; CHECK-NEXT:    addi r3, r3, a_qp@toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp@toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 0
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    iseleq r3, r4, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: equal_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp@toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp@toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp@toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp@toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __eqkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    cntlzw r3, r3
+; CHECK-P8-NEXT:    srwi r3, r3, 5
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp oeq fp128 %0, %1
   %conv = zext i1 %cmp to i32
   ret i32 %conv
-; CHECK-LABEL: equal_qp
-; CHECK: xscmpuqp
-; CHECK: iseleq r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @not_greater_qp() {
+; CHECK-LABEL: not_greater_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp@toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp@toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 1
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    iselgt r3, 0, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: not_greater_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp@toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp@toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp@toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp@toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __gtkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    neg r3, r3
+; CHECK-P8-NEXT:    rldicl r3, r3, 1, 63
+; CHECK-P8-NEXT:    xori r3, r3, 1
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
@@ -85,14 +292,46 @@ entry:
   %lnot = xor i1 %cmp, true
   %lnot.ext = zext i1 %lnot to i32
   ret i32 %lnot.ext
-; CHECK-LABEL: not_greater_qp
-; CHECK: xscmpuqp
-; CHECK: iselgt r{{[0-9]+}}, 0, r{{[0-9]+}}
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @not_less_qp() {
+; CHECK-LABEL: not_less_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp@toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp@toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp@toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 1
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    isellt r3, 0, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: not_less_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp at toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp at toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp at toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __ltkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    rlwinm r3, r3, 1, 31, 31
+; CHECK-P8-NEXT:    xori r3, r3, 1
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
@@ -100,14 +339,46 @@ entry:
   %lnot = xor i1 %cmp, true
   %lnot.ext = zext i1 %lnot to i32
   ret i32 %lnot.ext
-; CHECK-LABEL: not_less_qp
-; CHECK: xscmpuqp
-; CHECK: isellt r{{[0-9]+}}, 0, r{{[0-9]+}}
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @not_greater_eq_qp() {
+; CHECK-LABEL: not_greater_eq_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp at toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 1
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    crnor 4*cr5+lt, lt, un
+; CHECK-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: not_greater_eq_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp at toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp at toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp at toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __gekf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    rlwinm r3, r3, 1, 31, 31
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
@@ -115,15 +386,48 @@ entry:
   %lnot = xor i1 %cmp, true
   %lnot.ext = zext i1 %lnot to i32
   ret i32 %lnot.ext
-; CHECK-LABEL: not_greater_eq_qp
-; CHECK: xscmpuqp
-; CHECK: crnor 4*cr[[REG:[0-9]+]]+lt, lt, un
-; CHECK: isel r{{[0-9]+}}, 0, r{{[0-9]+}}, 4*cr[[REG]]+lt
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @not_less_eq_qp() {
+; CHECK-LABEL: not_less_eq_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp at toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 1
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    crnor 4*cr5+lt, gt, un
+; CHECK-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: not_less_eq_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp at toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp at toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp at toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __lekf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    neg r3, r3
+; CHECK-P8-NEXT:    rldicl r3, r3, 1, 63
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
@@ -131,95 +435,373 @@ entry:
   %lnot = xor i1 %cmp, true
   %lnot.ext = zext i1 %lnot to i32
   ret i32 %lnot.ext
-; CHECK-LABEL: not_less_eq_qp
-; CHECK: xscmpuqp
-; CHECK: crnor 4*cr[[REG:[0-9]+]]+lt, gt, un
-; CHECK: isel r{{[0-9]+}}, 0, r{{[0-9]+}}, 4*cr[[REG]]+lt
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define signext i32 @not_equal_qp() {
+; CHECK-LABEL: not_equal_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp at toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    li r3, 1
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    iseleq r3, 0, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: not_equal_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    addi r6, r5, a_qp at toc@l
+; CHECK-P8-NEXT:    addi r7, r4, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r3, a_qp at toc@l(r5)
+; CHECK-P8-NEXT:    ld r5, b_qp at toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r6)
+; CHECK-P8-NEXT:    ld r6, 8(r7)
+; CHECK-P8-NEXT:    bl __nekf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    cntlzw r3, r3
+; CHECK-P8-NEXT:    srwi r3, r3, 5
+; CHECK-P8-NEXT:    xori r3, r3, 1
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp une fp128 %0, %1
   %conv = zext i1 %cmp to i32
   ret i32 %conv
-; CHECK-LABEL: not_equal_qp
-; CHECK: xscmpuqp
-; CHECK: iseleq r{{[0-9]+}}, 0, r{{[0-9]+}}
-; CHECK: blr
 }
 
 ; Function Attrs: norecurse nounwind readonly
 define fp128 @greater_sel_qp() {
+; CHECK-LABEL: greater_sel_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp at toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    bgtlr cr0
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    vmr v2, v3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: greater_sel_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r27, -40
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -80(r1)
+; CHECK-P8-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r4, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    ld r30, a_qp at toc@l(r4)
+; CHECK-P8-NEXT:    addi r4, r4, a_qp at toc@l
+; CHECK-P8-NEXT:    ld r29, b_qp at toc@l(r3)
+; CHECK-P8-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r28, 8(r4)
+; CHECK-P8-NEXT:    ld r27, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r5, r29
+; CHECK-P8-NEXT:    mr r4, r28
+; CHECK-P8-NEXT:    mr r6, r27
+; CHECK-P8-NEXT:    bl __gtkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    cmpwi r3, 0
+; CHECK-P8-NEXT:    iselgt r3, r30, r29
+; CHECK-P8-NEXT:    iselgt r4, r28, r27
+; CHECK-P8-NEXT:    addi r1, r1, 80
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp ogt fp128 %0, %1
   %cond = select i1 %cmp, fp128 %0, fp128 %1
   ret fp128 %cond
-; CHECK-LABEL: greater_sel_qp
-; CHECK: xscmpuqp cr[[REG:[0-9]+]]
-; CHECK: bgtlr cr[[REG]]
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define fp128 @less_sel_qp() {
+; CHECK-LABEL: less_sel_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp at toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    bltlr cr0
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    vmr v2, v3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: less_sel_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r27, -40
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -80(r1)
+; CHECK-P8-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r4, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    ld r30, a_qp at toc@l(r4)
+; CHECK-P8-NEXT:    addi r4, r4, a_qp at toc@l
+; CHECK-P8-NEXT:    ld r29, b_qp at toc@l(r3)
+; CHECK-P8-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r28, 8(r4)
+; CHECK-P8-NEXT:    ld r27, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r5, r29
+; CHECK-P8-NEXT:    mr r4, r28
+; CHECK-P8-NEXT:    mr r6, r27
+; CHECK-P8-NEXT:    bl __ltkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    cmpwi r3, 0
+; CHECK-P8-NEXT:    isellt r3, r30, r29
+; CHECK-P8-NEXT:    isellt r4, r28, r27
+; CHECK-P8-NEXT:    addi r1, r1, 80
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp olt fp128 %0, %1
   %cond = select i1 %cmp, fp128 %0, fp128 %1
   ret fp128 %cond
-; CHECK-LABEL: less_sel_qp
-; CHECK: xscmpuqp cr[[REG:[0-9]+]]
-; CHECK: bltlr cr[[REG]]
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define fp128 @greater_eq_sel_qp() {
+; CHECK-LABEL: greater_eq_sel_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp at toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    crnor 4*cr5+lt, un, lt
+; CHECK-NEXT:    bclr 12, 4*cr5+lt, 0
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    vmr v2, v3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: greater_eq_sel_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r27, -40
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -80(r1)
+; CHECK-P8-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r4, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    ld r30, a_qp at toc@l(r4)
+; CHECK-P8-NEXT:    addi r4, r4, a_qp at toc@l
+; CHECK-P8-NEXT:    ld r29, b_qp at toc@l(r3)
+; CHECK-P8-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r28, 8(r4)
+; CHECK-P8-NEXT:    ld r27, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r5, r29
+; CHECK-P8-NEXT:    mr r4, r28
+; CHECK-P8-NEXT:    mr r6, r27
+; CHECK-P8-NEXT:    bl __gekf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    cmpwi r3, -1
+; CHECK-P8-NEXT:    iselgt r3, r30, r29
+; CHECK-P8-NEXT:    iselgt r4, r28, r27
+; CHECK-P8-NEXT:    addi r1, r1, 80
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp oge fp128 %0, %1
   %cond = select i1 %cmp, fp128 %0, fp128 %1
   ret fp128 %cond
-; CHECK-LABEL: greater_eq_sel_qp
-; CHECK: xscmpuqp
-; CHECK: crnor 4*cr[[REG:[0-9]+]]+lt, un, lt
-; CHECK: bclr {{[0-9]+}}, 4*cr[[REG]]+lt, 0
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define fp128 @less_eq_sel_qp() {
+; CHECK-LABEL: less_eq_sel_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp at toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    crnor 4*cr5+lt, un, gt
+; CHECK-NEXT:    bclr 12, 4*cr5+lt, 0
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    vmr v2, v3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: less_eq_sel_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r27, -40
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -80(r1)
+; CHECK-P8-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r4, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    ld r30, a_qp at toc@l(r4)
+; CHECK-P8-NEXT:    addi r4, r4, a_qp at toc@l
+; CHECK-P8-NEXT:    ld r29, b_qp at toc@l(r3)
+; CHECK-P8-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r28, 8(r4)
+; CHECK-P8-NEXT:    ld r27, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r5, r29
+; CHECK-P8-NEXT:    mr r4, r28
+; CHECK-P8-NEXT:    mr r6, r27
+; CHECK-P8-NEXT:    bl __lekf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    cmpwi r3, 1
+; CHECK-P8-NEXT:    isellt r3, r30, r29
+; CHECK-P8-NEXT:    isellt r4, r28, r27
+; CHECK-P8-NEXT:    addi r1, r1, 80
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp ole fp128 %0, %1
   %cond = select i1 %cmp, fp128 %0, fp128 %1
   ret fp128 %cond
-; CHECK-LABEL: less_eq_sel_qp
-; CHECK: xscmpuqp
-; CHECK: crnor 4*cr[[REG:[0-9]+]]+lt, un, gt
-; CHECK: bclr {{[0-9]+}}, 4*cr[[REG]]+lt, 0
-; CHECK: blr
 }
 
 ; Function Attrs: noinline nounwind optnone
 define fp128 @equal_sel_qp() {
+; CHECK-LABEL: equal_sel_qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, a_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, a_qp at toc@l
+; CHECK-NEXT:    lxvx v2, 0, r3
+; CHECK-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-NEXT:    lxvx v3, 0, r3
+; CHECK-NEXT:    xscmpuqp cr0, v2, v3
+; CHECK-NEXT:    beqlr cr0
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    vmr v2, v3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: equal_sel_qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r27, -40
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -80(r1)
+; CHECK-P8-NEXT:    addis r3, r2, b_qp at toc@ha
+; CHECK-P8-NEXT:    addis r4, r2, a_qp at toc@ha
+; CHECK-P8-NEXT:    ld r30, a_qp at toc@l(r4)
+; CHECK-P8-NEXT:    addi r4, r4, a_qp at toc@l
+; CHECK-P8-NEXT:    ld r29, b_qp at toc@l(r3)
+; CHECK-P8-NEXT:    addi r3, r3, b_qp at toc@l
+; CHECK-P8-NEXT:    ld r28, 8(r4)
+; CHECK-P8-NEXT:    ld r27, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r5, r29
+; CHECK-P8-NEXT:    mr r4, r28
+; CHECK-P8-NEXT:    mr r6, r27
+; CHECK-P8-NEXT:    bl __eqkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    cmplwi r3, 0
+; CHECK-P8-NEXT:    iseleq r3, r30, r29
+; CHECK-P8-NEXT:    iseleq r4, r28, r27
+; CHECK-P8-NEXT:    addi r1, r1, 80
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @a_qp, align 16
   %1 = load fp128, fp128* @b_qp, align 16
   %cmp = fcmp oeq fp128 %0, %1
   %cond = select i1 %cmp, fp128 %0, fp128 %1
   ret fp128 %cond
-; CHECK-LABEL: equal_sel_qp
-; CHECK: xscmpuqp cr[[REG:[0-9]+]]
-; CHECK: beqlr cr[[REG]]
-; CHECK: blr
 }

diff  --git a/llvm/test/CodeGen/PowerPC/f128-conv.ll b/llvm/test/CodeGen/PowerPC/f128-conv.ll
index 7f0c13a23ffc..f8c27cf109a7 100644
--- a/llvm/test/CodeGen/PowerPC/f128-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-conv.ll
@@ -1,6 +1,10 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -relocation-model=pic -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   -ppc-vsr-nums-as-vr -verify-machineinstrs -ppc-asm-full-reg-names < %s \
 ; RUN:   | FileCheck %s
+; RUN: llc -relocation-model=pic -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
+; RUN:   -ppc-vsr-nums-as-vr -verify-machineinstrs -ppc-asm-full-reg-names < %s \
+; RUN:   | FileCheck %s -check-prefix=CHECK-P8
 
 @mem = global [5 x i64] [i64 56, i64 63, i64 3, i64 5, i64 6], align 8
 @umem = global [5 x i64] [i64 560, i64 100, i64 34, i64 2, i64 5], align 8
@@ -11,68 +15,191 @@
 
 ; Function Attrs: norecurse nounwind
 define void @sdwConv2qp(fp128* nocapture %a, i64 %b) {
+; CHECK-LABEL: sdwConv2qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mtvsrd v2, r4
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: sdwConv2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatdikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i64 %b to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: sdwConv2qp
-; CHECK: mtvsrd v[[REG:[0-9]+]], r4
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @sdwConv2qp_02(fp128* nocapture %a) {
+; CHECK-LABEL: sdwConv2qp_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC0 at toc@ha
+; CHECK-NEXT:    ld r4, .LC0 at toc@l(r4)
+; CHECK-NEXT:    lxsd v2, 16(r4)
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: sdwConv2qp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0 at toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0 at toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 16(r4)
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatdikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i64, i64* getelementptr inbounds 
+  %0 = load i64, i64* getelementptr inbounds
                         ([5 x i64], [5 x i64]* @mem, i64 0, i64 2), align 8
   %conv = sitofp i64 %0 to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: sdwConv2qp_02
-; CHECK: addis r[[REG:[0-9]+]], r2, .LC0 at toc@ha
-; CHECK: ld r[[REG]], .LC0 at toc@l(r[[REG]])
-; CHECK: lxsd v[[REG0:[0-9]+]], 16(r[[REG]])
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG0]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @sdwConv2qp_03(fp128* nocapture %a, i64* nocapture readonly %b) {
+; CHECK-LABEL: sdwConv2qp_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxsd v2, 0(r4)
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: sdwConv2qp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r4, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatdikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i64, i64* %b, align 8
   %conv = sitofp i64 %0 to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: sdwConv2qp_03
-; CHECK-NOT: ld
-; CHECK: lxsd v[[REG0:[0-9]+]], 0(r4)
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG0]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @udwConv2qp(fp128* nocapture %a, i64 %b) {
+; CHECK-LABEL: udwConv2qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mtvsrd v2, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: udwConv2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatundikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i64 %b to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: udwConv2qp
-; CHECK: mtvsrd v[[REG:[0-9]+]], r4
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @udwConv2qp_02(fp128* nocapture %a) {
+; CHECK-LABEL: udwConv2qp_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC1 at toc@ha
+; CHECK-NEXT:    ld r4, .LC1 at toc@l(r4)
+; CHECK-NEXT:    lxsd v2, 32(r4)
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: udwConv2qp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC1 at toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC1 at toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 32(r4)
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatundikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i64, i64* getelementptr inbounds
                         ([5 x i64], [5 x i64]* @umem, i64 0, i64 4), align 8
@@ -80,33 +207,82 @@ entry:
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: udwConv2qp_02
-; CHECK: addis r[[REG:[0-9]+]], r2, .LC1 at toc@ha
-; CHECK: ld r[[REG]], .LC1 at toc@l(r[[REG]])
-; CHECK: lxsd v[[REG0:[0-9]+]], 32(r[[REG]])
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG0]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @udwConv2qp_03(fp128* nocapture %a, i64* nocapture readonly %b) {
+; CHECK-LABEL: udwConv2qp_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxsd v2, 0(r4)
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: udwConv2qp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r4, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatundikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i64, i64* %b, align 8
   %conv = uitofp i64 %0 to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: udwConv2qp_03
-; CHECK-NOT: ld
-; CHECK: lxsd v[[REG:[0-9]+]], 0(r4)
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define fp128* @sdwConv2qp_testXForm(fp128* returned %sink,
+; CHECK-LABEL: sdwConv2qp_testXForm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lis r5, 1
+; CHECK-NEXT:    ori r5, r5, 7797
+; CHECK-NEXT:    lxsdx v2, r4, r5
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: sdwConv2qp_testXForm:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    lis r5, 1
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ori r5, r5, 7797
+; CHECK-P8-NEXT:    ldx r4, r4, r5
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatdikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r5, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                                     i8* nocapture readonly %a) {
 entry:
   %add.ptr = getelementptr inbounds i8, i8* %a, i64 73333
@@ -116,15 +292,44 @@ entry:
   store fp128 %conv, fp128* %sink, align 16
   ret fp128* %sink
 
-; CHECK-LABEL: sdwConv2qp_testXForm
-; CHECK: lxsdx v[[REG:[0-9]+]],
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define fp128* @udwConv2qp_testXForm(fp128* returned %sink,
+; CHECK-LABEL: udwConv2qp_testXForm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lis r5, 1
+; CHECK-NEXT:    ori r5, r5, 7797
+; CHECK-NEXT:    lxsdx v2, r4, r5
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: udwConv2qp_testXForm:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    lis r5, 1
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ori r5, r5, 7797
+; CHECK-P8-NEXT:    ldx r4, r4, r5
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatundikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r5, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                                     i8* nocapture readonly %a) {
 entry:
   %add.ptr = getelementptr inbounds i8, i8* %a, i64 73333
@@ -134,46 +339,117 @@ entry:
   store fp128 %conv, fp128* %sink, align 16
   ret fp128* %sink
 
-; CHECK-LABEL: udwConv2qp_testXForm
-; CHECK: lxsdx v[[REG:[0-9]+]],
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @swConv2qp(fp128* nocapture %a, i32 signext %b) {
+; CHECK-LABEL: swConv2qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mtvsrwa v2, r4
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: swConv2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i32 %b to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: swConv2qp
-; CHECK-NOT: lwz
-; CHECK: mtvsrwa v[[REG:[0-9]+]], r4
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @swConv2qp_02(fp128* nocapture %a, i32* nocapture readonly %b) {
+; CHECK-LABEL: swConv2qp_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxsiwax v2, 0, r4
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: swConv2qp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    lwa r4, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i32, i32* %b, align 4
   %conv = sitofp i32 %0 to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: swConv2qp_02
-; CHECK-NOT: lwz
-; CHECK: lxsiwax v[[REG:[0-9]+]], 0, r4
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @swConv2qp_03(fp128* nocapture %a) {
+; CHECK-LABEL: swConv2qp_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC2 at toc@ha
+; CHECK-NEXT:    ld r4, .LC2 at toc@l(r4)
+; CHECK-NEXT:    addi r4, r4, 12
+; CHECK-NEXT:    lxsiwax v2, 0, r4
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: swConv2qp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC2 at toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC2 at toc@l(r4)
+; CHECK-P8-NEXT:    lwa r4, 12(r4)
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i32, i32* getelementptr inbounds
                         ([5 x i32], [5 x i32]* @swMem, i64 0, i64 3), align 4
@@ -181,49 +457,117 @@ entry:
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: swConv2qp_03
-; CHECK: addis r[[REG:[0-9]+]], r2, .LC2 at toc@ha
-; CHECK: ld r[[REG]], .LC2 at toc@l(r[[REG]])
-; CHECK: addi r[[REG2:[0-9]+]], r[[REG]], 12
-; CHECK: lxsiwax v[[REG0:[0-9]+]], 0, r[[REG2]]
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG0]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @uwConv2qp(fp128* nocapture %a, i32 zeroext %b) {
+; CHECK-LABEL: uwConv2qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mtvsrwz v2, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: uwConv2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i32 %b to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: uwConv2qp
-; CHECK-NOT: lwz
-; CHECK: mtvsrwz v[[REG:[0-9]+]], r4
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @uwConv2qp_02(fp128* nocapture %a, i32* nocapture readonly %b) {
+; CHECK-LABEL: uwConv2qp_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxsiwzx v2, 0, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: uwConv2qp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    lwz r4, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i32, i32* %b, align 4
   %conv = uitofp i32 %0 to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: uwConv2qp_02
-; CHECK-NOT: lwz
-; CHECK: lxsiwzx v[[REG:[0-9]+]], 0, r4
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @uwConv2qp_03(fp128* nocapture %a) {
+; CHECK-LABEL: uwConv2qp_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC3 at toc@ha
+; CHECK-NEXT:    ld r4, .LC3 at toc@l(r4)
+; CHECK-NEXT:    addi r4, r4, 12
+; CHECK-NEXT:    lxsiwzx v2, 0, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: uwConv2qp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC3 at toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC3 at toc@l(r4)
+; CHECK-P8-NEXT:    lwz r4, 12(r4)
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i32, i32* getelementptr inbounds
                         ([5 x i32], [5 x i32]* @uwMem, i64 0, i64 3), align 4
@@ -231,18 +575,41 @@ entry:
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: uwConv2qp_03
-; CHECK: addis r[[REG:[0-9]+]], r2, .LC3 at toc@ha
-; CHECK-NEXT: ld r[[REG]], .LC3 at toc@l(r[[REG]])
-; CHECK-NEXT: addi r[[REG2:[0-9]+]], r[[REG]], 12
-; CHECK-NEXT: lxsiwzx v[[REG1:[0-9]+]], 0, r[[REG2]]
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG1]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @uwConv2qp_04(fp128* nocapture %a,
+; CHECK-LABEL: uwConv2qp_04:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lwz r5, 0(r5)
+; CHECK-NEXT:    add r4, r5, r4
+; CHECK-NEXT:    mtvsrwz v2, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: uwConv2qp_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    lwz r3, 0(r5)
+; CHECK-P8-NEXT:    add r3, r3, r4
+; CHECK-P8-NEXT:    clrldi r3, r3, 32
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                           i32 zeroext %b, i32* nocapture readonly %c) {
 entry:
   %0 = load i32, i32* %c, align 4
@@ -251,47 +618,116 @@ entry:
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: uwConv2qp_04
-; CHECK: lwz r[[REG:[0-9]+]], 0(r5)
-; CHECK-NEXT: add r[[REG1:[0-9]+]], r[[REG]], r[[REG1]]
-; CHECK-NEXT: mtvsrwz v[[REG0:[0-9]+]], r[[REG1]]
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG0]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @uhwConv2qp(fp128* nocapture %a, i16 zeroext %b) {
+; CHECK-LABEL: uhwConv2qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mtvsrwz v2, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: uhwConv2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i16 %b to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
 
-; CHECK-LABEL: uhwConv2qp
-; CHECK: mtvsrwz v[[REG:[0-9]+]], r4
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @uhwConv2qp_02(fp128* nocapture %a, i16* nocapture readonly %b) {
+; CHECK-LABEL: uhwConv2qp_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxsihzx v2, 0, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: uhwConv2qp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    lhz r3, 0(r4)
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i16, i16* %b, align 2
   %conv = uitofp i16 %0 to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: uhwConv2qp_02
-; CHECK: lxsihzx v[[REG:[0-9]+]], 0, r4
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @uhwConv2qp_03(fp128* nocapture %a) {
+; CHECK-LABEL: uhwConv2qp_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC4 at toc@ha
+; CHECK-NEXT:    ld r4, .LC4 at toc@l(r4)
+; CHECK-NEXT:    addi r4, r4, 6
+; CHECK-NEXT:    lxsihzx v2, 0, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: uhwConv2qp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    addis r3, r2, .LC4 at toc@ha
+; CHECK-P8-NEXT:    ld r3, .LC4 at toc@l(r3)
+; CHECK-P8-NEXT:    lhz r3, 6(r3)
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i16, i16* getelementptr inbounds
                         ([5 x i16], [5 x i16]* @uhwMem, i64 0, i64 3), align 2
@@ -299,18 +735,41 @@ entry:
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: uhwConv2qp_03
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC4 at toc@ha
-; CHECK: ld r[[REG0]], .LC4 at toc@l(r[[REG0]])
-; CHECK: addi r[[REG0]], r[[REG0]], 6
-; CHECK: lxsihzx v[[REG:[0-9]+]], 0, r[[REG0]]
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @uhwConv2qp_04(fp128* nocapture %a, i16 zeroext %b,
+; CHECK-LABEL: uhwConv2qp_04:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lhz r5, 0(r5)
+; CHECK-NEXT:    add r4, r5, r4
+; CHECK-NEXT:    mtvsrwa v2, r4
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: uhwConv2qp_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    lhz r3, 0(r5)
+; CHECK-P8-NEXT:    add r3, r3, r4
+; CHECK-P8-NEXT:    clrldi r3, r3, 32
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                            i16* nocapture readonly %c) {
 entry:
   %conv = zext i16 %b to i32
@@ -321,65 +780,157 @@ entry:
   store fp128 %conv2, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: uhwConv2qp_04
-; CHECK: lhz r[[REG0:[0-9]+]], 0(r5)
-; CHECK: add r4, r[[REG0]], r4
-; CHECK: mtvsrwa v[[REG:[0-9]+]], r4
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @ubConv2qp(fp128* nocapture %a, i8 zeroext %b) {
+; CHECK-LABEL: ubConv2qp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mtvsrwz v2, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: ubConv2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r3, r4
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i8 %b to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: ubConv2qp
-; CHECK: mtvsrwz v[[REG:[0-9]+]], r4
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @ubConv2qp_02(fp128* nocapture %a, i8* nocapture readonly %b) {
+; CHECK-LABEL: ubConv2qp_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxsibzx v2, 0, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: ubConv2qp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    lbz r3, 0(r4)
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load i8, i8* %b, align 1
   %conv = uitofp i8 %0 to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: ubConv2qp_02
-; CHECK: lxsibzx v[[REG:[0-9]+]], 0, r4
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @ubConv2qp_03(fp128* nocapture %a) {
+; CHECK-LABEL: ubConv2qp_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC5 at toc@ha
+; CHECK-NEXT:    ld r4, .LC5 at toc@l(r4)
+; CHECK-NEXT:    addi r4, r4, 2
+; CHECK-NEXT:    lxsibzx v2, 0, r4
+; CHECK-NEXT:    xscvudqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: ubConv2qp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    addis r3, r2, .LC5 at toc@ha
+; CHECK-P8-NEXT:    ld r3, .LC5 at toc@l(r3)
+; CHECK-P8-NEXT:    lbz r3, 2(r3)
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds 
+  %0 = load i8, i8* getelementptr inbounds
                       ([5 x i8], [5 x i8]* @ubMem, i64 0, i64 2), align 1
   %conv = uitofp i8 %0 to fp128
   store fp128 %conv, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: ubConv2qp_03
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC5 at toc@ha
-; CHECK: ld r[[REG0]], .LC5 at toc@l(r[[REG0]])
-; CHECK: addi r[[REG0]], r[[REG0]], 2
-; CHECK: lxsibzx v[[REG:[0-9]+]], 0, r[[REG0]]
-; CHECK-NEXT: xscvudqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @ubConv2qp_04(fp128* nocapture %a, i8 zeroext %b,
+; CHECK-LABEL: ubConv2qp_04:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lbz r5, 0(r5)
+; CHECK-NEXT:    add r4, r5, r4
+; CHECK-NEXT:    mtvsrwa v2, r4
+; CHECK-NEXT:    xscvsdqp v2, v2
+; CHECK-NEXT:    stxv v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: ubConv2qp_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    lbz r3, 0(r5)
+; CHECK-P8-NEXT:    add r3, r3, r4
+; CHECK-P8-NEXT:    clrldi r3, r3, 32
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                           i8* nocapture readonly %c) {
 entry:
   %conv = zext i8 %b to i32
@@ -390,13 +941,6 @@ entry:
   store fp128 %conv2, fp128* %a, align 16
   ret void
 
-; CHECK-LABEL: ubConv2qp_04
-; CHECK: lbz r[[REG0:[0-9]+]], 0(r5)
-; CHECK: add r4, r[[REG0]], r4
-; CHECK: mtvsrwa v[[REG:[0-9]+]], r4
-; CHECK-NEXT: xscvsdqp v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxv v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ;  Convert QP to DP
@@ -416,6 +960,23 @@ define double @qpConv2dp(fp128* nocapture readonly %a) {
 ; CHECK-NEXT:    xscvqpdp v2, v2
 ; CHECK-NEXT:    xscpsgndp f1, v2, v2
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2dp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __trunckfdf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptrunc fp128 %0 to double
@@ -432,6 +993,30 @@ define void @qpConv2dp_02(double* nocapture %res) {
 ; CHECK-NEXT:    xscvqpdp v2, v2
 ; CHECK-NEXT:    stxsd v2, 0(r3)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2dp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC6@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC6@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 0(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __trunckfdf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stfdx f1, 0, r30
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @f128global, align 16
   %conv = fptrunc fp128 %0 to double
@@ -450,6 +1035,35 @@ define void @qpConv2dp_03(double* nocapture %res, i32 signext %idx) {
 ; CHECK-NEXT:    xscvqpdp v2, v2
 ; CHECK-NEXT:    stxsdx v2, r3, r4
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2dp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LC7@toc@ha
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    ld r4, .LC7@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 0(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __trunckfdf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sldi r3, r30, 3
+; CHECK-P8-NEXT:    stfdx f1, r29, r3
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 0), align 16
   %conv = fptrunc fp128 %0 to double
@@ -469,6 +1083,34 @@ define void @qpConv2dp_04(fp128* nocapture readonly %a, fp128* nocapture readonl
 ; CHECK-NEXT:    xscvqpdp v2, v2
 ; CHECK-NEXT:    stxsd v2, 0(r5)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2dp_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __trunckfdf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stfdx f1, 0, r30
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16
@@ -488,6 +1130,23 @@ define float @qpConv2sp(fp128* nocapture readonly %a) {
 ; CHECK-NEXT:    xscvqpdpo v2, v2
 ; CHECK-NEXT:    xsrsp f1, v2
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __trunckfsf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptrunc fp128 %0 to float
@@ -505,6 +1164,30 @@ define void @qpConv2sp_02(float* nocapture %res) {
 ; CHECK-NEXT:    xsrsp f0, v2
 ; CHECK-NEXT:    stfs f0, 0(r3)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC6@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC6@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 0(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __trunckfsf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stfsx f1, 0, r30
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* @f128global, align 16
   %conv = fptrunc fp128 %0 to float
@@ -524,6 +1207,35 @@ define void @qpConv2sp_03(float* nocapture %res, i32 signext %idx) {
 ; CHECK-NEXT:    xsrsp f0, v2
 ; CHECK-NEXT:    stfsx f0, r3, r4
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LC7@toc@ha
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    ld r4, .LC7@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 48(r4)
+; CHECK-P8-NEXT:    ld r4, 56(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __trunckfsf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sldi r3, r30, 2
+; CHECK-P8-NEXT:    stfsx f1, r29, r3
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 3), align 16
   %conv = fptrunc fp128 %0 to float
@@ -544,6 +1256,34 @@ define void @qpConv2sp_04(fp128* nocapture readonly %a, fp128* nocapture readonl
 ; CHECK-NEXT:    xsrsp f0, v2
 ; CHECK-NEXT:    stfs f0, 0(r5)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sp_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __trunckfsf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stfsx f1, 0, r30
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16
@@ -562,6 +1302,20 @@ define fp128 @dpConv2qp(double %a) {
 ; CHECK-NEXT:    xscpsgndp v2, f1, f1
 ; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: dpConv2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    bl __extenddfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fpext double %a to fp128
   ret fp128 %conv
@@ -577,6 +1331,25 @@ define void @dpConv2qp_02(double* nocapture readonly %a) {
 ; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    stxvx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: dpConv2qp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    lfdx f1, 0, r3
+; CHECK-P8-NEXT:    bl __extenddfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addis r5, r2, .LC8@toc@ha
+; CHECK-P8-NEXT:    ld r5, .LC8@toc@l(r5)
+; CHECK-P8-NEXT:    std r4, 8(r5)
+; CHECK-P8-NEXT:    std r3, 0(r5)
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load double, double* %a, align 8
   %conv = fpext double %0 to fp128
@@ -595,6 +1368,26 @@ define void @dpConv2qp_02b(double* nocapture readonly %a, i32 signext %idx) {
 ; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    stxvx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: dpConv2qp_02b:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    sldi r4, r4, 3
+; CHECK-P8-NEXT:    lfdx f1, r3, r4
+; CHECK-P8-NEXT:    bl __extenddfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addis r5, r2, .LC8@toc@ha
+; CHECK-P8-NEXT:    ld r5, .LC8@toc@l(r5)
+; CHECK-P8-NEXT:    std r4, 8(r5)
+; CHECK-P8-NEXT:    std r3, 0(r5)
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %idxprom = sext i32 %idx to i64
   %arrayidx = getelementptr inbounds double, double* %a, i64 %idxprom
@@ -609,10 +1402,35 @@ define void @dpConv2qp_03(fp128* nocapture %res, i32 signext %idx, double %a) {
 ; CHECK-LABEL: dpConv2qp_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscpsgndp v2, f1, f1
-; CHECK-DAG:     sldi r4, r4, 4
-; CHECK-DAG:     xscvdpqp v2, v2
+; CHECK-NEXT:    sldi r4, r4, 4
+; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    stxvx v2, r3, r4
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: dpConv2qp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    bl __extenddfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sldi r5, r30, 4
+; CHECK-P8-NEXT:    stdux r3, r29, r5
+; CHECK-P8-NEXT:    std r4, 8(r29)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fpext double %a to fp128
   %idxprom = sext i32 %idx to i64
@@ -629,6 +1447,26 @@ define void @dpConv2qp_04(double %a, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: dpConv2qp_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    bl __extenddfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fpext double %a to fp128
   store fp128 %conv, fp128* %res, align 16
@@ -642,6 +1480,20 @@ define fp128 @spConv2qp(float %a) {
 ; CHECK-NEXT:    xscpsgndp v2, f1, f1
 ; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: spConv2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    bl __extendsfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fpext float %a to fp128
   ret fp128 %conv
@@ -657,6 +1509,25 @@ define void @spConv2qp_02(float* nocapture readonly %a) {
 ; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    stxvx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: spConv2qp_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    lfsx f1, 0, r3
+; CHECK-P8-NEXT:    bl __extendsfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addis r5, r2, .LC8@toc@ha
+; CHECK-P8-NEXT:    ld r5, .LC8@toc@l(r5)
+; CHECK-P8-NEXT:    std r4, 8(r5)
+; CHECK-P8-NEXT:    std r3, 0(r5)
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load float, float* %a, align 4
   %conv = fpext float %0 to fp128
@@ -675,6 +1546,26 @@ define void @spConv2qp_02b(float* nocapture readonly %a, i32 signext %idx) {
 ; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    stxvx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: spConv2qp_02b:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    sldi r4, r4, 2
+; CHECK-P8-NEXT:    lfsx f1, r3, r4
+; CHECK-P8-NEXT:    bl __extendsfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addis r5, r2, .LC8@toc@ha
+; CHECK-P8-NEXT:    ld r5, .LC8@toc@l(r5)
+; CHECK-P8-NEXT:    std r4, 8(r5)
+; CHECK-P8-NEXT:    std r3, 0(r5)
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %idxprom = sext i32 %idx to i64
   %arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
@@ -689,10 +1580,35 @@ define void @spConv2qp_03(fp128* nocapture %res, i32 signext %idx, float %a) {
 ; CHECK-LABEL: spConv2qp_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscpsgndp v2, f1, f1
-; CHECK-DAG:     sldi r4, r4, 4
-; CHECK-DAG:     xscvdpqp v2, v2
+; CHECK-NEXT:    sldi r4, r4, 4
+; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    stxvx v2, r3, r4
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: spConv2qp_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    bl __extendsfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sldi r5, r30, 4
+; CHECK-P8-NEXT:    stdux r3, r29, r5
+; CHECK-P8-NEXT:    std r4, 8(r29)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fpext float %a to fp128
   %idxprom = sext i32 %idx to i64
@@ -709,13 +1625,32 @@ define void @spConv2qp_04(float %a, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvdpqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: spConv2qp_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    bl __extendsfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fpext float %a to fp128
   store fp128 %conv, fp128* %res, align 16
   ret void
 }
 
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 
 ; Function Attrs: norecurse nounwind
 define void @cvdp2sw2qp(double %val, fp128* nocapture %res) {
@@ -726,6 +1661,29 @@ define void @cvdp2sw2qp(double %val, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvsdqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: cvdp2sw2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    xscvdpsxws f0, f1
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mffprwz r3, f0
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %val to i32
   %conv1 = sitofp i32 %conv to fp128
@@ -741,6 +1699,28 @@ define void @cvdp2sdw2qp(double %val, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvsdqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: cvdp2sdw2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    xscvdpsxds f0, f1
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mffprd r3, f0
+; CHECK-P8-NEXT:    bl __floatdikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %val to i64
   %conv1 = sitofp i64 %conv to fp128
@@ -757,6 +1737,29 @@ define void @cvsp2sw2qp(float %val, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvsdqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: cvsp2sw2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    xscvdpsxws f0, f1
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mffprwz r3, f0
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %val to i32
   %conv1 = sitofp i32 %conv to fp128
@@ -772,6 +1775,28 @@ define void @cvsp2sdw2qp(float %val, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvsdqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: cvsp2sdw2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    xscvdpsxds f0, f1
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mffprd r3, f0
+; CHECK-P8-NEXT:    bl __floatdikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %val to i64
   %conv1 = sitofp i64 %conv to fp128
@@ -788,6 +1813,29 @@ define void @cvdp2uw2qp(double %val, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvudqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: cvdp2uw2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    xscvdpuxws f0, f1
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mffprwz r3, f0
+; CHECK-P8-NEXT:    clrldi r3, r3, 32
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %val to i32
   %conv1 = uitofp i32 %conv to fp128
@@ -803,6 +1851,28 @@ define void @cvdp2udw2qp(double %val, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvudqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: cvdp2udw2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    xscvdpuxds f0, f1
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mffprd r3, f0
+; CHECK-P8-NEXT:    bl __floatundikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %val to i64
   %conv1 = uitofp i64 %conv to fp128
@@ -819,6 +1889,29 @@ define void @cvsp2uw2qp(float %val, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvudqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: cvsp2uw2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    xscvdpuxws f0, f1
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mffprwz r3, f0
+; CHECK-P8-NEXT:    clrldi r3, r3, 32
+; CHECK-P8-NEXT:    bl __floatunsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %val to i32
   %conv1 = uitofp i32 %conv to fp128
@@ -834,6 +1927,28 @@ define void @cvsp2udw2qp(float %val, fp128* nocapture %res) {
 ; CHECK-NEXT:    xscvudqp v2, v2
 ; CHECK-NEXT:    stxv v2, 0(r4)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: cvsp2udw2qp:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    xscvdpuxds f0, f1
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mffprd r3, f0
+; CHECK-P8-NEXT:    bl __floatundikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %val to i64
   %conv1 = uitofp i64 %conv to fp128

diff  --git a/llvm/test/CodeGen/PowerPC/f128-fma.ll b/llvm/test/CodeGen/PowerPC/f128-fma.ll
index f63ae04699f4..e26867d1d734 100644
--- a/llvm/test/CodeGen/PowerPC/f128-fma.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-fma.ll
@@ -1,7 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
+; RUN:   -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s | FileCheck %s \
+; RUN:   -check-prefix=CHECK-P8
 
 define void @qpFmadd(fp128* nocapture readonly %a, fp128* nocapture %b,
+; CHECK-LABEL: qpFmadd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsmaddqp v4, v2, v3
+; CHECK-NEXT:    stxv v4, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFmadd:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r8, 8(r3)
+; CHECK-P8-NEXT:    ld r9, 0(r4)
+; CHECK-P8-NEXT:    ld r10, 8(r4)
+; CHECK-P8-NEXT:    mr r28, r6
+; CHECK-P8-NEXT:    ld r30, 0(r5)
+; CHECK-P8-NEXT:    ld r29, 8(r5)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    mr r4, r8
+; CHECK-P8-NEXT:    mr r5, r9
+; CHECK-P8-NEXT:    mr r6, r10
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r30
+; CHECK-P8-NEXT:    mr r6, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r28)
+; CHECK-P8-NEXT:    std r4, 8(r28)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                    fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -10,19 +62,59 @@ entry:
   %madd = tail call fp128 @llvm.fmuladd.f128(fp128 %0, fp128 %1, fp128 %2)
   store fp128 %madd, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFmadd
-; CHECK-NOT: bl fmal
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsmaddqp v[[REG5]], v[[REG3]], v[[REG4]]
-; CHECK-NEXT: stxv v[[REG5]], 0(r6)
-; CHECK-NEXT: blr
 }
 declare fp128 @llvm.fmuladd.f128(fp128, fp128, fp128)
 
 ; Function Attrs: norecurse nounwind
 define void @qpFmadd_02(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpFmadd_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsmaddqp v2, v3, v4
+; CHECK-NEXT:    stxv v2, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFmadd_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r7, 0(r5)
+; CHECK-P8-NEXT:    ld r8, 8(r5)
+; CHECK-P8-NEXT:    ld r30, 0(r3)
+; CHECK-P8-NEXT:    ld r29, 8(r3)
+; CHECK-P8-NEXT:    mr r28, r6
+; CHECK-P8-NEXT:    ld r3, 0(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    mr r5, r7
+; CHECK-P8-NEXT:    mr r6, r8
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r28)
+; CHECK-P8-NEXT:    std r4, 8(r28)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                         fp128* nocapture readonly %b,
                         fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
@@ -33,18 +125,53 @@ entry:
   %add = fadd contract fp128 %0, %mul
   store fp128 %add, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFmadd_02
-; CHECK-NOT: bl __multf3
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsmaddqp v[[REG3]], v[[REG4]], v[[REG5]]
-; CHECK-NEXT: stxv v[[REG3]], 0(r6)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpFmadd_03(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpFmadd_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsmaddqp v4, v2, v3
+; CHECK-NEXT:    stxv v4, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFmadd_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r6
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r29, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    ld r5, 0(r29)
+; CHECK-P8-NEXT:    ld r6, 8(r29)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                         fp128* nocapture readonly %b,
                         fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
@@ -55,18 +182,61 @@ entry:
   %add = fadd contract fp128 %mul, %2
   store fp128 %add, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFmadd_03
-; CHECK-NOT: bl __multf3
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsmaddqp v[[REG5]], v[[REG3]], v[[REG4]]
-; CHECK-NEXT: stxv v[[REG5]], 0(r6)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpFnmadd(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpFnmadd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsnmaddqp v2, v3, v4
+; CHECK-NEXT:    stxv v2, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFnmadd:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r7, 0(r5)
+; CHECK-P8-NEXT:    ld r8, 8(r5)
+; CHECK-P8-NEXT:    ld r30, 0(r3)
+; CHECK-P8-NEXT:    ld r29, 8(r3)
+; CHECK-P8-NEXT:    mr r28, r6
+; CHECK-P8-NEXT:    ld r3, 0(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    mr r5, r7
+; CHECK-P8-NEXT:    mr r6, r8
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    li r5, 1
+; CHECK-P8-NEXT:    std r3, 0(r28)
+; CHECK-P8-NEXT:    sldi r5, r5, 63
+; CHECK-P8-NEXT:    xor r4, r4, r5
+; CHECK-P8-NEXT:    std r4, 8(r28)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                       fp128* nocapture readonly %b,
                       fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
@@ -78,18 +248,56 @@ entry:
   %sub = fsub fp128 0xL00000000000000008000000000000000, %add
   store fp128 %sub, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFnmadd
-; CHECK-NOT: bl __multf3
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsnmaddqp v[[REG3]], v[[REG4]], v[[REG5]]
-; CHECK-NEXT: stxv v[[REG3]], 0(r6)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpFnmadd_02(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpFnmadd_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsnmaddqp v4, v2, v3
+; CHECK-NEXT:    stxv v4, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFnmadd_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r6
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r29, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    ld r5, 0(r29)
+; CHECK-P8-NEXT:    ld r6, 8(r29)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    li r5, 1
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    sldi r5, r5, 63
+; CHECK-P8-NEXT:    xor r4, r4, r5
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                       fp128* nocapture readonly %b,
                       fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
@@ -101,18 +309,58 @@ entry:
   %sub = fsub fp128 0xL00000000000000008000000000000000, %add
   store fp128 %sub, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFnmadd_02
-; CHECK-NOT: bl __multf3
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsnmaddqp v[[REG5]], v[[REG3]], v[[REG4]]
-; CHECK-NEXT: stxv v[[REG5]], 0(r6)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpFmsub(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpFmsub:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsnmsubqp v2, v3, v4
+; CHECK-NEXT:    stxv v2, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFmsub:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r7, 0(r5)
+; CHECK-P8-NEXT:    ld r8, 8(r5)
+; CHECK-P8-NEXT:    ld r30, 0(r3)
+; CHECK-P8-NEXT:    ld r29, 8(r3)
+; CHECK-P8-NEXT:    mr r28, r6
+; CHECK-P8-NEXT:    ld r3, 0(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    mr r5, r7
+; CHECK-P8-NEXT:    mr r6, r8
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __subkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r28)
+; CHECK-P8-NEXT:    std r4, 8(r28)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                       fp128* nocapture readonly %b,
                       fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
@@ -123,18 +371,53 @@ entry:
   %sub = fsub contract nsz fp128 %0, %mul
   store fp128 %sub, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFmsub
-; CHECK-NOT: bl __multf3
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsnmsubqp v[[REG3]], v[[REG4]], v[[REG5]]
-; CHECK-NEXT: stxv v[[REG3]], 0(r6)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpFmsub_02(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpFmsub_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsmsubqp v4, v2, v3
+; CHECK-NEXT:    stxv v4, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFmsub_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r6
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r29, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    ld r5, 0(r29)
+; CHECK-P8-NEXT:    ld r6, 8(r29)
+; CHECK-P8-NEXT:    bl __subkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                       fp128* nocapture readonly %b,
                       fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
@@ -145,18 +428,62 @@ entry:
   %sub = fsub contract fp128 %mul, %2
   store fp128 %sub, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFmsub_02
-; CHECK-NOT: bl __multf3
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsmsubqp v[[REG5]], v[[REG3]], v[[REG4]]
-; CHECK-NEXT: stxv v[[REG5]], 0(r6)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpFnmsub(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpFnmsub:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsnegqp v3, v3
+; CHECK-NEXT:    xsnmaddqp v2, v3, v4
+; CHECK-NEXT:    stxv v2, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFnmsub:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r7, 0(r5)
+; CHECK-P8-NEXT:    ld r8, 8(r5)
+; CHECK-P8-NEXT:    ld r30, 0(r3)
+; CHECK-P8-NEXT:    ld r29, 8(r3)
+; CHECK-P8-NEXT:    mr r28, r6
+; CHECK-P8-NEXT:    ld r3, 0(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    mr r5, r7
+; CHECK-P8-NEXT:    mr r6, r8
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __subkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    li r5, 1
+; CHECK-P8-NEXT:    std r3, 0(r28)
+; CHECK-P8-NEXT:    sldi r5, r5, 63
+; CHECK-P8-NEXT:    xor r4, r4, r5
+; CHECK-P8-NEXT:    std r4, 8(r28)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                       fp128* nocapture readonly %b,
                       fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
@@ -168,19 +495,56 @@ entry:
   %sub1 = fsub fp128 0xL00000000000000008000000000000000, %sub
   store fp128 %sub1, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFnmsub
-; CHECK-NOT: bl __multf3
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsnegqp v[[REG4]], v[[REG4]]
-; CHECK: xsnmaddqp v[[REG3]], v[[REG4]], v[[REG5]]
-; CHECK-NEXT: stxv v[[REG3]], 0(r6)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpFnmsub_02(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpFnmsub_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    lxv v4, 0(r5)
+; CHECK-NEXT:    xsnmsubqp v4, v2, v3
+; CHECK-NEXT:    stxv v4, 0(r6)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpFnmsub_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    mr r30, r6
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r29, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __mulkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    ld r5, 0(r29)
+; CHECK-P8-NEXT:    ld r6, 8(r29)
+; CHECK-P8-NEXT:    bl __subkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    li r5, 1
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    sldi r5, r5, 63
+; CHECK-P8-NEXT:    xor r4, r4, r5
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                       fp128* nocapture readonly %b,
                       fp128* nocapture readonly %c, fp128* nocapture %res) {
 entry:
@@ -192,12 +556,4 @@ entry:
   %sub1 = fsub fp128 0xL00000000000000008000000000000000, %sub
   store fp128 %sub1, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qpFnmsub_02
-; CHECK-NOT: bl __multf3
-; CHECK-DAG: lxv v[[REG3:[0-9]+]], 0(r3)
-; CHECK-DAG: lxv v[[REG4:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG5:[0-9]+]], 0(r5)
-; CHECK: xsnmsubqp v[[REG5]], v[[REG3]], v[[REG4]]
-; CHECK-NEXT: stxv v[[REG5]], 0(r6)
-; CHECK-NEXT: blr
 }

diff  --git a/llvm/test/CodeGen/PowerPC/f128-passByValue.ll b/llvm/test/CodeGen/PowerPC/f128-passByValue.ll
index 0a8fc1b0e777..ceae50431fdf 100644
--- a/llvm/test/CodeGen/PowerPC/f128-passByValue.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-passByValue.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -ppc-vsr-nums-as-vr \
 ; RUN:   -verify-machineinstrs -ppc-asm-full-reg-names < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -ppc-vsr-nums-as-vr \
+; RUN:   -verify-machineinstrs -ppc-asm-full-reg-names < %s | FileCheck %s \
+; RUN:   -check-prefix=CHECK-P8
 
 ; Function Attrs: norecurse nounwind readnone
 define fp128 @loadConstant() {
@@ -10,6 +13,14 @@ define fp128 @loadConstant() {
 ; CHECK-NEXT:    addi r3, r3, .LCPI0_0@toc@l
 ; CHECK-NEXT:    lxvx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: loadConstant:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    lis r3, 1
+; CHECK-P8-NEXT:    ori r3, r3, 5
+; CHECK-P8-NEXT:    sldi r4, r3, 46
+; CHECK-P8-NEXT:    li r3, 0
+; CHECK-P8-NEXT:    blr
   entry:
     ret fp128 0xL00000000000000004001400000000000
 }
@@ -24,6 +35,26 @@ define fp128 @loadConstant2(fp128 %a, fp128 %b) {
 ; CHECK-NEXT:    lxvx v3, 0, r3
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: loadConstant2:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    lis r5, 1
+; CHECK-P8-NEXT:    ori r5, r5, 5
+; CHECK-P8-NEXT:    sldi r6, r5, 46
+; CHECK-P8-NEXT:    li r5, 0
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
   entry:
     %add = fadd fp128 %a, %b
       %add1 = fadd fp128 %add, 0xL00000000000000004001400000000000
@@ -39,6 +70,21 @@ define signext i32 @fp128Param(fp128 %a) {
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: fp128Param:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi fp128 %a to i32
   ret i32 %conv
@@ -51,6 +97,20 @@ define fp128 @fp128Return(fp128 %a, fp128 %b) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: fp128Return:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %add = fadd fp128 %a, %b
   ret fp128 %add
@@ -67,6 +127,27 @@ define fp128 @fp128Array(fp128* nocapture readonly %farray,
 ; CHECK-NEXT:    lxv v3, -16(r3)
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: fp128Array:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    sldi r4, r4, 4
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    add r6, r3, r4
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    ld r5, -16(r6)
+; CHECK-P8-NEXT:    ld r6, -8(r6)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                          i32 signext %loopcnt, fp128* nocapture readnone %sum) {
 entry:
   %0 = load fp128, fp128* %farray, align 16
@@ -98,6 +179,144 @@ define fp128 @maxVecParam(fp128 %p1, fp128 %p2, fp128 %p3, fp128 %p4, fp128 %p5,
 ; CHECK-NEXT:    xsaddqp v2, v2, v13
 ; CHECK-NEXT:    xssubqp v2, v2, v0
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: maxVecParam:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 208
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r14, -144
+; CHECK-P8-NEXT:    .cfi_offset r15, -136
+; CHECK-P8-NEXT:    .cfi_offset r16, -128
+; CHECK-P8-NEXT:    .cfi_offset r17, -120
+; CHECK-P8-NEXT:    .cfi_offset r18, -112
+; CHECK-P8-NEXT:    .cfi_offset r19, -104
+; CHECK-P8-NEXT:    .cfi_offset r20, -96
+; CHECK-P8-NEXT:    .cfi_offset r21, -88
+; CHECK-P8-NEXT:    .cfi_offset r22, -80
+; CHECK-P8-NEXT:    .cfi_offset r23, -72
+; CHECK-P8-NEXT:    .cfi_offset r24, -64
+; CHECK-P8-NEXT:    .cfi_offset r25, -56
+; CHECK-P8-NEXT:    .cfi_offset r26, -48
+; CHECK-P8-NEXT:    .cfi_offset r27, -40
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    .cfi_offset r31, -8
+; CHECK-P8-NEXT:    std r14, -144(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r15, -136(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r16, -128(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r17, -120(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r18, -112(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r19, -104(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r20, -96(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r21, -88(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r22, -80(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r23, -72(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r24, -64(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r31, -8(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -208(r1)
+; CHECK-P8-NEXT:    mr r17, r7
+; CHECK-P8-NEXT:    ld r7, 432(r1)
+; CHECK-P8-NEXT:    ld r26, 400(r1)
+; CHECK-P8-NEXT:    ld r25, 408(r1)
+; CHECK-P8-NEXT:    ld r24, 384(r1)
+; CHECK-P8-NEXT:    mr r20, r10
+; CHECK-P8-NEXT:    ld r23, 392(r1)
+; CHECK-P8-NEXT:    ld r22, 368(r1)
+; CHECK-P8-NEXT:    ld r21, 376(r1)
+; CHECK-P8-NEXT:    ld r16, 352(r1)
+; CHECK-P8-NEXT:    mr r19, r9
+; CHECK-P8-NEXT:    mr r18, r8
+; CHECK-P8-NEXT:    ld r15, 360(r1)
+; CHECK-P8-NEXT:    ld r14, 336(r1)
+; CHECK-P8-NEXT:    ld r31, 344(r1)
+; CHECK-P8-NEXT:    ld r30, 320(r1)
+; CHECK-P8-NEXT:    std r7, 56(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    ld r7, 440(r1)
+; CHECK-P8-NEXT:    ld r29, 328(r1)
+; CHECK-P8-NEXT:    ld r28, 304(r1)
+; CHECK-P8-NEXT:    ld r27, 312(r1)
+; CHECK-P8-NEXT:    std r7, 48(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    ld r7, 416(r1)
+; CHECK-P8-NEXT:    std r7, 40(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    ld r7, 424(r1)
+; CHECK-P8-NEXT:    std r7, 32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r17
+; CHECK-P8-NEXT:    mr r6, r18
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r19
+; CHECK-P8-NEXT:    mr r6, r20
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r28
+; CHECK-P8-NEXT:    mr r6, r27
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r30
+; CHECK-P8-NEXT:    mr r6, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r14
+; CHECK-P8-NEXT:    mr r6, r31
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r16
+; CHECK-P8-NEXT:    mr r6, r15
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r22
+; CHECK-P8-NEXT:    mr r6, r21
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r24
+; CHECK-P8-NEXT:    mr r6, r23
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r26
+; CHECK-P8-NEXT:    mr r6, r25
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    ld r5, 40(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r6, 32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    ld r5, 56(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r6, 48(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    bl __subkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 208
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r31, -8(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r24, -64(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r23, -72(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r22, -80(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r21, -88(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    ld r20, -96(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r19, -104(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r18, -112(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r17, -120(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r16, -128(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r15, -136(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r14, -144(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    blr
                           fp128 %p6, fp128 %p7, fp128 %p8, fp128 %p9, fp128 %p10,
                           fp128 %p11, fp128 %p12, fp128 %p13) {
 entry:
@@ -126,6 +345,43 @@ define fp128 @mixParam_01(fp128 %a, i32 signext %i, fp128 %b) {
 ; CHECK-NEXT:    xscvsdqp v3, v3
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: mixParam_01:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r5, r6
+; CHECK-P8-NEXT:    mr r6, r7
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r28, r4
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r29
+; CHECK-P8-NEXT:    mr r4, r28
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %add = fadd fp128 %a, %b
   %conv = sitofp i32 %i to fp128
@@ -141,6 +397,43 @@ define fastcc fp128 @mixParam_01f(fp128 %a, i32 signext %i, fp128 %b) {
 ; CHECK-NEXT:    xscvsdqp v3, v3
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: mixParam_01f:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r5, r6
+; CHECK-P8-NEXT:    mr r6, r7
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r28, r4
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r29
+; CHECK-P8-NEXT:    mr r4, r28
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %add = fadd fp128 %a, %b
   %conv = sitofp i32 %i to fp128
@@ -164,6 +457,54 @@ define fp128 @mixParam_02(fp128 %p1, double %p2, i64* nocapture %p3,
 ; CHECK-NEXT:    xsaddqp v2, v4, v2
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: mixParam_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -32
+; CHECK-P8-NEXT:    .cfi_offset r30, -24
+; CHECK-P8-NEXT:    .cfi_offset f31, -8
+; CHECK-P8-NEXT:    std r29, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r11, r4
+; CHECK-P8-NEXT:    lwz r4, 160(r1)
+; CHECK-P8-NEXT:    add r5, r7, r9
+; CHECK-P8-NEXT:    fmr f31, f1
+; CHECK-P8-NEXT:    add r5, r5, r10
+; CHECK-P8-NEXT:    add r4, r5, r4
+; CHECK-P8-NEXT:    clrldi r4, r4, 32
+; CHECK-P8-NEXT:    std r4, 0(r6)
+; CHECK-P8-NEXT:    mr r6, r3
+; CHECK-P8-NEXT:    ld r5, 0(r8)
+; CHECK-P8-NEXT:    ld r4, 8(r8)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r5, r6
+; CHECK-P8-NEXT:    mr r6, r11
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    fmr f1, f31
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r29, r4
+; CHECK-P8-NEXT:    bl __extenddfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r30, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                           i16 signext %p4, fp128* nocapture readonly %p5,
                           i32 signext %p6, i8 zeroext %p7, i32 zeroext %p8) {
 entry:
@@ -196,6 +537,53 @@ define fastcc fp128 @mixParam_02f(fp128 %p1, double %p2, i64* nocapture %p3,
 ; CHECK-NEXT:    xsaddqp v2, v4, v2
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: mixParam_02f:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -32
+; CHECK-P8-NEXT:    .cfi_offset r30, -24
+; CHECK-P8-NEXT:    .cfi_offset f31, -8
+; CHECK-P8-NEXT:    std r29, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r11, r4
+; CHECK-P8-NEXT:    add r4, r6, r8
+; CHECK-P8-NEXT:    mr r6, r3
+; CHECK-P8-NEXT:    fmr f31, f1
+; CHECK-P8-NEXT:    add r4, r4, r9
+; CHECK-P8-NEXT:    add r4, r4, r10
+; CHECK-P8-NEXT:    clrldi r4, r4, 32
+; CHECK-P8-NEXT:    std r4, 0(r5)
+; CHECK-P8-NEXT:    ld r5, 0(r7)
+; CHECK-P8-NEXT:    ld r4, 8(r7)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r5, r6
+; CHECK-P8-NEXT:    mr r6, r11
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    fmr f1, f31
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    mr r29, r4
+; CHECK-P8-NEXT:    bl __extenddfkf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r30, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                                   i16 signext %p4, fp128* nocapture readonly %p5,
                                   i32 signext %p6, i8 zeroext %p7, i32 zeroext %p8) {
 entry:
@@ -228,6 +616,46 @@ define void @mixParam_03(fp128 %f1, double* nocapture %d1, <4 x i32> %vec1,
 ; CHECK-NEXT:    xscvqpdp v2, v2
 ; CHECK-NEXT:    stxsd v2, 0(r5)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: mixParam_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    ld r6, 168(r1)
+; CHECK-P8-NEXT:    std r4, 8(r9)
+; CHECK-P8-NEXT:    std r3, 0(r9)
+; CHECK-P8-NEXT:    mr r3, r10
+; CHECK-P8-NEXT:    mr r28, r5
+; CHECK-P8-NEXT:    stvx v2, 0, r6
+; CHECK-P8-NEXT:    ld r30, 0(r9)
+; CHECK-P8-NEXT:    ld r29, 8(r9)
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __trunckfdf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stfdx f1, 0, r28
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                          fp128* nocapture %f2, i32 signext %i1, i8 zeroext %c1,
                          <4 x i32>* nocapture %vec2) {
 entry:
@@ -254,6 +682,45 @@ define fastcc void @mixParam_03f(fp128 %f1, double* nocapture %d1, <4 x i32> %ve
 ; CHECK-NEXT:    xscvqpdp v2, v2
 ; CHECK-NEXT:    stxsd v2, 0(r3)
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: mixParam_03f:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r28, -32
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    std r4, 8(r6)
+; CHECK-P8-NEXT:    std r3, 0(r6)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    mr r28, r5
+; CHECK-P8-NEXT:    stvx v2, 0, r9
+; CHECK-P8-NEXT:    ld r30, 0(r6)
+; CHECK-P8-NEXT:    ld r29, 8(r6)
+; CHECK-P8-NEXT:    bl __floatsikf
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    mr r5, r3
+; CHECK-P8-NEXT:    mr r6, r4
+; CHECK-P8-NEXT:    mr r3, r30
+; CHECK-P8-NEXT:    mr r4, r29
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __trunckfdf2
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stfdx f1, 0, r28
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                                  fp128* nocapture %f2, i32 signext %i1, i8 zeroext %c1,
                                  <4 x i32>* nocapture %vec2) {
 entry:
@@ -285,6 +752,23 @@ define signext i32 @noopt_call_crash() #0 {
 ; CHECK-NEXT:    ld r0, 16(r1)
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: noopt_call_crash:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    bl in
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl out
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    li r3, 0
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %call = call fp128 @in()
   call void @out(fp128 %call)

diff --git a/llvm/test/CodeGen/PowerPC/f128-rounding.ll b/llvm/test/CodeGen/PowerPC/f128-rounding.ll
index 56f63be5734e..288ba603e642 100644
--- a/llvm/test/CodeGen/PowerPC/f128-rounding.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-rounding.ll
@@ -1,76 +1,241 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
 ; RUN:   -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s | FileCheck %s
-
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
+; RUN:   -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s | FileCheck %s \
+; RUN:   -check-prefix=CHECK-P8
 
 define void @qp_trunc(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qp_trunc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsrqpi 1, v2, v2, 1
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_trunc:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl truncl
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.trunc.f128(fp128 %0)
   store fp128 %1, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qp_trunc
-; CHECK: xsrqpi 1, v{{[0-9]+}}, v{{[0-9]+}}, 1
-; CHECK: blr
 }
 declare fp128     @llvm.trunc.f128(fp128 %Val)
 
 define void @qp_rint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qp_rint:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsrqpix 0, v2, v2, 3
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_rint:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl rintl
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.rint.f128(fp128 %0)
   store fp128 %1, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qp_rint
-; CHECK: xsrqpix 0, v{{[0-9]+}}, v{{[0-9]+}}, 3
-; CHECK: blr
 }
 declare fp128     @llvm.rint.f128(fp128 %Val)
 
 define void @qp_nearbyint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qp_nearbyint:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsrqpi 0, v2, v2, 3
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_nearbyint:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl nearbyintl
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.nearbyint.f128(fp128 %0)
   store fp128 %1, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qp_nearbyint
-; CHECK: xsrqpi 0, v{{[0-9]+}}, v{{[0-9]+}}, 3
-; CHECK: blr
 }
 declare fp128     @llvm.nearbyint.f128(fp128 %Val)
 
 define void @qp_round(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qp_round:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsrqpi 0, v2, v2, 0
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_round:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl roundl
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.round.f128(fp128 %0)
   store fp128 %1, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qp_round
-; CHECK: xsrqpi 0, v{{[0-9]+}}, v{{[0-9]+}}, 0
-; CHECK: blr
 }
 declare fp128     @llvm.round.f128(fp128 %Val)
 
 define void @qp_floor(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qp_floor:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsrqpi 1, v2, v2, 3
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_floor:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl floorl
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.floor.f128(fp128 %0)
   store fp128 %1, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qp_floor
-; CHECK: xsrqpi 1, v{{[0-9]+}}, v{{[0-9]+}}, 3
-; CHECK: blr
 }
 declare fp128     @llvm.floor.f128(fp128 %Val)
 
 define void @qp_ceil(fp128* nocapture readonly %a, fp128* nocapture %res) {
+; CHECK-LABEL: qp_ceil:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xsrqpi 1, v2, v2, 2
+; CHECK-NEXT:    stxv v2, 0(r4)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qp_ceil:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r6, 8(r3)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    mr r4, r6
+; CHECK-P8-NEXT:    bl ceill
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    std r4, 8(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = tail call fp128 @llvm.ceil.f128(fp128 %0)
   store fp128 %1, fp128* %res, align 16
   ret void
-; CHECK-LABEL: qp_ceil
-; CHECK: xsrqpi 1, v{{[0-9]+}}, v{{[0-9]+}}, 2
-; CHECK: blr
 }
 declare fp128     @llvm.ceil.f128(fp128 %Val)
 

diff --git a/llvm/test/CodeGen/PowerPC/f128-truncateNconv.ll b/llvm/test/CodeGen/PowerPC/f128-truncateNconv.ll
index 10d56fb2c47a..cf9f48f5b47b 100644
--- a/llvm/test/CodeGen/PowerPC/f128-truncateNconv.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-truncateNconv.ll
@@ -1,6 +1,10 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -relocation-model=pic -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s \
 ; RUN:   | FileCheck %s
+; RUN: llc -relocation-model=pic -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
+; RUN:   -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s \
+; RUN:   | FileCheck %s -check-prefix=CHECK-P8
 
 @f128Array = global [4 x fp128] [fp128 0xL00000000000000004004C00000000000,
                                  fp128 0xLF000000000000000400808AB851EB851,
@@ -10,20 +14,70 @@
 
 ; Function Attrs: norecurse nounwind readonly
 define i64 @qpConv2sdw(fp128* nocapture readonly %a) {
+; CHECK-LABEL: qpConv2sdw:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xscvqpsdz v2, v2
+; CHECK-NEXT:    mfvsrd r3, v2
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sdw:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptosi fp128 %0 to i64
   ret i64 %conv
 
-; CHECK-LABEL: qpConv2sdw
-; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK-NEXT: xscvqpsdz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: mfvsrd r3, v[[CONV]]
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2sdw_02(i64* nocapture %res) local_unnamed_addr #1 {
+; CHECK-LABEL: qpConv2sdw_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT:    lxv v2, 32(r4)
+; CHECK-NEXT:    xscvqpsdz v2, v2
+; CHECK-NEXT:    stxsd v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sdw_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
@@ -32,17 +86,43 @@ entry:
   store i64 %conv, i64* %res, align 8
   ret void
 
-; CHECK-LABEL: qpConv2sdw_02
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK: lxv v[[REG:[0-9]+]], 32(r[[REG0]])
-; CHECK-NEXT: xscvqpsdz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxsd v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind readonly
 define i64 @qpConv2sdw_03(fp128* nocapture readonly %a) {
+; CHECK-LABEL: qpConv2sdw_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
+; CHECK-NEXT:    lxv v3, 16(r3)
+; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvqpsdz v2, v2
+; CHECK-NEXT:    mfvsrd r3, v2
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sdw_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r6, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    ld r5, 16(r6)
+; CHECK-P8-NEXT:    ld r6, 24(r6)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* getelementptr inbounds
@@ -52,19 +132,46 @@ entry:
   %conv = fptosi fp128 %add to i64
   ret i64 %conv
 
-; CHECK-LABEL: qpConv2sdw_03
-; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
-; CHECK: xsaddqp v[[REG]], v[[REG]], v[[REG1]]
-; CHECK-NEXT: xscvqpsdz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: mfvsrd r3, v[[CONV]]
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2sdw_04(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpConv2sdw_04:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvqpsdz v2, v2
+; CHECK-NEXT:    stxsd v2, 0(r5)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sdw_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                            fp128* nocapture readonly %b, i64* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -74,17 +181,48 @@ entry:
   store i64 %conv, i64* %res, align 8
   ret void
 
-; CHECK-LABEL: qpConv2sdw_04
-; CHECK-DAG: lxv v[[REG1:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK: xsaddqp v[[REG]], v[[REG]], v[[REG1]]
-; CHECK-NEXT: xscvqpsdz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxsd v[[CONV]], 0(r5)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2sdw_testXForm(i64* nocapture %res, i32 signext %idx) {
+; CHECK-LABEL: qpConv2sdw_testXForm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT:    sldi r4, r4, 3
+; CHECK-NEXT:    ld r5, .LC0@toc@l(r5)
+; CHECK-NEXT:    lxv v2, 32(r5)
+; CHECK-NEXT:    xscvqpsdz v2, v2
+; CHECK-NEXT:    stxsdx v2, r3, r4
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sdw_testXForm:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sldi r4, r30, 3
+; CHECK-P8-NEXT:    stdx r3, r29, r4
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array,
@@ -95,28 +233,74 @@ entry:
   store i64 %conv, i64* %arrayidx, align 8
   ret void
 
-; CHECK-LABEL: qpConv2sdw_testXForm
-; CHECK: xscvqpsdz v[[CONV:[0-9]+]],
-; CHECK: stxsdx v[[CONV]], r3, r4
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind readonly
 define i64 @qpConv2udw(fp128* nocapture readonly %a) {
+; CHECK-LABEL: qpConv2udw:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xscvqpudz v2, v2
+; CHECK-NEXT:    mfvsrd r3, v2
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2udw:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixunskfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptoui fp128 %0 to i64
   ret i64 %conv
 
-; CHECK-LABEL: qpConv2udw
-; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK-NEXT: xscvqpudz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: mfvsrd r3, v[[CONV]]
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2udw_02(i64* nocapture %res) {
+; CHECK-LABEL: qpConv2udw_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT:    lxv v2, 32(r4)
+; CHECK-NEXT:    xscvqpudz v2, v2
+; CHECK-NEXT:    stxsd v2, 0(r3)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2udw_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixunskfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
@@ -125,17 +309,43 @@ entry:
   store i64 %conv, i64* %res, align 8
   ret void
 
-; CHECK-LABEL: qpConv2udw_02
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK: lxv v[[REG:[0-9]+]], 32(r[[REG0]])
-; CHECK-NEXT: xscvqpudz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxsd v[[CONV]], 0(r3)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind readonly
 define i64 @qpConv2udw_03(fp128* nocapture readonly %a) {
+; CHECK-LABEL: qpConv2udw_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
+; CHECK-NEXT:    lxv v3, 16(r3)
+; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvqpudz v2, v2
+; CHECK-NEXT:    mfvsrd r3, v2
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2udw_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r6, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    ld r5, 16(r6)
+; CHECK-P8-NEXT:    ld r6, 24(r6)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixunskfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* getelementptr inbounds
@@ -145,19 +355,46 @@ entry:
   %conv = fptoui fp128 %add to i64
   ret i64 %conv
 
-; CHECK-LABEL: qpConv2udw_03
-; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK-DAG: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK-DAG: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
-; CHECK: xsaddqp v[[REG]], v[[REG]], v[[REG1]]
-; CHECK-NEXT: xscvqpudz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: mfvsrd r3, v[[CONV]]
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2udw_04(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpConv2udw_04:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvqpudz v2, v2
+; CHECK-NEXT:    stxsd v2, 0(r5)
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2udw_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixunskfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    std r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                            fp128* nocapture readonly %b, i64* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -167,17 +404,48 @@ entry:
   store i64 %conv, i64* %res, align 8
   ret void
 
-; CHECK-LABEL: qpConv2udw_04
-; CHECK-DAG: lxv v[[REG1:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK: xsaddqp v[[REG]], v[[REG]], v[[REG1]]
-; CHECK-NEXT: xscvqpudz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxsd v[[CONV]], 0(r5)
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2udw_testXForm(i64* nocapture %res, i32 signext %idx) {
+; CHECK-LABEL: qpConv2udw_testXForm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT:    sldi r4, r4, 3
+; CHECK-NEXT:    ld r5, .LC0@toc@l(r5)
+; CHECK-NEXT:    lxvx v2, 0, r5
+; CHECK-NEXT:    xscvqpudz v2, v2
+; CHECK-NEXT:    stxsdx v2, r3, r4
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2udw_testXForm:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r29, -24
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -64(r1)
+; CHECK-P8-NEXT:    mr r30, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r29, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 0(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixunskfdi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sldi r4, r30, 3
+; CHECK-P8-NEXT:    stdx r3, r29, r4
+; CHECK-P8-NEXT:    addi r1, r1, 64
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array,
@@ -188,29 +456,76 @@ entry:
   store i64 %conv, i64* %arrayidx, align 8
   ret void
 
-; CHECK-LABEL: qpConv2udw_testXForm
-; CHECK: xscvqpudz v[[CONV:[0-9]+]],
-; CHECK: stxsdx v[[CONV]], r3, r4
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind readonly
 define signext i32 @qpConv2sw(fp128* nocapture readonly %a)  {
+; CHECK-LABEL: qpConv2sw:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xscvqpswz v2, v2
+; CHECK-NEXT:    mfvsrwz r3, v2
+; CHECK-NEXT:    extsw r3, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sw:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptosi fp128 %0 to i32
   ret i32 %conv
 
-; CHECK-LABEL: qpConv2sw
-; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK-NEXT: xscvqpswz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: mfvsrwz r[[REG2:[0-9]+]], v[[CONV]]
-; CHECK-NEXT: extsw r3, r[[REG2]]
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2sw_02(i32* nocapture %res) {
+; CHECK-LABEL: qpConv2sw_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT:    lxv v2, 32(r4)
+; CHECK-NEXT:    xscvqpswz v2, v2
+; CHECK-NEXT:    stxsiwx v2, 0, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sw_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stw r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
@@ -219,17 +534,45 @@ entry:
   store i32 %conv, i32* %res, align 4
   ret void
 
-; CHECK-LABEL: qpConv2sw_02
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK: lxv v[[REG:[0-9]+]], 32(r[[REG0]])
-; CHECK-NEXT: xscvqpswz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxsiwx v[[CONV]], 0, r3
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind readonly
 define signext i32 @qpConv2sw_03(fp128* nocapture readonly %a)  {
+; CHECK-LABEL: qpConv2sw_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
+; CHECK-NEXT:    lxv v3, 16(r3)
+; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvqpswz v2, v2
+; CHECK-NEXT:    mfvsrwz r3, v2
+; CHECK-NEXT:    extsw r3, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sw_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r6, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    ld r5, 16(r6)
+; CHECK-P8-NEXT:    ld r6, 24(r6)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* getelementptr inbounds
@@ -239,20 +582,46 @@ entry:
   %conv = fptosi fp128 %add to i32
   ret i32 %conv
 
-; CHECK-LABEL: qpConv2sw_03
-; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK-DAG: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK-DAG: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
-; CHECK-NEXT: xsaddqp v[[ADD:[0-9]+]], v[[REG]], v[[REG1]]
-; CHECK-NEXT: xscvqpswz v[[CONV:[0-9]+]], v[[ADD]]
-; CHECK-NEXT: mfvsrwz r[[REG2:[0-9]+]], v[[CONV]]
-; CHECK-NEXT: extsw r3, r[[REG2]]
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2sw_04(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpConv2sw_04:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvqpswz v2, v2
+; CHECK-NEXT:    stxsiwx v2, 0, r5
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sw_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stw r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                           fp128* nocapture readonly %b, i32* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -262,31 +631,75 @@ entry:
   store i32 %conv, i32* %res, align 4
   ret void
 
-; CHECK-LABEL: qpConv2sw_04
-; CHECK-DAG: lxv v[[REG1:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK-NEXT: xsaddqp v[[ADD:[0-9]+]], v[[REG]], v[[REG1]]
-; CHECK-NEXT: xscvqpswz v[[CONV:[0-9]+]], v[[ADD]]
-; CHECK-NEXT: stxsiwx v[[CONV]], 0, r5
-; CHECK-NEXT: blr
 }
 
 ; Function Attrs: norecurse nounwind readonly
 define zeroext i32 @qpConv2uw(fp128* nocapture readonly %a)  {
+; CHECK-LABEL: qpConv2uw:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    xscvqpuwz v2, v2
+; CHECK-NEXT:    mfvsrwz r3, v2
+; CHECK-NEXT:    clrldi r3, r3, 32
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2uw:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixunskfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptoui fp128 %0 to i32
   ret i32 %conv
 
-; CHECK-LABEL: qpConv2uw
-; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK-NEXT: xscvqpuwz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: mfvsrwz r3, v[[CONV]]
-; CHECK: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2uw_02(i32* nocapture %res) {
+; CHECK-LABEL: qpConv2uw_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT:    lxv v2, 32(r4)
+; CHECK-NEXT:    xscvqpuwz v2, v2
+; CHECK-NEXT:    stxsiwx v2, 0, r3
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2uw_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixunskfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stw r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
@@ -295,17 +708,44 @@ entry:
   store i32 %conv, i32* %res, align 4
   ret void
 
-; CHECK-LABEL: qpConv2uw_02
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK: lxv v[[REG:[0-9]+]], 32(r[[REG0]])
-; CHECK-NEXT: xscvqpuwz v[[CONV:[0-9]+]], v[[REG]]
-; CHECK-NEXT: stxsiwx v[[CONV]], 0, r3
-; CHECK: blr
 }
 
 ; Function Attrs: norecurse nounwind readonly
 define zeroext i32 @qpConv2uw_03(fp128* nocapture readonly %a)  {
+; CHECK-LABEL: qpConv2uw_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
+; CHECK-NEXT:    lxv v3, 16(r3)
+; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvqpuwz v2, v2
+; CHECK-NEXT:    mfvsrwz r3, v2
+; CHECK-NEXT:    clrldi r3, r3, 32
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2uw_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r6, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    ld r5, 16(r6)
+; CHECK-P8-NEXT:    ld r6, 24(r6)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixunskfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* getelementptr inbounds
@@ -315,19 +755,46 @@ entry:
   %conv = fptoui fp128 %add to i32
   ret i32 %conv
 
-; CHECK-LABEL: qpConv2uw_03
-; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK-DAG: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK-DAG: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
-; CHECK-NEXT: xsaddqp v[[ADD:[0-9]+]], v[[REG]], v[[REG1]]
-; CHECK-NEXT: xscvqpuwz v[[CONV:[0-9]+]], v[[ADD]]
-; CHECK-NEXT: mfvsrwz r3, v[[CONV]]
-; CHECK: blr
 }
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2uw_04(fp128* nocapture readonly %a,
+; CHECK-LABEL: qpConv2uw_04:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lxv v2, 0(r3)
+; CHECK-NEXT:    lxv v3, 0(r4)
+; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvqpuwz v2, v2
+; CHECK-NEXT:    stxsiwx v2, 0, r5
+; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2uw_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixunskfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stw r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
                           fp128* nocapture readonly %b, i32* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -337,16 +804,8 @@ entry:
   store i32 %conv, i32* %res, align 4
   ret void
 
-; CHECK-LABEL: qpConv2uw_04
-; CHECK-DAG: lxv v[[REG1:[0-9]+]], 0(r4)
-; CHECK-DAG: lxv v[[REG:[0-9]+]], 0(r3)
-; CHECK-NEXT: xsaddqp v[[ADD:[0-9]+]], v[[REG]], v[[REG1]]
-; CHECK-NEXT: xscvqpuwz v[[CONV:[0-9]+]], v[[ADD]]
-; CHECK-NEXT: stxsiwx v[[CONV]], 0, r5
-; CHECK: blr
 }
 
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 
 ; Function Attrs: norecurse nounwind readonly
 define signext i16 @qpConv2shw(fp128* nocapture readonly %a) {
@@ -357,6 +816,24 @@ define signext i16 @qpConv2shw(fp128* nocapture readonly %a) {
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2shw:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptosi fp128 %0 to i16
@@ -373,6 +850,30 @@ define void @qpConv2shw_02(i16* nocapture %res) {
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    stxsihx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2shw_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sth r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array,
@@ -387,14 +888,38 @@ define signext i16 @qpConv2shw_03(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qpConv2shw_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LC0@toc@ha
-; CHECK-NEXT:    ld [[REG1:r[0-9]+]], .LC0@toc@l([[REG]])
-; CHECK-NEXT:    lxv v3, 16([[REG1]])
+; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
+; CHECK-NEXT:    lxv v3, 16(r3)
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2shw_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r6, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    ld r5, 16(r6)
+; CHECK-P8-NEXT:    ld r6, 24(r6)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* getelementptr inbounds
@@ -407,7 +932,6 @@ entry:
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2shw_04(fp128* nocapture readonly %a,
-                           fp128* nocapture readonly %b, i16* nocapture %res) {
 ; CHECK-LABEL: qpConv2shw_04:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v2, 0(r3)
@@ -416,6 +940,35 @@ define void @qpConv2shw_04(fp128* nocapture readonly %a,
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    stxsihx v2, 0, r5
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2shw_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sth r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
+                           fp128* nocapture readonly %b, i16* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16
@@ -434,6 +987,23 @@ define zeroext i16 @qpConv2uhw(fp128* nocapture readonly %a) {
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2uhw:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptoui fp128 %0 to i16
@@ -450,6 +1020,30 @@ define void @qpConv2uhw_02(i16* nocapture %res) {
 ; CHECK-NEXT:    xscvqpuwz v2, v2
 ; CHECK-NEXT:    stxsihx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2uhw_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sth r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array,
@@ -464,14 +1058,37 @@ define zeroext i16 @qpConv2uhw_03(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qpConv2uhw_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LC0@toc@ha
-; CHECK-NEXT:    ld [[REG1:r[0-9]+]], .LC0@toc@l([[REG]])
-; CHECK-NEXT:    lxv v3, 16([[REG1]])
+; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
+; CHECK-NEXT:    lxv v3, 16(r3)
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2uhw_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r6, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    ld r5, 16(r6)
+; CHECK-P8-NEXT:    ld r6, 24(r6)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* getelementptr inbounds
@@ -484,7 +1101,6 @@ entry:
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2uhw_04(fp128* nocapture readonly %a,
-                           fp128* nocapture readonly %b, i16* nocapture %res) {
 ; CHECK-LABEL: qpConv2uhw_04:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v2, 0(r3)
@@ -493,6 +1109,35 @@ define void @qpConv2uhw_04(fp128* nocapture readonly %a,
 ; CHECK-NEXT:    xscvqpuwz v2, v2
 ; CHECK-NEXT:    stxsihx v2, 0, r5
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2uhw_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    sth r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
+                           fp128* nocapture readonly %b, i16* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16
@@ -511,6 +1156,24 @@ define signext i8 @qpConv2sb(fp128* nocapture readonly %a) {
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sb:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptosi fp128 %0 to i8
@@ -527,6 +1190,30 @@ define void @qpConv2sb_02(i8* nocapture %res) {
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    stxsibx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sb_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stb r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array,
@@ -541,14 +1228,38 @@ define signext i8 @qpConv2sb_03(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qpConv2sb_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LC0@toc@ha
-; CHECK-NEXT:    ld [[REG1:r[0-9]+]], .LC0@toc@l([[REG]])
-; CHECK-NEXT:    lxv v3, 16([[REG1]])
+; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
+; CHECK-NEXT:    lxv v3, 16(r3)
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sb_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r6, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    ld r5, 16(r6)
+; CHECK-P8-NEXT:    ld r6, 24(r6)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    extsw r3, r3
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* getelementptr inbounds
@@ -561,7 +1272,6 @@ entry:
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2sb_04(fp128* nocapture readonly %a,
-                          fp128* nocapture readonly %b, i8* nocapture %res) {
 ; CHECK-LABEL: qpConv2sb_04:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v2, 0(r3)
@@ -570,6 +1280,35 @@ define void @qpConv2sb_04(fp128* nocapture readonly %a,
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    stxsibx v2, 0, r5
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2sb_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stb r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
+                          fp128* nocapture readonly %b, i8* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16
@@ -588,6 +1327,23 @@ define zeroext i8 @qpConv2ub(fp128* nocapture readonly %a) {
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2ub:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    ld r5, 0(r3)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %conv = fptoui fp128 %0 to i8
@@ -604,6 +1360,30 @@ define void @qpConv2ub_02(i8* nocapture %res) {
 ; CHECK-NEXT:    xscvqpuwz v2, v2
 ; CHECK-NEXT:    stxsibx v2, 0, r3
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2ub_02:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    mr r30, r3
+; CHECK-P8-NEXT:    ld r4, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r5, 32(r4)
+; CHECK-P8-NEXT:    ld r4, 40(r4)
+; CHECK-P8-NEXT:    mr r3, r5
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stb r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* getelementptr inbounds
                             ([4 x fp128], [4 x fp128]* @f128Array,
@@ -618,14 +1398,37 @@ define zeroext i8 @qpConv2ub_03(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qpConv2ub_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LC0@toc@ha
-; CHECK-NEXT:    ld [[REG1:r[0-9]+]], .LC0@toc@l([[REG]])
-; CHECK-NEXT:    lxv v3, 16([[REG1]])
+; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
+; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
+; CHECK-NEXT:    lxv v3, 16(r3)
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2ub_03:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -32(r1)
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    addis r4, r2, .LC0@toc@ha
+; CHECK-P8-NEXT:    ld r7, 0(r3)
+; CHECK-P8-NEXT:    ld r6, .LC0@toc@l(r4)
+; CHECK-P8-NEXT:    ld r4, 8(r3)
+; CHECK-P8-NEXT:    mr r3, r7
+; CHECK-P8-NEXT:    ld r5, 16(r6)
+; CHECK-P8-NEXT:    ld r6, 24(r6)
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    addi r1, r1, 32
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* getelementptr inbounds
@@ -638,7 +1441,6 @@ entry:
 
 ; Function Attrs: norecurse nounwind
 define void @qpConv2ub_04(fp128* nocapture readonly %a,
-                          fp128* nocapture readonly %b, i8* nocapture %res) {
 ; CHECK-LABEL: qpConv2ub_04:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v2, 0(r3)
@@ -647,6 +1449,35 @@ define void @qpConv2ub_04(fp128* nocapture readonly %a,
 ; CHECK-NEXT:    xscvqpuwz v2, v2
 ; CHECK-NEXT:    stxsibx v2, 0, r5
 ; CHECK-NEXT:    blr
+;
+; CHECK-P8-LABEL: qpConv2ub_04:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    mflr r0
+; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT:    .cfi_offset lr, 16
+; CHECK-P8-NEXT:    .cfi_offset r30, -16
+; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT:    std r0, 16(r1)
+; CHECK-P8-NEXT:    stdu r1, -48(r1)
+; CHECK-P8-NEXT:    ld r9, 0(r3)
+; CHECK-P8-NEXT:    ld r7, 8(r3)
+; CHECK-P8-NEXT:    ld r8, 0(r4)
+; CHECK-P8-NEXT:    ld r6, 8(r4)
+; CHECK-P8-NEXT:    mr r30, r5
+; CHECK-P8-NEXT:    mr r3, r9
+; CHECK-P8-NEXT:    mr r4, r7
+; CHECK-P8-NEXT:    mr r5, r8
+; CHECK-P8-NEXT:    bl __addkf3
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    bl __fixkfsi
+; CHECK-P8-NEXT:    nop
+; CHECK-P8-NEXT:    stb r3, 0(r30)
+; CHECK-P8-NEXT:    addi r1, r1, 48
+; CHECK-P8-NEXT:    ld r0, 16(r1)
+; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    mtlr r0
+; CHECK-P8-NEXT:    blr
+                          fp128* nocapture readonly %b, i8* nocapture %res) {
 entry:
   %0 = load fp128, fp128* %a, align 16
   %1 = load fp128, fp128* %b, align 16


        

