[llvm] a853c42 - Pre-commit load/store cases for PowerPC direct-move

Qiu Chaofan via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 15 01:36:04 PST 2022


Author: Qiu Chaofan
Date: 2022-11-15T17:35:49+08:00
New Revision: a853c42a6a5e5af8eedde3d54e9f02beb857be88

URL: https://github.com/llvm/llvm-project/commit/a853c42a6a5e5af8eedde3d54e9f02beb857be88
DIFF: https://github.com/llvm/llvm-project/commit/a853c42a6a5e5af8eedde3d54e9f02beb857be88.diff

LOG: Pre-commit load/store cases for PowerPC direct-move
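
The new cases exercise strict (constrained) FP-to-integer conversions whose result is
stored straight to memory, and integer-to-FP conversions whose operand is loaded straight
from memory. With VSX the converted value is currently moved between VSRs and GPRs with
direct-move instructions (mffprwz/mffprd, mtfprwa/mtfprwz/mtfprd), while the NOVSX path
round-trips through a stack slot. A rough standalone reproducer for the first case is
sketched below; the llc invocation, CPU, and the strictfp attribute group are assumptions
for illustration only, and the file's own RUN lines and attributes are authoritative:

  ; llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 \
  ;     -ppc-asm-full-reg-names reduced.ll -o -
  define void @d_to_i32_store(double %m, ptr %addr) #0 {
  entry:
    %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
    store i32 %conv, ptr %addr, align 4
    ret void
  }
  declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
  attributes #0 = { strictfp }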

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/fp-strict-conv.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll b/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
index ec0d02712867a..64475201ece73 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
@@ -326,6 +326,330 @@ entry:
   ret float %conv
 }
 
+define void @d_to_i32_store(double %m, ptr %addr) #0 {
+; CHECK-LABEL: d_to_i32_store:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpsxws f0, f1
+; CHECK-NEXT:    mffprwz r3, f0
+; CHECK-NEXT:    stw r3, 0(r4)
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: d_to_i32_store:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiwz f0, f1
+; NOVSX-NEXT:    addi r3, r1, -4
+; NOVSX-NEXT:    stfiwx f0, 0, r3
+; NOVSX-NEXT:    lwz r3, -4(r1)
+; NOVSX-NEXT:    stw r3, 0(r4)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  store i32 %conv, ptr %addr, align 4
+  ret void
+}
+
+define void @d_to_i64_store(double %m, ptr %addr) #0 {
+; CHECK-LABEL: d_to_i64_store:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpsxds f0, f1
+; CHECK-NEXT:    mffprd r3, f0
+; CHECK-NEXT:    std r3, 0(r4)
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: d_to_i64_store:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctidz f0, f1
+; NOVSX-NEXT:    stfd f0, -8(r1)
+; NOVSX-NEXT:    ld r3, -8(r1)
+; NOVSX-NEXT:    std r3, 0(r4)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  store i64 %conv, ptr %addr, align 8
+  ret void
+}
+
+define void @d_to_u64_store(double %m, ptr %addr) #0 {
+; CHECK-LABEL: d_to_u64_store:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpuxds f0, f1
+; CHECK-NEXT:    mffprd r3, f0
+; CHECK-NEXT:    std r3, 0(r4)
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: d_to_u64_store:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiduz f0, f1
+; NOVSX-NEXT:    stfd f0, -8(r1)
+; NOVSX-NEXT:    ld r3, -8(r1)
+; NOVSX-NEXT:    std r3, 0(r4)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  store i64 %conv, ptr %addr, align 8
+  ret void
+}
+
+define void @d_to_u32_store(double %m, ptr %addr) #0 {
+; CHECK-LABEL: d_to_u32_store:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpuxws f0, f1
+; CHECK-NEXT:    mffprwz r3, f0
+; CHECK-NEXT:    stw r3, 0(r4)
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: d_to_u32_store:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiwuz f0, f1
+; NOVSX-NEXT:    addi r3, r1, -4
+; NOVSX-NEXT:    stfiwx f0, 0, r3
+; NOVSX-NEXT:    lwz r3, -4(r1)
+; NOVSX-NEXT:    stw r3, 0(r4)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  store i32 %conv, ptr %addr, align 4
+  ret void
+}
+
+define void @f_to_i32_store(float %m, ptr %addr) #0 {
+; CHECK-LABEL: f_to_i32_store:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpsxws f0, f1
+; CHECK-NEXT:    mffprwz r3, f0
+; CHECK-NEXT:    stw r3, 0(r4)
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: f_to_i32_store:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiwz f0, f1
+; NOVSX-NEXT:    addi r3, r1, -4
+; NOVSX-NEXT:    stfiwx f0, 0, r3
+; NOVSX-NEXT:    lwz r3, -4(r1)
+; NOVSX-NEXT:    stw r3, 0(r4)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  store i32 %conv, ptr %addr, align 4
+  ret void
+}
+
+define void @f_to_i64_store(float %m, ptr %addr) #0 {
+; CHECK-LABEL: f_to_i64_store:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpsxds f0, f1
+; CHECK-NEXT:    mffprd r3, f0
+; CHECK-NEXT:    std r3, 0(r4)
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: f_to_i64_store:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctidz f0, f1
+; NOVSX-NEXT:    stfd f0, -8(r1)
+; NOVSX-NEXT:    ld r3, -8(r1)
+; NOVSX-NEXT:    std r3, 0(r4)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  store i64 %conv, ptr %addr, align 8
+  ret void
+}
+
+define void @f_to_u64_store(float %m, ptr %addr) #0 {
+; CHECK-LABEL: f_to_u64_store:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpuxds f0, f1
+; CHECK-NEXT:    mffprd r3, f0
+; CHECK-NEXT:    std r3, 0(r4)
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: f_to_u64_store:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiduz f0, f1
+; NOVSX-NEXT:    stfd f0, -8(r1)
+; NOVSX-NEXT:    ld r3, -8(r1)
+; NOVSX-NEXT:    std r3, 0(r4)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  store i64 %conv, ptr %addr, align 8
+  ret void
+}
+
+define void @f_to_u32_store(float %m, ptr %addr) #0 {
+; CHECK-LABEL: f_to_u32_store:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpuxws f0, f1
+; CHECK-NEXT:    mffprwz r3, f0
+; CHECK-NEXT:    stw r3, 0(r4)
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: f_to_u32_store:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiwuz f0, f1
+; NOVSX-NEXT:    addi r3, r1, -4
+; NOVSX-NEXT:    stfiwx f0, 0, r3
+; NOVSX-NEXT:    lwz r3, -4(r1)
+; NOVSX-NEXT:    stw r3, 0(r4)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  store i32 %conv, ptr %addr, align 4
+  ret void
+}
+
+define double @load_i32_to_d(ptr %addr) #0 {
+; CHECK-LABEL: load_i32_to_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lwz r3, 0(r3)
+; CHECK-NEXT:    mtfprwa f0, r3
+; CHECK-NEXT:    xscvsxddp f1, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: load_i32_to_d:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    lfiwax f0, 0, r3
+; NOVSX-NEXT:    fcfid f1, f0
+; NOVSX-NEXT:    blr
+entry:
+  %m = load i32, ptr %addr, align 4
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret double %conv
+}
+
+define double @load_i64_to_d(ptr %addr) #0 {
+; CHECK-LABEL: load_i64_to_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ld r3, 0(r3)
+; CHECK-NEXT:    mtfprd f0, r3
+; CHECK-NEXT:    xscvsxddp f1, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: load_i64_to_d:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    lfd f0, 0(r3)
+; NOVSX-NEXT:    fcfid f1, f0
+; NOVSX-NEXT:    blr
+entry:
+  %m = load i64, ptr %addr, align 8
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret double %conv
+}
+
+define double @load_u32_to_d(ptr %addr) #0 {
+; CHECK-LABEL: load_u32_to_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lwz r3, 0(r3)
+; CHECK-NEXT:    mtfprwz f0, r3
+; CHECK-NEXT:    xscvuxddp f1, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: load_u32_to_d:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    lfiwzx f0, 0, r3
+; NOVSX-NEXT:    fcfidu f1, f0
+; NOVSX-NEXT:    blr
+entry:
+  %m = load i32, ptr %addr, align 4
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret double %conv
+}
+
+define double @load_u64_to_d(ptr %addr) #0 {
+; CHECK-LABEL: load_u64_to_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ld r3, 0(r3)
+; CHECK-NEXT:    mtfprd f0, r3
+; CHECK-NEXT:    xscvuxddp f1, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: load_u64_to_d:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    lfd f0, 0(r3)
+; NOVSX-NEXT:    fcfidu f1, f0
+; NOVSX-NEXT:    blr
+entry:
+  %m = load i64, ptr %addr, align 8
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret double %conv
+}
+
+define float @load_i32_to_f(ptr %addr) #0 {
+; CHECK-LABEL: load_i32_to_f:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lwz r3, 0(r3)
+; CHECK-NEXT:    mtfprwa f0, r3
+; CHECK-NEXT:    xscvsxdsp f1, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: load_i32_to_f:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    lfiwax f0, 0, r3
+; NOVSX-NEXT:    fcfids f1, f0
+; NOVSX-NEXT:    blr
+entry:
+  %m = load i32, ptr %addr, align 4
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret float %conv
+}
+
+define float @load_i64_to_f(ptr %addr) #0 {
+; CHECK-LABEL: load_i64_to_f:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ld r3, 0(r3)
+; CHECK-NEXT:    mtfprd f0, r3
+; CHECK-NEXT:    xscvsxdsp f1, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: load_i64_to_f:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    lfd f0, 0(r3)
+; NOVSX-NEXT:    fcfids f1, f0
+; NOVSX-NEXT:    blr
+entry:
+  %m = load i64, ptr %addr, align 8
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret float %conv
+}
+
+define float @load_u32_to_f(ptr %addr) #0 {
+; CHECK-LABEL: load_u32_to_f:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lwz r3, 0(r3)
+; CHECK-NEXT:    mtfprwz f0, r3
+; CHECK-NEXT:    xscvuxdsp f1, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: load_u32_to_f:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    lfiwzx f0, 0, r3
+; NOVSX-NEXT:    fcfidus f1, f0
+; NOVSX-NEXT:    blr
+entry:
+  %m = load i32, ptr %addr, align 4
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret float %conv
+}
+
+define float @load_u64_to_f(ptr %addr) #0 {
+; CHECK-LABEL: load_u64_to_f:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ld r3, 0(r3)
+; CHECK-NEXT:    mtfprd f0, r3
+; CHECK-NEXT:    xscvuxdsp f1, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: load_u64_to_f:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    lfd f0, 0(r3)
+; NOVSX-NEXT:    fcfidus f1, f0
+; NOVSX-NEXT:    blr
+entry:
+  %m = load i64, ptr %addr, align 8
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret float %conv
+}
+
 define void @fptoint_nofpexcept_f64(double %m, ptr %addr1, ptr %addr2) {
 ; MIR-LABEL: name: fptoint_nofpexcept_f64
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
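
The intrinsic declarations and the #0 attribute group referenced by the new functions are
not part of this hunk; they are assumed to match the declarations already present elsewhere
in fp-strict-conv.ll for the existing conversion tests, roughly:

  declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
  declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
  declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
  declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
  ; ... plus the analogous f32/f64 x i32/i64 variants used above
  attributes #0 = { strictfp }

If the assertions in this file are maintained with llvm/utils/update_llc_test_checks.py
(the CHECK-NEXT structure suggests so), the CHECK/NOVSX blocks can be regenerated the same
way once the follow-up direct-move change lands.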


        


More information about the llvm-commits mailing list