[llvm] c9babbc - Pre-commit PhaseOrdering/always-inline-alloca-promotion.ll

Amara Emerson via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 2 20:02:47 PDT 2025


Author: Amara Emerson
Date: 2025-07-02T19:51:01-07:00
New Revision: c9babbc2065dabd892150085f24cbe660990c8c1

URL: https://github.com/llvm/llvm-project/commit/c9babbc2065dabd892150085f24cbe660990c8c1
DIFF: https://github.com/llvm/llvm-project/commit/c9babbc2065dabd892150085f24cbe660990c8c1.diff

LOG: Pre-commit PhaseOrdering/always-inline-alloca-promotion.ll

Added: 
    llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll b/llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll
new file mode 100644
index 0000000000000..63279b04bda28
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt --Os -S %s -o - | FileCheck %s
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "arm64e-apple-ios19.0.0"
+
+; This test checks whether we generate a phi for the <vscale x 16 x float> value
+; or whether we manage to promote the alloca away during inlining.
+
+; Function Attrs: mustprogress optsize ssp uwtable(sync)
+define void @pluto() #0 {
+; CHECK-LABEL: define void @pluto(
+; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 48 to ptr), align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i64 [[TMP1]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> zeroinitializer, <vscale x 4 x float> zeroinitializer, i64 0)
+; CHECK-NEXT:    br label %[[SNORK_EXIT:.*]]
+; CHECK:       [[SNORK_EXIT]]:
+; CHECK-NEXT:    [[DOT0:%.*]] = phi <vscale x 16 x float> [ undef, [[TMP0:%.*]] ], [ [[SPEC_SELECT:%.*]], %[[SNORK_EXIT]] ]
+; CHECK-NEXT:    [[SPEC_SELECT]] = select i1 [[TMP2]], <vscale x 16 x float> [[TMP3]], <vscale x 16 x float> [[DOT0]]
+; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SPEC_SELECT]], i64 0)
+; CHECK-NEXT:    tail call void @llvm.aarch64.sme.mopa.nxv4f32(i32 0, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> zeroinitializer, <vscale x 4 x float> [[TMP4]])
+; CHECK-NEXT:    br label %[[SNORK_EXIT]]
+;
+  br label %1
+
+1:                                                ; preds = %1, %0
+  call void @ham() #5
+  br label %1
+}
+
+; Function Attrs: alwaysinline mustprogress optsize ssp uwtable(sync)
+define void @ham() #1 {
+; CHECK-LABEL: define void @ham(
+; CHECK-SAME: ) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:  [[SNORK_EXIT:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 48 to ptr), align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[TMP0]], 0
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> zeroinitializer, <vscale x 4 x float> zeroinitializer, i64 0)
+; CHECK-NEXT:    [[SPEC_SELECT:%.*]] = select i1 [[TMP1]], <vscale x 16 x float> [[TMP2]], <vscale x 16 x float> undef
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SPEC_SELECT]], i64 0)
+; CHECK-NEXT:    tail call void @llvm.aarch64.sme.mopa.nxv4f32(i32 0, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> zeroinitializer, <vscale x 4 x float> [[TMP3]])
+; CHECK-NEXT:    ret void
+;
+  %1 = alloca <vscale x 16 x float>, align 16
+  %2 = load i64, ptr inttoptr (i64 48 to ptr), align 8
+  %3 = call i64 @snork(i64 noundef %2, ptr noundef nonnull align 16 %1) #5
+  %4 = load <vscale x 16 x float>, ptr %1, align 16
+  %5 = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> %4, i64 0)
+  call void @llvm.aarch64.sme.mopa.nxv4f32(i32 0, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> zeroinitializer, <vscale x 4 x float> %5)
+  ret void
+}
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float>, <vscale x 4 x float>, i64 immarg) #2
+
+; Function Attrs: alwaysinline mustprogress nounwind optsize ssp uwtable(sync)
+define i64 @snork(i64 noundef %0, ptr noundef nonnull align 16 %1) #3 {
+; CHECK-LABEL: define noundef i64 @snork(
+; CHECK-SAME: i64 noundef [[TMP0:%.*]], ptr noundef nonnull writeonly align 16 captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i64 [[TMP0]], 0
+; CHECK-NEXT:    br i1 [[TMP3]], label %[[BB4:.*]], label %[[BB6:.*]]
+; CHECK:       [[BB4]]:
+; CHECK-NEXT:    [[TMP5:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> zeroinitializer, <vscale x 4 x float> zeroinitializer, i64 0)
+; CHECK-NEXT:    store <vscale x 16 x float> [[TMP5]], ptr [[TMP1]], align 16
+; CHECK-NEXT:    br label %[[BB6]]
+; CHECK:       [[BB6]]:
+; CHECK-NEXT:    ret i64 0
+;
+  %3 = icmp sgt i64 %0, 0
+  br i1 %3, label %4, label %6
+
+4:                                                ; preds = %2
+  %5 = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> zeroinitializer, <vscale x 4 x float> zeroinitializer, i64 0)
+  store <vscale x 16 x float> %5, ptr %1, align 16
+  br label %6
+
+6:                                                ; preds = %4, %2
+  ret i64 0
+}
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float>, i64 immarg) #2
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
+declare void @llvm.aarch64.sme.mopa.nxv4f32(i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>) #4
+
+attributes #0 = { mustprogress optsize ssp uwtable(sync) "aarch64_new_za" "aarch64_pstate_sm_enabled" "frame-pointer"="non-leaf" "no-builtin-calloc" "no-builtin-stpcpy" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m4" }
+attributes #1 = { alwaysinline mustprogress optsize ssp uwtable(sync) "aarch64_inout_za" "aarch64_pstate_sm_enabled" "frame-pointer"="non-leaf" "no-builtin-calloc" "no-builtin-stpcpy" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m4" }
+attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+attributes #3 = { alwaysinline mustprogress nounwind optsize ssp uwtable(sync) "aarch64_inout_za" "aarch64_pstate_sm_enabled" "frame-pointer"="non-leaf" "no-builtin-calloc" "no-builtin-stpcpy" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m4" }
+attributes #4 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
+attributes #5 = { optsize "aarch64_inout_za" "aarch64_pstate_sm_enabled" "no-builtin-calloc" "no-builtin-stpcpy" }
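
For context on what the test above exercises, here is a minimal, hypothetical IR sketch (not part of this commit, and using made-up function names) of the alloca-promotion pattern: the caller's alloca is only written through a pointer inside an alwaysinline callee, so it can only be promoted to SSA form after the inliner has run, and the phase ordering decides whether SROA still gets a chance to do that.

; Hypothetical reduced example, not taken from the commit. Once the inliner
; has copied @callee into @caller, SROA/mem2reg can replace %slot with the
; stored value, so no load/store of the alloca (and no phi of undef) remains.
define internal void @callee(ptr %out) alwaysinline {
  store float 1.000000e+00, ptr %out, align 4
  ret void
}

define float @caller() {
  %slot = alloca float, align 4
  call void @callee(ptr %slot)
  %v = load float, ptr %slot, align 4
  ret float %v
}

Under those assumptions, running inlining followed by SROA (e.g. opt -passes='cgscc(inline),function(sroa)' -S on the sketch) should fold @caller down to returning the constant directly; the committed test checks the analogous outcome for the scalable-vector alloca under the full -Os pipeline.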


        

