[clang] [llvm] Vectorize: Support fminimumnum and fmaximumnum (PR #131781)
YunQiang Su via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 31 02:44:03 PDT 2025
================
@@ -0,0 +1,1059 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt --passes=loop-vectorize --mtriple=riscv64 -mattr="+zvfh,+v" -S < %s | FileCheck %s --check-prefix=RV64
+; RUN: opt --passes=loop-vectorize --mtriple=aarch64 -mattr="+neon" -S < %s | FileCheck %s --check-prefix=ARM64
+; FIXME: ARM64 with SVE does not yet produce vscale-style (scalable vector) code here.
+; RUN: opt --passes=loop-vectorize --mtriple=x86_64 -S < %s | FileCheck %s --check-prefix=X64
+
+@af32 = dso_local local_unnamed_addr global [4096 x float] zeroinitializer, align 4
+@bf32 = dso_local local_unnamed_addr global [4096 x float] zeroinitializer, align 4
+@cf32 = dso_local local_unnamed_addr global [4096 x float] zeroinitializer, align 4
+@af64 = dso_local local_unnamed_addr global [4096 x double] zeroinitializer, align 8
+@bf64 = dso_local local_unnamed_addr global [4096 x double] zeroinitializer, align 8
+@cf64 = dso_local local_unnamed_addr global [4096 x double] zeroinitializer, align 8
+@af16 = dso_local local_unnamed_addr global [4096 x half] zeroinitializer, align 2
+@bf16 = dso_local local_unnamed_addr global [4096 x half] zeroinitializer, align 2
+@cf16 = dso_local local_unnamed_addr global [4096 x half] zeroinitializer, align 2
+
+; Function Attrs: nofree norecurse nosync nounwind memory(readwrite, argmem: none, inaccessiblemem: none) uwtable
+define dso_local void @f32min() local_unnamed_addr {
+; RV64-LABEL: define dso_local void @f32min(
+; RV64-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; RV64-NEXT: [[ENTRY:.*]]:
+; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP1]]
+; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64: [[VECTOR_PH]]:
+; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP3]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
+; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64: [[VECTOR_BODY]]:
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @af32, i64 0, i64 [[INDEX]]
+; RV64-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i32 0
+; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
+; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @bf32, i64 0, i64 [[INDEX]]
+; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 0
+; RV64-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
+; RV64-NEXT: [[TMP10:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]])
+; RV64-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @cf32, i64 0, i64 [[INDEX]]
+; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i32 0
+; RV64-NEXT: store <vscale x 4 x float> [[TMP10]], ptr [[TMP12]], align 4
+; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; RV64-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV64: [[MIDDLE_BLOCK]]:
+; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
+; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
+; RV64: [[SCALAR_PH]]:
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; RV64-NEXT: br label %[[FOR_BODY:.*]]
+; RV64: [[FOR_COND_CLEANUP]]:
+; RV64-NEXT: ret void
+; RV64: [[FOR_BODY]]:
+; RV64-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; RV64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @af32, i64 0, i64 [[INDVARS_IV]]
+; RV64-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; RV64-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @bf32, i64 0, i64 [[INDVARS_IV]]
+; RV64-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; RV64-NEXT: [[TMP16:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP14]], float [[TMP15]])
+; RV64-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @cf32, i64 0, i64 [[INDVARS_IV]]
+; RV64-NEXT: store float [[TMP16]], ptr [[ARRAYIDX4]], align 4
+; RV64-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; RV64-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
+; RV64-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; ARM64-LABEL: define dso_local void @f32min(
+; ARM64-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; ARM64-NEXT: [[ENTRY:.*]]:
+; ARM64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; ARM64: [[VECTOR_PH]]:
+; ARM64-NEXT: br label %[[VECTOR_BODY:.*]]
+; ARM64: [[VECTOR_BODY]]:
+; ARM64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ARM64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @af32, i64 0, i64 [[INDEX]]
+; ARM64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 0
+; ARM64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 4
+; ARM64-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; ARM64-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; ARM64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @bf32, i64 0, i64 [[INDEX]]
+; ARM64-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i32 0
+; ARM64-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i32 4
+; ARM64-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP4]], align 4
+; ARM64-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP5]], align 4
+; ARM64-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD2]])
+; ARM64-NEXT: [[TMP7:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD1]], <4 x float> [[WIDE_LOAD3]])
+; ARM64-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @cf32, i64 0, i64 [[INDEX]]
+; ARM64-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 0
+; ARM64-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 4
+; ARM64-NEXT: store <4 x float> [[TMP6]], ptr [[TMP9]], align 4
+; ARM64-NEXT: store <4 x float> [[TMP7]], ptr [[TMP10]], align 4
+; ARM64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; ARM64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; ARM64-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; ARM64: [[MIDDLE_BLOCK]]:
+; ARM64-NEXT: br i1 true, label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
+; ARM64: [[SCALAR_PH]]:
+; ARM64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; ARM64-NEXT: br label %[[FOR_BODY:.*]]
+; ARM64: [[FOR_COND_CLEANUP]]:
+; ARM64-NEXT: ret void
+; ARM64: [[FOR_BODY]]:
+; ARM64-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; ARM64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @af32, i64 0, i64 [[INDVARS_IV]]
+; ARM64-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; ARM64-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @bf32, i64 0, i64 [[INDVARS_IV]]
+; ARM64-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; ARM64-NEXT: [[TMP14:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP12]], float [[TMP13]])
+; ARM64-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @cf32, i64 0, i64 [[INDVARS_IV]]
+; ARM64-NEXT: store float [[TMP14]], ptr [[ARRAYIDX4]], align 4
+; ARM64-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; ARM64-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
+; ARM64-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; X64-LABEL: define dso_local void @f32min() local_unnamed_addr {
+; X64-NEXT: [[ENTRY:.*]]:
+; X64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; X64: [[VECTOR_PH]]:
+; X64-NEXT: br label %[[VECTOR_BODY:.*]]
+; X64: [[VECTOR_BODY]]:
+; X64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; X64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @af32, i64 0, i64 [[INDEX]]
+; X64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 0
+; X64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 4
+; X64-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; X64-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; X64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @bf32, i64 0, i64 [[INDEX]]
+; X64-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i32 0
+; X64-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i32 4
+; X64-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP4]], align 4
+; X64-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP5]], align 4
+; X64-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD2]])
+; X64-NEXT: [[TMP7:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD1]], <4 x float> [[WIDE_LOAD3]])
+; X64-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @cf32, i64 0, i64 [[INDEX]]
+; X64-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 0
+; X64-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 4
+; X64-NEXT: store <4 x float> [[TMP6]], ptr [[TMP9]], align 4
+; X64-NEXT: store <4 x float> [[TMP7]], ptr [[TMP10]], align 4
+; X64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; X64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; X64-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; X64: [[MIDDLE_BLOCK]]:
+; X64-NEXT: br i1 true, label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
+; X64: [[SCALAR_PH]]:
+; X64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; X64-NEXT: br label %[[FOR_BODY:.*]]
+; X64: [[FOR_COND_CLEANUP]]:
+; X64-NEXT: ret void
+; X64: [[FOR_BODY]]:
+; X64-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; X64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @af32, i64 0, i64 [[INDVARS_IV]]
+; X64-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; X64-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @bf32, i64 0, i64 [[INDVARS_IV]]
+; X64-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; X64-NEXT: [[TMP14:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP12]], float [[TMP13]])
+; X64-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr @cf32, i64 0, i64 [[INDVARS_IV]]
+; X64-NEXT: store float [[TMP14]], ptr [[ARRAYIDX4]], align 4
+; X64-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; X64-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
+; X64-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds nuw [4096 x float], ptr @af32, i64 0, i64 %indvars.iv
+ %0 = load float, ptr %arrayidx, align 4
----------------
wzssyqa wrote:
They are already named: the globals are {a,b,c}{f64,f32,f16}.
OK, I will use better names than `a`, `b`, `c`.
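For reference, here is a minimal C sketch of the kind of source loop this test IR corresponds to, with array names mirroring the globals above. This is only an illustration: it assumes a C23 toolchain providing `fminimum_numf`, which recent clang is expected to lower to the `@llvm.minimumnum.f32` intrinsic seen in the checks; the test itself is written directly in IR.

```c
/* Illustrative only: C source resembling the f32 test loop.
   Assumes C23 fminimum_numf is available and lowered to the
   llvm.minimumnum.f32 intrinsic; the actual test is hand-fed IR. */
#include <math.h>

#define N 4096
float af32[N], bf32[N], cf32[N];

void f32min(void) {
  for (int i = 0; i < N; i++)   /* element-wise loop the vectorizer can widen */
    cf32[i] = fminimum_numf(af32[i], bf32[i]);
}
```

With the loop vectorizer enabled, each RUN line above is expected to widen the scalar call into the `@llvm.minimumnum.v4f32` (or `nxv4f32` for RVV) form shown in the CHECK lines.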
https://github.com/llvm/llvm-project/pull/131781