[llvm] [AArch64] Spare N2I roundtrip when splatting float comparison (PR #141806)
David Green via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 6 01:22:20 PDT 2025
================
@@ -0,0 +1,378 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-NOFULLFP16
+; RUN: llc < %s -mtriple=aarch64 --enable-no-nans-fp-math | FileCheck %s --check-prefixes=CHECK,CHECK-NONANS
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FULLFP16
+
+define <1 x float> @dup_v1i32_oeq(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_oeq:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp oeq float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ogt(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ogt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp ogt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_oge(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_oge:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp oge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_olt(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_olt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp olt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ole(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ole:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp ole float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_one(float %a, float %b) {
+; CHECK-NOFULLFP16-LABEL: dup_v1i32_one:
+; CHECK-NOFULLFP16: // %bb.0: // %entry
+; CHECK-NOFULLFP16-NEXT: fcmgt s2, s0, s1
+; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
+; CHECK-NOFULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NOFULLFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NOFULLFP16-NEXT: ret
+;
+; CHECK-NONANS-LABEL: dup_v1i32_one:
+; CHECK-NONANS: // %bb.0: // %entry
+; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
+; CHECK-NONANS-NEXT: mvn v0.8b, v0.8b
+; CHECK-NONANS-NEXT: ret
+;
+; CHECK-FULLFP16-LABEL: dup_v1i32_one:
+; CHECK-FULLFP16: // %bb.0: // %entry
+; CHECK-FULLFP16-NEXT: fcmgt s2, s0, s1
+; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
+; CHECK-FULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-FULLFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-FULLFP16-NEXT: ret
+entry:
+ %0 = fcmp one float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ord(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ord:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp ord float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ueq(float %a, float %b) {
+; CHECK-NOFULLFP16-LABEL: dup_v1i32_ueq:
+; CHECK-NOFULLFP16: // %bb.0: // %entry
+; CHECK-NOFULLFP16-NEXT: fcmgt s2, s0, s1
+; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
+; CHECK-NOFULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-NOFULLFP16-NEXT: ret
+;
+; CHECK-NONANS-LABEL: dup_v1i32_ueq:
+; CHECK-NONANS: // %bb.0: // %entry
+; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
+; CHECK-NONANS-NEXT: ret
+;
+; CHECK-FULLFP16-LABEL: dup_v1i32_ueq:
+; CHECK-FULLFP16: // %bb.0: // %entry
+; CHECK-FULLFP16-NEXT: fcmgt s2, s0, s1
+; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
+; CHECK-FULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-FULLFP16-NEXT: ret
+entry:
+ %0 = fcmp ueq float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ugt(float %a, float %b) {
+; CHECK-NOFULLFP16-LABEL: dup_v1i32_ugt:
+; CHECK-NOFULLFP16: // %bb.0: // %entry
+; CHECK-NOFULLFP16-NEXT: fcmge s0, s1, s0
+; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-NOFULLFP16-NEXT: ret
+;
+; CHECK-NONANS-LABEL: dup_v1i32_ugt:
+; CHECK-NONANS: // %bb.0: // %entry
+; CHECK-NONANS-NEXT: fcmgt s0, s0, s1
+; CHECK-NONANS-NEXT: ret
+;
+; CHECK-FULLFP16-LABEL: dup_v1i32_ugt:
+; CHECK-FULLFP16: // %bb.0: // %entry
+; CHECK-FULLFP16-NEXT: fcmge s0, s1, s0
+; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-FULLFP16-NEXT: ret
+entry:
+ %0 = fcmp ugt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_uge(float %a, float %b) {
+; CHECK-NOFULLFP16-LABEL: dup_v1i32_uge:
+; CHECK-NOFULLFP16: // %bb.0: // %entry
+; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
+; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-NOFULLFP16-NEXT: ret
+;
+; CHECK-NONANS-LABEL: dup_v1i32_uge:
+; CHECK-NONANS: // %bb.0: // %entry
+; CHECK-NONANS-NEXT: fcmge s0, s0, s1
+; CHECK-NONANS-NEXT: ret
+;
+; CHECK-FULLFP16-LABEL: dup_v1i32_uge:
+; CHECK-FULLFP16: // %bb.0: // %entry
+; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
+; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-FULLFP16-NEXT: ret
+entry:
+ %0 = fcmp uge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ult(float %a, float %b) {
+; CHECK-NOFULLFP16-LABEL: dup_v1i32_ult:
+; CHECK-NOFULLFP16: // %bb.0: // %entry
+; CHECK-NOFULLFP16-NEXT: fcmge s0, s0, s1
+; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-NOFULLFP16-NEXT: ret
+;
+; CHECK-NONANS-LABEL: dup_v1i32_ult:
+; CHECK-NONANS: // %bb.0: // %entry
+; CHECK-NONANS-NEXT: fcmgt s0, s1, s0
+; CHECK-NONANS-NEXT: ret
+;
+; CHECK-FULLFP16-LABEL: dup_v1i32_ult:
+; CHECK-FULLFP16: // %bb.0: // %entry
+; CHECK-FULLFP16-NEXT: fcmge s0, s0, s1
+; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-FULLFP16-NEXT: ret
+entry:
+ %0 = fcmp ult float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ule(float %a, float %b) {
+; CHECK-NOFULLFP16-LABEL: dup_v1i32_ule:
+; CHECK-NOFULLFP16: // %bb.0: // %entry
+; CHECK-NOFULLFP16-NEXT: fcmgt s0, s0, s1
+; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-NOFULLFP16-NEXT: ret
+;
+; CHECK-NONANS-LABEL: dup_v1i32_ule:
+; CHECK-NONANS: // %bb.0: // %entry
+; CHECK-NONANS-NEXT: fcmge s0, s1, s0
+; CHECK-NONANS-NEXT: ret
+;
+; CHECK-FULLFP16-LABEL: dup_v1i32_ule:
+; CHECK-FULLFP16: // %bb.0: // %entry
+; CHECK-FULLFP16-NEXT: fcmgt s0, s0, s1
+; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
+; CHECK-FULLFP16-NEXT: ret
+entry:
+ %0 = fcmp ule float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_une(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_une:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp une float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_uno(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_uno:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp uno float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <4 x float> @dup_v4i32(float %a, float %b) {
+; CHECK-LABEL: dup_v4i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp oge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <4 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <4 x i32> %vecinit.i to <4 x float>
+ %2 = shufflevector <4 x float> %1, <4 x float> poison, <4 x i32> zeroinitializer
+ ret <4 x float> %2
+}
+
+define <4 x float> @dup_v4i32_reversed(float %a, float %b) {
+; CHECK-LABEL: dup_v4i32_reversed:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp ogt float %b, %a
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <4 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <4 x i32> %vecinit.i to <4 x float>
+ %2 = shufflevector <4 x float> %1, <4 x float> poison, <4 x i32> zeroinitializer
+ ret <4 x float> %2
+}
+
+define <2 x double> @dup_v2i64(double %a, double %b) {
+; CHECK-LABEL: dup_v2i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt d0, d0, d1
+; CHECK-NEXT: dup v0.2d, v0.d[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp ogt double %a, %b
+ %vcmpd.i = sext i1 %0 to i64
+ %vecinit.i = insertelement <2 x i64> poison, i64 %vcmpd.i, i64 0
+ %1 = bitcast <2 x i64> %vecinit.i to <2 x double>
+ %2 = shufflevector <2 x double> %1, <2 x double> poison, <2 x i32> zeroinitializer
+ ret <2 x double> %2
+}
+
+define <8 x half> @dup_v8i16(half %a, half %b) {
+; CHECK-NOFULLFP16-LABEL: dup_v8i16:
+; CHECK-NOFULLFP16: // %bb.0: // %entry
+; CHECK-NOFULLFP16-NEXT: fcvt s1, h1
+; CHECK-NOFULLFP16-NEXT: fcvt s0, h0
+; CHECK-NOFULLFP16-NEXT: fcmeq s0, s0, s1
+; CHECK-NOFULLFP16-NEXT: ret
+;
+; CHECK-NONANS-LABEL: dup_v8i16:
+; CHECK-NONANS: // %bb.0: // %entry
+; CHECK-NONANS-NEXT: fcvt s1, h1
+; CHECK-NONANS-NEXT: fcvt s0, h0
+; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
+; CHECK-NONANS-NEXT: ret
+;
+; CHECK-FULLFP16-LABEL: dup_v8i16:
+; CHECK-FULLFP16: // %bb.0: // %entry
+; CHECK-FULLFP16-NEXT: fcmp h0, h1
+; CHECK-FULLFP16-NEXT: csetm w8, eq
+; CHECK-FULLFP16-NEXT: fmov s0, w8
+; CHECK-FULLFP16-NEXT: ret
+; FIXME: Could be replaced with fcmeq + dup but the type of the former is
+; promoted to i32 during selection and then the optimization does not apply.
+
+entry:
+ %0 = fcmp oeq half %a, %b
+ %vcmpd.i = sext i1 %0 to i16
+ %vecinit.i = insertelement <8 x i16> poison, i16 %vcmpd.i, i64 0
+ %1 = bitcast <8 x i16> %vecinit.i to <8 x half>
+ ret <8 x half> %1
+}
+
+; Unrelated to dups but still uses one less instruction.
----------------
davemgreen wrote:
I think it is the same number of instructions for this one. Some of the others that need to be expanded previously produced fcmp+csetm+csinv. Is it possible to limit this to places where the result will be used in a vector register?
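
As a rough sketch of the distinction between "result used in a vector register" and "result used as a scalar" (illustrative IR only, not taken from this patch; the function name is hypothetical):

; Hypothetical scalar-use case: the sign-extended compare result is returned
; as a plain i32, so it naturally lives in a general-purpose register and
; there is no FPR-to-GPR round trip to avoid in the first place.
define i32 @cmp_result_used_as_scalar(float %a, float %b) {
entry:
  %cmp = fcmp oeq float %a, %b
  %ext = sext i1 %cmp to i32
  ret i32 %ext
}

For a case like this the existing fcmp+csetm-style lowering is presumably already the right choice, whereas the insertelement/bitcast cases in the test above feed the result straight into a SIMD register.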
https://github.com/llvm/llvm-project/pull/141806
More information about the llvm-commits mailing list