[llvm] [AArch64] Improve bf16 fp_extend lowering. (PR #118966)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 6 05:16:30 PST 2024
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-aarch64
Author: David Green (davemgreen)
<details>
<summary>Changes</summary>
A bf16 fp_extend is just a shift into the higher bits. This changes the lowering from using a relatively ugly tablegen pattern, to ISel generating the shift using an extended vector. This is cleaner and should optimize better. StrictFP goes through the same route as it cannot round or set flags.
---
Patch is 187.20 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/118966.diff
11 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+57-4)
- (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.td (-18)
- (modified) llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll (+6-8)
- (modified) llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll (+28-38)
- (modified) llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll (+43-69)
- (modified) llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll (+43-69)
- (modified) llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll (+28-38)
- (modified) llvm/test/CodeGen/AArch64/bf16-instructions.ll (+413-689)
- (modified) llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll (+619-1073)
- (modified) llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll (+8-16)
- (modified) llvm/test/CodeGen/AArch64/round-fptosi-sat-scalar.ll (+12-20)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d1354ccf376609..f490929b0afe71 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -753,6 +753,14 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(Op, MVT::v8bf16, Expand);
}
+ // For bf16, fpextend is custom lowered, optionally expanding into shifts.
+ setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
+ setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
+ setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Custom);
+ setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
+ setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
+ setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f32, Custom);
+
auto LegalizeNarrowFP = [this](MVT ScalarVT) {
for (auto Op : {
ISD::SETCC,
@@ -893,10 +901,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(Op, MVT::f16, Legal);
}
- // Strict conversion to a larger type is legal
- for (auto VT : {MVT::f32, MVT::f64})
- setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
-
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
setOperationAction(ISD::GET_ROUNDING, MVT::i32, Custom);
@@ -4419,6 +4423,54 @@ SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
return LowerFixedLengthFPExtendToSVE(Op, DAG);
+ bool IsStrict = Op->isStrictFPOpcode();
+ SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
+ EVT Op0VT = Op0.getValueType();
+ if (VT == MVT::f64) {
+ // FP32->FP64 and FP16->FP64 extends are legal.
+ if (Op0VT == MVT::f32 || Op0VT == MVT::f16)
+ return Op;
+ // Split bf16->f64 extends into two fpextends.
+ if (Op0VT == MVT::bf16 && IsStrict) {
+ SDValue Ext1 =
+ DAG.getNode(ISD::STRICT_FP_EXTEND, SDLoc(Op), {MVT::f32, MVT::Other},
+ {Op0, Op.getOperand(0)});
+ return DAG.getNode(ISD::STRICT_FP_EXTEND, SDLoc(Op), {VT, MVT::Other},
+ {Ext1, Ext1.getValue(1)});
+ }
+ if (Op0VT == MVT::bf16)
+ return DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), VT,
+ DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, Op0));
+ return SDValue();
+ }
+
+ if (VT.getScalarType() == MVT::f32) {
+ // FP16->FP32 extends are legal for f32 and v4f32.
+ if (Op0VT.getScalarType() == MVT::f16)
+ return Op;
+ if (Op0VT.getScalarType() == MVT::bf16) {
+ SDLoc DL(Op);
+ EVT IVT = VT.changeTypeToInteger();
+ if (!Op0VT.isVector()) {
+ Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4bf16, Op0);
+ IVT = MVT::v4i32;
+ }
+
+ EVT Op0IVT = Op0.getValueType().changeTypeToInteger();
+ SDValue Ext =
+ DAG.getNode(ISD::ANY_EXTEND, DL, IVT, DAG.getBitcast(Op0IVT, Op0));
+ SDValue Shift =
+ DAG.getNode(ISD::SHL, DL, IVT, Ext, DAG.getConstant(16, DL, IVT));
+ if (!Op0VT.isVector())
+ Shift = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Shift,
+ DAG.getConstant(0, DL, MVT::i64));
+ Shift = DAG.getBitcast(VT, Shift);
+ return IsStrict ? DAG.getMergeValues({Shift, Op.getOperand(0)}, DL)
+ : Shift;
+ }
+ return SDValue();
+ }
+
assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
return SDValue();
}
@@ -7266,6 +7318,7 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
case ISD::STRICT_FP_ROUND:
return LowerFP_ROUND(Op, DAG);
case ISD::FP_EXTEND:
+ case ISD::STRICT_FP_EXTEND:
return LowerFP_EXTEND(Op, DAG);
case ISD::FRAMEADDR:
return LowerFRAMEADDR(Op, DAG);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index d015cc15581ad0..dd55ac566dc299 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5115,22 +5115,6 @@ let Predicates = [HasFullFP16] in {
//===----------------------------------------------------------------------===//
defm FCVT : FPConversion<"fcvt">;
-// Helper to get bf16 into fp32.
-def cvt_bf16_to_fp32 :
- OutPatFrag<(ops node:$Rn),
- (f32 (COPY_TO_REGCLASS
- (i32 (UBFMWri
- (i32 (COPY_TO_REGCLASS (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
- node:$Rn, hsub), GPR32)),
- (i64 (i32shift_a (i64 16))),
- (i64 (i32shift_b (i64 16))))),
- FPR32))>;
-// Pattern for bf16 -> fp32.
-def : Pat<(f32 (any_fpextend (bf16 FPR16:$Rn))),
- (cvt_bf16_to_fp32 FPR16:$Rn)>;
-// Pattern for bf16 -> fp64.
-def : Pat<(f64 (any_fpextend (bf16 FPR16:$Rn))),
- (FCVTDSr (f32 (cvt_bf16_to_fp32 FPR16:$Rn)))>;
//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
@@ -8343,8 +8327,6 @@ def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>
def : Pat<(v2i64 (sext (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
-// Vector bf16 -> fp32 is implemented morally as a zext + shift.
-def : Pat<(v4f32 (any_fpextend (v4bf16 V64:$Rn))), (SHLLv4i16 V64:$Rn)>;
// Also match an extend from the upper half of a 128 bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_high_v16i8 (v16i8 V128:$Rn)) ))),
(USHLLv16i8_shift V128:$Rn, (i32 0))>;
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
index 1aa28f5c2733db..9a1203f18243dd 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
@@ -156,11 +156,10 @@ define i32 @fptosi_bf(bfloat %a) nounwind ssp {
; CHECK-LABEL: fptosi_bf:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fmov s1, s0
-; CHECK-NEXT: // implicit-def: $s0
+; CHECK-NEXT: // implicit-def: $d0
; CHECK-NEXT: fmov s0, s1
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: fcvtzs w0, s0
; CHECK-NEXT: ret
entry:
@@ -173,11 +172,10 @@ define i32 @fptoui_sbf(bfloat %a) nounwind ssp {
; CHECK-LABEL: fptoui_sbf:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fmov s1, s0
-; CHECK-NEXT: // implicit-def: $s0
+; CHECK-NEXT: // implicit-def: $d0
; CHECK-NEXT: fmov s0, s1
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: fcvtzu w0, s0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
index ed9c1b037d0cc7..fb40dfcbe101db 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
@@ -182,17 +182,14 @@ define half @test_atomicrmw_fadd_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align2(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align2:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB2_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fadd s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -202,36 +199,34 @@ define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB2_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align2:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB2_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fadd s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB2_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align2:
@@ -281,17 +276,14 @@ define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align4(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align4:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB3_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fadd s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -301,36 +293,34 @@ define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB3_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align4:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB3_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fadd s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB3_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align4:
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
index 888b795876f7df..818dcf3a0b4876 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
@@ -184,17 +184,14 @@ define half @test_atomicrmw_fmax_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align2(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align2:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB2_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fmaxnm s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -204,36 +201,34 @@ define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB2_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align2:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB2_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fmaxnm s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB2_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align2:
@@ -283,17 +278,14 @@ define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align4(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align4:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB3_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fmaxnm s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -303,36 +295,34 @@ define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB3_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align4:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB3_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fmaxnm s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB3_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align4:
@@ -653,31 +643,23 @@ define <2 x bfloat> @test_atomicrmw_fmax_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; NOLSE-LABEL: test_atomicrmw_fmax_v2bf16_seq_cst_align4:
; NOLSE: // %bb.0:
; NOLSE-NEXT: // kill: def $d0 killed $d0 def $q0
-; NOLSE-NEXT: mov h1, v0.h[1]
-; NOLSE-NEXT: fmov w10, s0
+; NOLSE-NEXT: dup v1.4h, v0.h[1]
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w10, w10, #16
-; NOLSE-NEXT: fmov w9, s1
-; NOLSE-NEXT: fmov s1, w10
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s0, w9
+; NOLSE-NEXT: shll v0.4s, v0.4h, #16
+; NOLSE-NEXT: shll v1.4s, v1.4h, #16
; NOLSE-NEXT: .LBB7_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxr w9, [x0]
; NOLSE-NEXT: fmov s2, w9
-; NOLSE-NEXT: mov h3, v2.h[1]
-; NOLSE-NEXT: fmov w11, s2
-; NOLSE-NEXT: lsl w11, w11, #16
-; NOLSE-NEXT: fmov w10, s3
-; NOLSE-NEXT: fmov s3, w11
-; NOLSE-NEXT: lsl w10, w10, #16
-; NOLSE-NEXT: fmaxnm s3, s3, s1
-; NOLSE-NEXT: fmov s2, w10
+; NOLSE-NEXT: dup v3.4h, v2.h[1]
+; NOLSE-NEXT: shll v2.4s, v2.4h, #16
; NOLSE-NEXT: fmaxnm s2, s2, s0
-; NOLSE-NEXT: fmov w11, s3
+; NOLSE-NEXT: shll v3.4s, v3.4h, #16
+; NOLSE-NEXT: fmaxnm s3, s3, s1
+; NOLSE-NEXT: fmov w11, s2
; NOLSE-NEXT: ubfx w13, w11, #16, #1
; NOLSE-NEXT: add w11, w11, w8
-; NOLSE-NEXT: fmov w10, s2
+; NOLSE-NEXT: fmov w10, s3
; NOLSE-NEXT: add w11, w13, w11
; NOLSE-NEXT: lsr w11, w11, #16
; NOLSE-NEXT: ubfx w12, w10, #16, #1
@@ -697,25 +679,17 @@ define <2 x bfloat> @test_atomicrmw_fmax_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; LSE-LABEL: test_atomicrmw_fmax_v2bf16_seq_cst_align4:
; LSE: // %bb.0:
; LSE-NEXT: // kill: def $d0 killed $d0 def $q0
-; LSE-NEXT: mov h1, v0.h[1]
-; LSE-NEXT: fmov w10, s0
+; LSE-NEXT: dup v1.4h, v0.h[1]
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr s0, [x0]
-; LSE-NEXT: lsl w10, w10, #1...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/118966
More information about the llvm-commits
mailing list