[llvm-branch-commits] [llvm] 0fa1dda - [NFC][SVE] s/fast/contract/ in test sve-fp-combine.ll
Peter Waller via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Wed Dec 8 08:12:29 PST 2021
Author: Peter Waller
Date: 2021-12-08T16:06:05Z
New Revision: 0fa1dda5eb642f678d9aa5f70c5e525583f653e0
URL: https://github.com/llvm/llvm-project/commit/0fa1dda5eb642f678d9aa5f70c5e525583f653e0
DIFF: https://github.com/llvm/llvm-project/commit/0fa1dda5eb642f678d9aa5f70c5e525583f653e0.diff
LOG: [NFC][SVE] s/fast/contract/ in test sve-fp-combine.ll
These tests don't currently make use of any fast-math flag other than
contract. This will change in D109525, which will introduce a dependency
on nsz where negation is involved.
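For context, a minimal sketch (not part of the commit; the function name
@contract_only is hypothetical) of why contract is the only flag these tests
need: per the LLVM LangRef, the contract flag alone permits fusing a separate
fmul and fadd into a fused multiply-add, which is exactly the fmla/fmad
combine under test, whereas fast additionally implies nnan, ninf, nsz, arcp,
reassoc and afn.

define <vscale x 2 x double> @contract_only(<vscale x 2 x double> %acc, <vscale x 2 x double> %m1, <vscale x 2 x double> %m2) {
  ; 'contract' on both operations is sufficient for the backend to
  ; combine them into a single SVE fmla; no other fast-math flag is needed.
  %mul = fmul contract <vscale x 2 x double> %m1, %m2
  %res = fadd contract <vscale x 2 x double> %acc, %mul
  ret <vscale x 2 x double> %res
}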
Added:
Modified:
llvm/test/CodeGen/AArch64/sve-fp-combine.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-combine.ll b/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
index 6fcf45d9286c8..d9bf9653097c3 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
@@ -6,8 +6,8 @@ define <vscale x 8 x half> @fmla_h_sel(<vscale x 8 x i1> %pred, <vscale x 8 x ha
; CHECK: // %bb.0:
; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %add = fadd fast <vscale x 8 x half> %acc, %mul
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %add = fadd contract <vscale x 8 x half> %acc, %mul
%res = select <vscale x 8 x i1> %pred, <vscale x 8 x half> %add, <vscale x 8 x half> %acc
ret <vscale x 8 x half> %res
}
@@ -17,8 +17,8 @@ define <vscale x 4 x half> @fmla_hx4_sel(<vscale x 4 x i1> %pred, <vscale x 4 x
; CHECK: // %bb.0:
; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %add = fadd fast <vscale x 4 x half> %acc, %mul
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %add = fadd contract <vscale x 4 x half> %acc, %mul
%res = select <vscale x 4 x i1> %pred, <vscale x 4 x half> %add, <vscale x 4 x half> %acc
ret <vscale x 4 x half> %res
}
@@ -28,8 +28,8 @@ define <vscale x 2 x half> @fmla_hx2_sel(<vscale x 2 x i1> %pred, <vscale x 2 x
; CHECK: // %bb.0:
; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %add = fadd fast <vscale x 2 x half> %acc, %mul
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %add = fadd contract <vscale x 2 x half> %acc, %mul
%res = select <vscale x 2 x i1> %pred, <vscale x 2 x half> %add, <vscale x 2 x half> %acc
ret <vscale x 2 x half> %res
}
@@ -39,8 +39,8 @@ define <vscale x 4 x float> @fmla_s_sel(<vscale x 4 x i1> %pred, <vscale x 4 x f
; CHECK: // %bb.0:
; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %add = fadd fast <vscale x 4 x float> %acc, %mul
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %add = fadd contract <vscale x 4 x float> %acc, %mul
%res = select <vscale x 4 x i1> %pred, <vscale x 4 x float> %add, <vscale x 4 x float> %acc
ret <vscale x 4 x float> %res
}
@@ -50,8 +50,8 @@ define <vscale x 2 x float> @fmla_sx2_sel(<vscale x 2 x i1> %pred, <vscale x 2 x
; CHECK: // %bb.0:
; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %add = fadd fast <vscale x 2 x float> %acc, %mul
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %add = fadd contract <vscale x 2 x float> %acc, %mul
%res = select <vscale x 2 x i1> %pred, <vscale x 2 x float> %add, <vscale x 2 x float> %acc
ret <vscale x 2 x float> %res
}
@@ -61,8 +61,8 @@ define <vscale x 2 x double> @fmla_d_sel(<vscale x 2 x i1> %pred, <vscale x 2 x
; CHECK: // %bb.0:
; CHECK-NEXT: fmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %add = fadd fast <vscale x 2 x double> %acc, %mul
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %add = fadd contract <vscale x 2 x double> %acc, %mul
%res = select <vscale x 2 x i1> %pred, <vscale x 2 x double> %add, <vscale x 2 x double> %acc
ret <vscale x 2 x double> %res
}
@@ -72,8 +72,8 @@ define <vscale x 8 x half> @fmls_h_sel(<vscale x 8 x i1> %pred, <vscale x 8 x ha
; CHECK: // %bb.0:
; CHECK-NEXT: fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %sub = fsub fast <vscale x 8 x half> %acc, %mul
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %sub = fsub contract <vscale x 8 x half> %acc, %mul
%res = select <vscale x 8 x i1> %pred, <vscale x 8 x half> %sub, <vscale x 8 x half> %acc
ret <vscale x 8 x half> %res
}
@@ -83,8 +83,8 @@ define <vscale x 4 x half> @fmls_hx4_sel(<vscale x 4 x i1> %pred, <vscale x 4 x
; CHECK: // %bb.0:
; CHECK-NEXT: fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %sub = fsub fast <vscale x 4 x half> %acc, %mul
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %sub = fsub contract <vscale x 4 x half> %acc, %mul
%res = select <vscale x 4 x i1> %pred, <vscale x 4 x half> %sub, <vscale x 4 x half> %acc
ret <vscale x 4 x half> %res
}
@@ -94,8 +94,8 @@ define <vscale x 2 x half> @fmls_hx2_sel(<vscale x 2 x i1> %pred, <vscale x 2 x
; CHECK: // %bb.0:
; CHECK-NEXT: fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %sub = fsub fast <vscale x 2 x half> %acc, %mul
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %sub = fsub contract <vscale x 2 x half> %acc, %mul
%res = select <vscale x 2 x i1> %pred, <vscale x 2 x half> %sub, <vscale x 2 x half> %acc
ret <vscale x 2 x half> %res
}
@@ -105,8 +105,8 @@ define <vscale x 4 x float> @fmls_s_sel(<vscale x 4 x i1> %pred, <vscale x 4 x f
; CHECK: // %bb.0:
; CHECK-NEXT: fmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %sub = fsub fast <vscale x 4 x float> %acc, %mul
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %sub = fsub contract <vscale x 4 x float> %acc, %mul
%res = select <vscale x 4 x i1> %pred, <vscale x 4 x float> %sub, <vscale x 4 x float> %acc
ret <vscale x 4 x float> %res
}
@@ -116,8 +116,8 @@ define <vscale x 2 x float> @fmls_sx2_sel(<vscale x 2 x i1> %pred, <vscale x 2 x
; CHECK: // %bb.0:
; CHECK-NEXT: fmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %sub = fsub fast <vscale x 2 x float> %acc, %mul
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %sub = fsub contract <vscale x 2 x float> %acc, %mul
%res = select <vscale x 2 x i1> %pred, <vscale x 2 x float> %sub, <vscale x 2 x float> %acc
ret <vscale x 2 x float> %res
}
@@ -127,8 +127,8 @@ define <vscale x 2 x double> @fmls_d_sel(<vscale x 2 x i1> %pred, <vscale x 2 x
; CHECK: // %bb.0:
; CHECK-NEXT: fmls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %sub = fsub fast <vscale x 2 x double> %acc, %mul
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %sub = fsub contract <vscale x 2 x double> %acc, %mul
%res = select <vscale x 2 x i1> %pred, <vscale x 2 x double> %sub, <vscale x 2 x double> %acc
ret <vscale x 2 x double> %res
}
@@ -139,8 +139,8 @@ define <vscale x 8 x half> @fmad_h(<vscale x 8 x half> %m1, <vscale x 8 x half>
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %res = fadd fast <vscale x 8 x half> %acc, %mul
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %res = fadd contract <vscale x 8 x half> %acc, %mul
ret <vscale x 8 x half> %res
}
@@ -150,8 +150,8 @@ define <vscale x 4 x half> @fmad_hx4(<vscale x 4 x half> %m1, <vscale x 4 x half
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %res = fadd fast <vscale x 4 x half> %acc, %mul
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %res = fadd contract <vscale x 4 x half> %acc, %mul
ret <vscale x 4 x half> %res
}
@@ -161,8 +161,8 @@ define <vscale x 2 x half> @fmad_hx2(<vscale x 2 x half> %m1, <vscale x 2 x half
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %res = fadd fast <vscale x 2 x half> %acc, %mul
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %res = fadd contract <vscale x 2 x half> %acc, %mul
ret <vscale x 2 x half> %res
}
@@ -172,8 +172,8 @@ define <vscale x 4 x float> @fmad_s(<vscale x 4 x float> %m1, <vscale x 4 x floa
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %res = fadd fast <vscale x 4 x float> %acc, %mul
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %res = fadd contract <vscale x 4 x float> %acc, %mul
ret <vscale x 4 x float> %res
}
@@ -183,8 +183,8 @@ define <vscale x 2 x float> @fmad_sx2(<vscale x 2 x float> %m1, <vscale x 2 x fl
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %res = fadd fast <vscale x 2 x float> %acc, %mul
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %res = fadd contract <vscale x 2 x float> %acc, %mul
ret <vscale x 2 x float> %res
}
@@ -194,8 +194,8 @@ define <vscale x 2 x double> @fmad_d(<vscale x 2 x double> %m1, <vscale x 2 x do
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %res = fadd fast <vscale x 2 x double> %acc, %mul
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %res = fadd contract <vscale x 2 x double> %acc, %mul
ret <vscale x 2 x double> %res
}
@@ -205,8 +205,8 @@ define <vscale x 8 x half> @fmla_h(<vscale x 8 x half> %acc, <vscale x 8 x half>
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %res = fadd fast <vscale x 8 x half> %acc, %mul
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %res = fadd contract <vscale x 8 x half> %acc, %mul
ret <vscale x 8 x half> %res
}
@@ -216,8 +216,8 @@ define <vscale x 4 x half> @fmla_hx4(<vscale x 4 x half> %acc, <vscale x 4 x hal
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %res = fadd fast <vscale x 4 x half> %acc, %mul
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %res = fadd contract <vscale x 4 x half> %acc, %mul
ret <vscale x 4 x half> %res
}
@@ -227,8 +227,8 @@ define <vscale x 2 x half> @fmla_hx2(<vscale x 2 x half> %acc, <vscale x 2 x hal
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %res = fadd fast <vscale x 2 x half> %acc, %mul
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %res = fadd contract <vscale x 2 x half> %acc, %mul
ret <vscale x 2 x half> %res
}
@@ -238,8 +238,8 @@ define <vscale x 4 x float> @fmla_s(<vscale x 4 x float> %acc, <vscale x 4 x flo
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %res = fadd fast <vscale x 4 x float> %acc, %mul
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %res = fadd contract <vscale x 4 x float> %acc, %mul
ret <vscale x 4 x float> %res
}
@@ -249,8 +249,8 @@ define <vscale x 2 x float> @fmla_sx2(<vscale x 2 x float> %acc, <vscale x 2 x f
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %res = fadd fast <vscale x 2 x float> %acc, %mul
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %res = fadd contract <vscale x 2 x float> %acc, %mul
ret <vscale x 2 x float> %res
}
@@ -260,8 +260,8 @@ define <vscale x 2 x double> @fmla_d(<vscale x 2 x double> %acc, <vscale x 2 x d
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %res = fadd fast <vscale x 2 x double> %acc, %mul
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %res = fadd contract <vscale x 2 x double> %acc, %mul
ret <vscale x 2 x double> %res
}
@@ -271,8 +271,8 @@ define <vscale x 8 x half> @fmls_h(<vscale x 8 x half> %acc, <vscale x 8 x half>
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %res = fsub fast <vscale x 8 x half> %acc, %mul
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %res = fsub contract <vscale x 8 x half> %acc, %mul
ret <vscale x 8 x half> %res
}
@@ -282,8 +282,8 @@ define <vscale x 4 x half> @fmls_hx4(<vscale x 4 x half> %acc, <vscale x 4 x hal
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %res = fsub fast <vscale x 4 x half> %acc, %mul
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %res = fsub contract <vscale x 4 x half> %acc, %mul
ret <vscale x 4 x half> %res
}
@@ -293,8 +293,8 @@ define <vscale x 2 x half> @fmls_hx2(<vscale x 2 x half> %acc, <vscale x 2 x hal
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %res = fsub fast <vscale x 2 x half> %acc, %mul
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %res = fsub contract <vscale x 2 x half> %acc, %mul
ret <vscale x 2 x half> %res
}
@@ -304,8 +304,8 @@ define <vscale x 4 x float> @fmls_s(<vscale x 4 x float> %acc, <vscale x 4 x flo
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %res = fsub fast <vscale x 4 x float> %acc, %mul
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %res = fsub contract <vscale x 4 x float> %acc, %mul
ret <vscale x 4 x float> %res
}
@@ -315,8 +315,8 @@ define <vscale x 2 x float> @fmls_sx2(<vscale x 2 x float> %acc, <vscale x 2 x f
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %res = fsub fast <vscale x 2 x float> %acc, %mul
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %res = fsub contract <vscale x 2 x float> %acc, %mul
ret <vscale x 2 x float> %res
}
@@ -326,8 +326,8 @@ define <vscale x 2 x double> @fmls_d(<vscale x 2 x double> %acc, <vscale x 2 x d
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %res = fsub fast <vscale x 2 x double> %acc, %mul
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %res = fsub contract <vscale x 2 x double> %acc, %mul
ret <vscale x 2 x double> %res
}
@@ -337,8 +337,8 @@ define <vscale x 8 x half> @fmsb_h(<vscale x 8 x half> %m1, <vscale x 8 x half>
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %res = fsub fast <vscale x 8 x half> %acc, %mul
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %res = fsub contract <vscale x 8 x half> %acc, %mul
ret <vscale x 8 x half> %res
}
@@ -348,8 +348,8 @@ define <vscale x 4 x half> @fmsb_hx4(<vscale x 4 x half> %m1, <vscale x 4 x half
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %res = fsub fast <vscale x 4 x half> %acc, %mul
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %res = fsub contract <vscale x 4 x half> %acc, %mul
ret <vscale x 4 x half> %res
}
@@ -359,8 +359,8 @@ define <vscale x 2 x half> @fmsb_hx2(<vscale x 2 x half> %m1, <vscale x 2 x half
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %res = fsub fast <vscale x 2 x half> %acc, %mul
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %res = fsub contract <vscale x 2 x half> %acc, %mul
ret <vscale x 2 x half> %res
}
@@ -370,8 +370,8 @@ define <vscale x 4 x float> @fmsb_s(<vscale x 4 x float> %m1, <vscale x 4 x floa
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %res = fsub fast <vscale x 4 x float> %acc, %mul
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %res = fsub contract <vscale x 4 x float> %acc, %mul
ret <vscale x 4 x float> %res
}
@@ -381,8 +381,8 @@ define <vscale x 2 x float> @fmsb_sx2(<vscale x 2 x float> %m1, <vscale x 2 x fl
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %res = fsub fast <vscale x 2 x float> %acc, %mul
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %res = fsub contract <vscale x 2 x float> %acc, %mul
ret <vscale x 2 x float> %res
}
@@ -392,8 +392,8 @@ define <vscale x 2 x double> @fmsb_d(<vscale x 2 x double> %m1, <vscale x 2 x do
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmsb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %res = fsub fast <vscale x 2 x double> %acc, %mul
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %res = fsub contract <vscale x 2 x double> %acc, %mul
ret <vscale x 2 x double> %res
}
@@ -403,9 +403,9 @@ define <vscale x 8 x half> @fnmad_h(<vscale x 8 x half> %m1, <vscale x 8 x half>
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 8 x half> %m1
- %mul = fmul fast <vscale x 8 x half> %neg_m1, %m2
- %res = fsub fast <vscale x 8 x half> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 8 x half> %m1
+ %mul = fmul contract <vscale x 8 x half> %neg_m1, %m2
+ %res = fsub contract <vscale x 8 x half> %mul, %acc
ret <vscale x 8 x half> %res
}
@@ -415,9 +415,9 @@ define <vscale x 4 x half> @fnmad_hx4(<vscale x 4 x half> %m1, <vscale x 4 x hal
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 4 x half> %m1
- %mul = fmul fast <vscale x 4 x half> %neg_m1, %m2
- %res = fsub fast <vscale x 4 x half> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 4 x half> %m1
+ %mul = fmul contract <vscale x 4 x half> %neg_m1, %m2
+ %res = fsub contract <vscale x 4 x half> %mul, %acc
ret <vscale x 4 x half> %res
}
@@ -427,9 +427,9 @@ define <vscale x 2 x half> @fnmad_hx2(<vscale x 2 x half> %m1, <vscale x 2 x hal
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 2 x half> %m1
- %mul = fmul fast <vscale x 2 x half> %neg_m1, %m2
- %res = fsub fast <vscale x 2 x half> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 2 x half> %m1
+ %mul = fmul contract <vscale x 2 x half> %neg_m1, %m2
+ %res = fsub contract <vscale x 2 x half> %mul, %acc
ret <vscale x 2 x half> %res
}
@@ -439,9 +439,9 @@ define <vscale x 4 x float> @fnmad_s(<vscale x 4 x float> %m1, <vscale x 4 x flo
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 4 x float> %m1
- %mul = fmul fast <vscale x 4 x float> %neg_m1, %m2
- %res = fsub fast <vscale x 4 x float> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 4 x float> %m1
+ %mul = fmul contract <vscale x 4 x float> %neg_m1, %m2
+ %res = fsub contract <vscale x 4 x float> %mul, %acc
ret <vscale x 4 x float> %res
}
@@ -451,9 +451,9 @@ define <vscale x 2 x float> @fnmad_sx2(<vscale x 2 x float> %m1, <vscale x 2 x f
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 2 x float> %m1
- %mul = fmul fast <vscale x 2 x float> %neg_m1, %m2
- %res = fsub fast <vscale x 2 x float> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 2 x float> %m1
+ %mul = fmul contract <vscale x 2 x float> %neg_m1, %m2
+ %res = fsub contract <vscale x 2 x float> %mul, %acc
ret <vscale x 2 x float> %res
}
@@ -463,9 +463,9 @@ define <vscale x 2 x double> @fnmad_d(<vscale x 2 x double> %m1, <vscale x 2 x d
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 2 x double> %m1
- %mul = fmul fast <vscale x 2 x double> %neg_m1, %m2
- %res = fsub fast <vscale x 2 x double> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 2 x double> %m1
+ %mul = fmul contract <vscale x 2 x double> %neg_m1, %m2
+ %res = fsub contract <vscale x 2 x double> %mul, %acc
ret <vscale x 2 x double> %res
}
@@ -475,9 +475,9 @@ define <vscale x 8 x half> @fnmla_h(<vscale x 8 x half> %acc, <vscale x 8 x half
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fnmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 8 x half> %m1
- %mul = fmul fast <vscale x 8 x half> %neg_m1, %m2
- %res = fsub fast <vscale x 8 x half> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 8 x half> %m1
+ %mul = fmul contract <vscale x 8 x half> %neg_m1, %m2
+ %res = fsub contract <vscale x 8 x half> %mul, %acc
ret <vscale x 8 x half> %res
}
@@ -487,9 +487,9 @@ define <vscale x 4 x half> @fnmla_hx4(<vscale x 4 x half> %acc, <vscale x 4 x ha
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 4 x half> %m1
- %mul = fmul fast <vscale x 4 x half> %neg_m1, %m2
- %res = fsub fast <vscale x 4 x half> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 4 x half> %m1
+ %mul = fmul contract <vscale x 4 x half> %neg_m1, %m2
+ %res = fsub contract <vscale x 4 x half> %mul, %acc
ret <vscale x 4 x half> %res
}
@@ -499,9 +499,9 @@ define <vscale x 2 x half> @fnmla_hx2(<vscale x 2 x half> %acc, <vscale x 2 x ha
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 2 x half> %m1
- %mul = fmul fast <vscale x 2 x half> %neg_m1, %m2
- %res = fsub fast <vscale x 2 x half> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 2 x half> %m1
+ %mul = fmul contract <vscale x 2 x half> %neg_m1, %m2
+ %res = fsub contract <vscale x 2 x half> %mul, %acc
ret <vscale x 2 x half> %res
}
@@ -511,9 +511,9 @@ define <vscale x 4 x float> @fnmla_s(<vscale x 4 x float> %acc, <vscale x 4 x fl
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 4 x float> %m1
- %mul = fmul fast <vscale x 4 x float> %neg_m1, %m2
- %res = fsub fast <vscale x 4 x float> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 4 x float> %m1
+ %mul = fmul contract <vscale x 4 x float> %neg_m1, %m2
+ %res = fsub contract <vscale x 4 x float> %mul, %acc
ret <vscale x 4 x float> %res
}
@@ -523,9 +523,9 @@ define <vscale x 2 x float> @fnmla_sx2(<vscale x 2 x float> %acc, <vscale x 2 x
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 2 x float> %m1
- %mul = fmul fast <vscale x 2 x float> %neg_m1, %m2
- %res = fsub fast <vscale x 2 x float> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 2 x float> %m1
+ %mul = fmul contract <vscale x 2 x float> %neg_m1, %m2
+ %res = fsub contract <vscale x 2 x float> %mul, %acc
ret <vscale x 2 x float> %res
}
@@ -535,9 +535,9 @@ define <vscale x 2 x double> @fnmla_d(<vscale x 2 x double> %acc, <vscale x 2 x
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %neg_m1 = fneg fast <vscale x 2 x double> %m1
- %mul = fmul fast <vscale x 2 x double> %neg_m1, %m2
- %res = fsub fast <vscale x 2 x double> %mul, %acc
+ %neg_m1 = fneg contract <vscale x 2 x double> %m1
+ %mul = fmul contract <vscale x 2 x double> %neg_m1, %m2
+ %res = fsub contract <vscale x 2 x double> %mul, %acc
ret <vscale x 2 x double> %res
}
@@ -547,9 +547,9 @@ define <vscale x 8 x half> @fnmla_h_reversed(<vscale x 8 x half> %acc, <vscale x
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fnmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %add = fadd fast <vscale x 8 x half> %mul, %acc
- %res = fneg fast <vscale x 8 x half> %add
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %add = fadd contract <vscale x 8 x half> %mul, %acc
+ %res = fneg contract <vscale x 8 x half> %add
ret <vscale x 8 x half> %res
}
@@ -559,9 +559,9 @@ define <vscale x 4 x half> @fnmla_hx4_reversed(<vscale x 4 x half> %acc, <vscale
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %add = fadd fast <vscale x 4 x half> %mul, %acc
- %res = fneg fast <vscale x 4 x half> %add
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %add = fadd contract <vscale x 4 x half> %mul, %acc
+ %res = fneg contract <vscale x 4 x half> %add
ret <vscale x 4 x half> %res
}
@@ -571,9 +571,9 @@ define <vscale x 2 x half> @fnmla_hx2_reversed(<vscale x 2 x half> %acc, <vscale
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %add = fadd fast <vscale x 2 x half> %mul, %acc
- %res = fneg fast <vscale x 2 x half> %add
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %add = fadd contract <vscale x 2 x half> %mul, %acc
+ %res = fneg contract <vscale x 2 x half> %add
ret <vscale x 2 x half> %res
}
@@ -583,9 +583,9 @@ define <vscale x 4 x float> @fnmla_s_reversed(<vscale x 4 x float> %acc, <vscale
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %add = fadd fast <vscale x 4 x float> %mul, %acc
- %res = fneg fast <vscale x 4 x float> %add
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %add = fadd contract <vscale x 4 x float> %mul, %acc
+ %res = fneg contract <vscale x 4 x float> %add
ret <vscale x 4 x float> %res
}
@@ -595,9 +595,9 @@ define <vscale x 2 x float> @fnmla_sx2_reversed(<vscale x 2 x float> %acc, <vsca
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %add = fadd fast <vscale x 2 x float> %mul, %acc
- %res = fneg fast <vscale x 2 x float> %add
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %add = fadd contract <vscale x 2 x float> %mul, %acc
+ %res = fneg contract <vscale x 2 x float> %add
ret <vscale x 2 x float> %res
}
@@ -607,9 +607,9 @@ define <vscale x 2 x double> @fnmla_d_reversed(<vscale x 2 x double> %acc, <vsca
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %add = fadd fast <vscale x 2 x double> %mul, %acc
- %res = fneg fast <vscale x 2 x double> %add
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %add = fadd contract <vscale x 2 x double> %mul, %acc
+ %res = fneg contract <vscale x 2 x double> %add
ret <vscale x 2 x double> %res
}
@@ -619,8 +619,8 @@ define <vscale x 8 x half> @fnmls_h(<vscale x 8 x half> %acc, <vscale x 8 x half
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fnmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %res = fsub fast <vscale x 8 x half> %mul, %acc
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %res = fsub contract <vscale x 8 x half> %mul, %acc
ret <vscale x 8 x half> %res
}
@@ -630,8 +630,8 @@ define <vscale x 4 x half> @fnmls_hx4(<vscale x 4 x half> %acc, <vscale x 4 x ha
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %res = fsub fast <vscale x 4 x half> %mul, %acc
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %res = fsub contract <vscale x 4 x half> %mul, %acc
ret <vscale x 4 x half> %res
}
@@ -641,8 +641,8 @@ define <vscale x 2 x half> @fnmls_hx2(<vscale x 2 x half> %acc, <vscale x 2 x ha
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %res = fsub fast <vscale x 2 x half> %mul, %acc
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %res = fsub contract <vscale x 2 x half> %mul, %acc
ret <vscale x 2 x half> %res
}
@@ -652,8 +652,8 @@ define <vscale x 4 x float> @fnmls_s(<vscale x 4 x float> %acc, <vscale x 4 x fl
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %res = fsub fast <vscale x 4 x float> %mul, %acc
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %res = fsub contract <vscale x 4 x float> %mul, %acc
ret <vscale x 4 x float> %res
}
@@ -663,8 +663,8 @@ define <vscale x 2 x float> @fnmls_sx2(<vscale x 2 x float> %acc, <vscale x 2 x
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %res = fsub fast <vscale x 2 x float> %mul, %acc
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %res = fsub contract <vscale x 2 x float> %mul, %acc
ret <vscale x 2 x float> %res
}
@@ -674,8 +674,8 @@ define <vscale x 2 x double> @fnmls_d(<vscale x 2 x double> %acc, <vscale x 2 x
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %res = fsub fast <vscale x 2 x double> %mul, %acc
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %res = fsub contract <vscale x 2 x double> %mul, %acc
ret <vscale x 2 x double> %res
}
@@ -685,8 +685,8 @@ define <vscale x 8 x half> @fnmsb_h(<vscale x 8 x half> %m1, <vscale x 8 x half>
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fnmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 8 x half> %m1, %m2
- %res = fsub fast <vscale x 8 x half> %mul, %acc
+ %mul = fmul contract <vscale x 8 x half> %m1, %m2
+ %res = fsub contract <vscale x 8 x half> %mul, %acc
ret <vscale x 8 x half> %res
}
@@ -696,8 +696,8 @@ define <vscale x 4 x half> @fnmsb_hx4(<vscale x 4 x half> %m1, <vscale x 4 x hal
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x half> %m1, %m2
- %res = fsub fast <vscale x 4 x half> %mul, %acc
+ %mul = fmul contract <vscale x 4 x half> %m1, %m2
+ %res = fsub contract <vscale x 4 x half> %mul, %acc
ret <vscale x 4 x half> %res
}
@@ -707,8 +707,8 @@ define <vscale x 2 x half> @fnmsb_hx2(<vscale x 2 x half> %m1, <vscale x 2 x hal
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x half> %m1, %m2
- %res = fsub fast <vscale x 2 x half> %mul, %acc
+ %mul = fmul contract <vscale x 2 x half> %m1, %m2
+ %res = fsub contract <vscale x 2 x half> %mul, %acc
ret <vscale x 2 x half> %res
}
@@ -718,8 +718,8 @@ define <vscale x 4 x float> @fnmsb_s(<vscale x 4 x float> %m1, <vscale x 4 x flo
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fnmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 4 x float> %m1, %m2
- %res = fsub fast <vscale x 4 x float> %mul, %acc
+ %mul = fmul contract <vscale x 4 x float> %m1, %m2
+ %res = fsub contract <vscale x 4 x float> %mul, %acc
ret <vscale x 4 x float> %res
}
@@ -729,8 +729,8 @@ define <vscale x 2 x float> @fnmsb_sx2(<vscale x 2 x float> %m1, <vscale x 2 x f
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x float> %m1, %m2
- %res = fsub fast <vscale x 2 x float> %mul, %acc
+ %mul = fmul contract <vscale x 2 x float> %m1, %m2
+ %res = fsub contract <vscale x 2 x float> %mul, %acc
ret <vscale x 2 x float> %res
}
@@ -740,7 +740,7 @@ define <vscale x 2 x double> @fnmsb_d(<vscale x 2 x double> %m1, <vscale x 2 x d
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fnmsb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
- %mul = fmul fast <vscale x 2 x double> %m1, %m2
- %res = fsub fast <vscale x 2 x double> %mul, %acc
+ %mul = fmul contract <vscale x 2 x double> %m1, %m2
+ %res = fsub contract <vscale x 2 x double> %mul, %acc
ret <vscale x 2 x double> %res
}